diff -Nru fonttools-2.4/debian/changelog fonttools-3.0/debian/changelog --- fonttools-2.4/debian/changelog 2015-06-15 16:36:12.000000000 +0000 +++ fonttools-3.0/debian/changelog 2015-10-30 04:57:39.000000000 +0000 @@ -1,3 +1,14 @@ +fonttools (3.0-1) unstable; urgency=medium + + * New upstream release + * Adopt package (closes: #764300) + * Remove fonttools-eexecop package; code dropped upstream + * Switch to pybuild + * Drop dependency on python-all-dev + * Update watch and copyright to point to GitHub + + -- Luke Faraone Fri, 30 Oct 2015 04:57:37 +0000 + fonttools (2.4-2) unstable; urgency=medium * QA upload. diff -Nru fonttools-2.4/debian/control fonttools-3.0/debian/control --- fonttools-2.4/debian/control 2015-06-15 16:37:50.000000000 +0000 +++ fonttools-3.0/debian/control 2015-10-30 04:54:39.000000000 +0000 @@ -1,12 +1,11 @@ Source: fonttools Section: fonts Priority: optional -Maintainer: Debian QA Group +Maintainer: Luke Faraone Build-Depends: debhelper (>= 9), python (>= 2.6.6-3~), python-all (>= 2.6.6-3~), - python-all-dev (>= 2.6.6-3~), python-numpy, dh-python Standards-Version: 3.9.6 @@ -18,26 +17,9 @@ python-numpy, ${misc:Depends}, ${python:Depends} -Recommends: - fonttools-eexecop (>= ${source:Version}) Description: Converts OpenType and TrueType fonts to and from XML FontTools/TTX is a library to manipulate font files from Python. It supports reading and writing of TrueType/OpenType fonts, reading and writing of AFM files, reading (and partially writing) of PS Type 1 fonts. It also contains a tool called "TTX" which converts TrueType/OpenType fonts to and from an XML-based format. - -Package: fonttools-eexecop -Architecture: any -Enhances: - fonttools -Depends: - ${misc:Depends}, - ${python:Depends}, - ${shlibs:Depends} -Description: Python extension to speed up fonttools - This is an optional C implementation of part of fonttools that speeds - up the eexec and charstring encryption algorithms as used by PostScript - Type 1 fonts. 
fonttools is much faster with it, it is quite small but - most people will be using fonttools with TrueType or OpenType fonts - so fonttools recommends it instead of depending on it. diff -Nru fonttools-2.4/debian/copyright fonttools-3.0/debian/copyright --- fonttools-2.4/debian/copyright 2013-06-22 15:30:34.000000000 +0000 +++ fonttools-3.0/debian/copyright 2015-10-30 04:58:57.000000000 +0000 @@ -2,18 +2,23 @@ Mon, 16 Sep 2002 01:26:10 +0800. This package was adopted by Paul Wise on Fri, 07 Oct 2005 12:27:52 +0800 +This package was adopted by Luke Faraone on +Fri, 30 Oct 2015 04:57:37 +0000 Downloaded from: - http://sourceforge.net/p/fonttools/files/ + https://github.com/behdad/fonttools/ Author: - Just van Rossum + Just van Rossum et al Copyright: Copyright 1996-2004 Just van Rossum + Copyright (c) 2000 BeOpen.com. All Rights Reserved. + Copyright (c) 1995-2001 Corporation for National Research Initiatives. All Rights Reserved. + Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. All Rights Reserved. Licence: diff -Nru fonttools-2.4/debian/doc-base fonttools-3.0/debian/doc-base --- fonttools-2.4/debian/doc-base 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/debian/doc-base 2009-11-08 13:14:31.000000000 +0000 @@ -0,0 +1,9 @@ +Document: fonttools +Title: fonttools/ttx Manual +Author: Just van Rossum +Abstract: This manual describes fonttools and ttx. 
+Section: Programming + +Format: HTML +Index: /usr/share/doc/fonttools/documentation.html +Files: /usr/share/doc/fonttools/documentation.html diff -Nru fonttools-2.4/debian/docs fonttools-3.0/debian/docs --- fonttools-2.4/debian/docs 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/debian/docs 2015-10-30 04:53:00.000000000 +0000 @@ -0,0 +1,2 @@ +build/README +Doc/documentation.html diff -Nru fonttools-2.4/debian/fonttools.doc-base fonttools-3.0/debian/fonttools.doc-base --- fonttools-2.4/debian/fonttools.doc-base 2009-11-08 13:14:31.000000000 +0000 +++ fonttools-3.0/debian/fonttools.doc-base 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -Document: fonttools -Title: fonttools/ttx Manual -Author: Just van Rossum -Abstract: This manual describes fonttools and ttx. -Section: Programming - -Format: HTML -Index: /usr/share/doc/fonttools/documentation.html -Files: /usr/share/doc/fonttools/documentation.html diff -Nru fonttools-2.4/debian/fonttools.docs fonttools-3.0/debian/fonttools.docs --- fonttools-2.4/debian/fonttools.docs 2009-11-08 13:14:31.000000000 +0000 +++ fonttools-3.0/debian/fonttools.docs 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -build/README -build/NEWS -Doc/documentation.html diff -Nru fonttools-2.4/debian/fonttools-eexecop.install fonttools-3.0/debian/fonttools-eexecop.install --- fonttools-2.4/debian/fonttools-eexecop.install 2013-06-12 04:29:53.000000000 +0000 +++ fonttools-3.0/debian/fonttools-eexecop.install 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -usr/lib/python*/*-packages/FontTools/fontTools/misc/eexecOp.so diff -Nru fonttools-2.4/debian/fonttools.install fonttools-3.0/debian/fonttools.install --- fonttools-2.4/debian/fonttools.install 2013-06-12 04:29:53.000000000 +0000 +++ fonttools-3.0/debian/fonttools.install 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -usr/bin -usr/lib -usr/share diff -Nru fonttools-2.4/debian/rules fonttools-3.0/debian/rules --- fonttools-2.4/debian/rules 2013-06-12 04:46:04.000000000 
+0000 +++ fonttools-3.0/debian/rules 2015-10-30 04:54:27.000000000 +0000 @@ -1,15 +1,14 @@ #!/usr/bin/make -f %: - +dh $@ --with python2 + +dh $@ --with python2 --buildsystem=pybuild override_dh_auto_build: - dh_auto_build + dh_auto_build --buildsystem=pybuild cp Doc/changes.txt build/NEWS sed -f debian/stripinstall < Doc/install.txt > build/README override_dh_install: - dh_install --package=fonttools --exclude=.so - dh_install --remaining-packages + dh_install --package=fonttools override_dh_installchangelogs: - dh_installchangelogs Doc/ChangeLog + dh_installchangelogs Doc/changes.txt diff -Nru fonttools-2.4/debian/watch fonttools-3.0/debian/watch --- fonttools-2.4/debian/watch 2013-06-12 04:29:11.000000000 +0000 +++ fonttools-3.0/debian/watch 2015-10-30 04:56:32.000000000 +0000 @@ -1,3 +1,3 @@ version=3 -opts=uversionmangle=s/(\d)[_\.\-\+]?((RC|rc|pre|dev|beta|alpha|b|a)\d*)$/$1~$2/ \ -http://sf.net/fonttools/fonttools-([\d\.]*)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz))) +opts=filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/fonttools-$1\.tar\.gz/ \ + https://github.com/behdad/fonttools/tags .*/v?(\d\S*)\.tar\.gz diff -Nru fonttools-2.4/Doc/ChangeLog fonttools-3.0/Doc/ChangeLog --- fonttools-2.4/Doc/ChangeLog 2013-06-22 14:29:18.000000000 +0000 +++ fonttools-3.0/Doc/ChangeLog 1970-01-01 00:00:00.000000000 +0000 @@ -1,3274 +0,0 @@ -2013-06-22 14:25 pabs3 - - * Doc/changes.txt, Lib/fontTools/__init__.py, setup.py: Release - fonttools version 2.4 - -2013-06-22 13:01 pabs3 - - * MetaTools/buildChangeLog.py: Fix the location of the SVN - repository - -2013-06-22 08:16 pabs3 - - * Lib/fontTools/ttx.py: Fix syntax error - -2013-06-22 08:13 pabs3 - - * Lib/fontTools/ttx.py: Detect both types of quotes when detecting - OTF vs TTF XML. 
- - Fixes: http://sourceforge.net/p/fonttools/bugs/47/ - -2013-06-22 06:47 pabs3 - - * Doc/ttx.1, Lib/fontTools/ttx.py: Writing to stdout is not - actually implemented yet - -2013-06-22 06:43 pabs3 - - * Doc/ttx.1, Lib/fontTools/ttx.py: Implement writing output to - arbitrary files including stdout. - - Partially-fixes: - http://sourceforge.net/p/fonttools/feature-requests/7/ - -2013-06-12 05:04 pabs3 - - * Lib/fontTools/ttLib/tables/otConverters.py: Fix consistency of - space/tab usage. - - Reference: - http://docs.python.org/reference/lexical_analysis.html#indentation - -2012-11-10 17:58 jvr - - * Lib/fontTools/ttLib/tables/_k_e_r_n.py: Georg Seifert: fix bug - with Apple's kern table format - -2012-10-18 12:49 jvr - - * ., Lib/fontTools/afmLib.py, Lib/fontTools/cffLib.py, - Lib/fontTools/fondLib.py, Lib/fontTools/misc/arrayTools.py, - Lib/fontTools/misc/bezierTools.py, Lib/fontTools/misc/psLib.py, - Lib/fontTools/misc/textTools.py, Lib/fontTools/nfntLib.py, - Lib/fontTools/pens/basePen.py, Lib/fontTools/t1Lib.py, - Lib/fontTools/ttLib/__init__.py, Lib/fontTools/ttLib/macUtils.py, - Lib/fontTools/ttLib/sfnt.py, - Lib/fontTools/ttLib/tables/D_S_I_G_.py, - Lib/fontTools/ttLib/tables/_g_a_s_p.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/tables/ttProgram.py, Lib/xmlWriter.py, - fonttools: merging fixes & changes from delft-sprint-2012 - -2012-03-01 13:26 jvr - - * MetaTools/build_otData.py: cosmetic change to check whether I can - check in - -2011-10-30 12:26 pabs3 - - * Lib/fontTools/ttx.py: Fix some typos in the ttx usage - instructions - - Patch-by: Paul Flo Williams - Fixes: https://bugzilla.redhat.com/694387 - Fixes: http://sf.net/support/tracker.php?aid=3279073 - -2011-03-28 10:41 pabs3 - - * Lib/fontTools/ttx.py: Remove shebang from ttyx.py since it is not - executed directly - -2011-03-28 10:18 pabs3 - - * LICENSE.txt, Lib/fontTools/misc/transform.py, Windows/README.TXT, - Windows/fonttools-win-setup.iss, 
Windows/fonttools-win-setup.txt, - Windows/ttx.ico: Remove executable permissions from files that do - not need them - -2011-02-13 07:28 pabs3 - - * Lib/fontTools/ttLib/tables/_h_m_t_x.py: Fix bug in last commit - -2011-02-13 07:01 pabs3 - - * Lib/fontTools/ttLib/tables/_h_m_t_x.py: Be more thorough when - working around font bugs in the hmtx table - - https://bugs.launchpad.net/ubuntu/+source/fonttools/+bug/223884 - -2011-02-13 06:27 pabs3 - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/tables/_l_o_c_a.py: Be more thorough when - working around font bugs in the loca/glyf tables - - https://bugs.launchpad.net/ubuntu/+source/fonttools/+bug/223884 - -2011-02-13 06:25 pabs3 - - * Lib/fontTools/ttLib/tables/O_S_2f_2.py: Be more thorough when - working around font bugs in the OS/2 table - - https://bugs.launchpad.net/ubuntu/+source/fonttools/+bug/223884 - -2011-02-13 06:24 pabs3 - - * Lib/fontTools/ttLib/tables/_l_o_c_a.py: Long-format loca tables - are unsigned not signed. - -2010-12-29 10:43 pabs3 - - * Windows/README.TXT, setup.py: Fix the instructions for building a - Windows installer. - -2010-01-09 09:12 pabs3 - - * Doc/documentation.html, Doc/ttx.1, Lib/fontTools/fondLib.py, - Lib/fontTools/ttLib/__init__.py: Fix typos: 'neccesary' should be - 'necessary'. - -2009-11-08 15:58 pabs3 - - * Doc/changes.txt, Lib/fontTools/__init__.py, setup.py: Release - fonttools version 2.3 - -2009-11-08 15:57 pabs3 - - * Lib/fontTools/misc/eexec.py: Fix loading the - fontTools.misc.eexecOp module that speeds up - fontTools.misc.eexec. - -2009-11-08 15:55 pabs3 - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: Fix some broken - assertions in the cmap format 1 code. - - Shame on Gentoo for not forwarding the patch upstream. - -2009-11-08 15:54 pabs3 - - * setup.py: Install the manual page to the correct location. - - Shame on MacPorts and Gentoo for not forwarding this change. - -2009-11-08 15:53 pabs3 - - * Lib/fontTools/ttx.py: Fix typo in help output. 
- -2009-11-08 15:52 pabs3 - - * Doc/changes.txt: Add brief entries to Doc/changes.txt for the - past two releases. - -2009-11-08 15:51 pabs3 - - * Doc/install.txt: Drop version number from Doc/install.txt - -2009-11-08 15:50 pabs3 - - * MANIFEST.in: Include the Doc/ChangeLog file in source tarballs - -2009-11-08 11:19 pabs3 - - * Doc/ttx.1: Document the new -y option to choose which font in a - TTC to decompile. - -2009-11-08 11:00 pabs3 - - * Lib/fontTools/unicode.py: updated unicode module to unicode 5.2.0 - -2009-11-08 06:39 pabs3 - - * Lib/fontTools/ttLib/tables/ttProgram.py: Raising strings is - deprecated in Python 2.5, raise an exception instead. - -2009-03-24 09:42 pabs3 - - * Lib/fontTools/ttLib/sfnt.py: Fix some warnings due to signedness - and 64-bitness issues - -2009-03-24 09:41 pabs3 - - * MetaTools/roundTrip.py: Fix arguments to diff in the roundTrip - testing tool - -2009-03-23 07:11 pabs3 - - * Lib/fontTools/ttLib/tables/.cvsignore: Remove old .cvsignore file - -2009-03-22 15:32 pabs3 - - * Doc/ChangeLog.txt, Doc/install.txt, MetaTools/buildChangeLog.py, - MetaTools/logmerge.py, Windows/README.TXT: Adapt Doc and - MetaTools to the use of SVN instead of CVS - -2009-02-22 08:55 pabs3 - - * Lib/fontTools/ttLib/__init__.py, Lib/fontTools/ttLib/sfnt.py, - Lib/fontTools/ttx.py: Apply remainder of #1675210: add support - for TrueType Collection (TTC) files. - -2008-09-16 14:14 jvr - - * Lib/fontTools/ttLib/tables/ttProgram.py: don't use 'as' as a - name, it's a keyword in Python >= 2.6 - -2008-06-17 20:41 jvr - - * Lib/fontTools/ttLib/sfnt.py: fixed buglet that caused the last - table in the font not to be padded to a 4-byte boundary (the spec - is a little vague about this, but I believe it's needed, also, - Suitcase may complain...) 
- -2008-05-18 06:30 pabs3 - - * Doc/ChangeLog.txt: Update changelog - -2008-05-18 06:28 pabs3 - - * Lib/fontTools/__init__.py, setup.py: Get ready to release version - 2.2 - -2008-05-17 09:21 jvr - - * Lib/fontTools/unicode.py: updated unicode module to unicode 5.1.0 - -2008-05-17 08:52 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: a different fix for - [1296026]: just comment out the offending assert - -2008-05-17 08:44 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: back out 'fix', as this - solution isn't portable - -2008-05-16 17:33 jvr - - * Lib/fontTools/agl.py: updated to aglfn 1.6 - -2008-05-16 15:07 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: cmap format 1 support, - contributed by rroberts - -2008-05-16 14:28 pabs3 - - * MANIFEST, MANIFEST.in, MetaTools/makeTarball.py: Use setup.py - sdist to create the tarball for distribution - -2008-05-16 08:45 pabs3 - - * Doc/ttx.1, setup.py: Add cleaned-up and updated manual page from - the Debian package. - -2008-05-16 07:17 pabs3 - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: Apply 1296026: fix - tracebacks in some timezones - -2008-03-11 07:25 jvr - - * Lib/fontTools/misc/psLib.py: - turned ps exceptions into classes - and renamed them - -2008-03-10 21:58 jvr - - * Lib/fontTools/misc/psCharStrings.py, Lib/fontTools/t1Lib.py: - - t1Lib.py can now properly read PFA fonts - - fixed "flex" bug in T1 charstring reader - -2008-03-09 21:43 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: added refactoring note - -2008-03-09 20:48 jvr - - * Lib/fontTools/ttLib/tables/otBase.py: minor fix: one zero too - many in assert - -2008-03-09 20:39 jvr - - * Lib/fontTools/ttx.py: added comment, the OTL Extension mechanism - should not be here - -2008-03-09 20:13 jvr - - * Lib/fontTools/ttLib/tables/otBase.py: fixed some comment typos - -2008-03-09 08:58 jvr - - * Lib/fontTools/ttLib/tables/V_O_R_G_.py: don't crash on empty VORG - table (reported by Werner Lemberg) - -2008-03-08 20:29 jvr - - * 
Lib/fontTools/ttLib/sfnt.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py: squash 2 bugs related to - the numpy conversion - -2008-03-07 19:56 jvr - - * Lib/fontTools/cffLib.py: - use the builtin symbols instead of the - types module - -2008-03-07 19:49 jvr - - * Lib/fontTools/cffLib.py: better float testing, so numpy.floats - also work. - -2008-03-04 15:42 jvr - - * Doc/install.txt: updated for numpy (this file needs a thorough - review) - -2008-03-04 15:34 jvr - - * Lib/fontTools/ttLib/tables/_g_a_s_p.py: initialize data to empty - string instead of list - -2008-03-04 15:34 jvr - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: ar.typecode() doesn't - exist in numpy, but then again, this was overkill - -2008-03-04 15:25 jvr - - * Lib/fontTools/misc/arrayTools.py, - Lib/fontTools/misc/bezierTools.py, Lib/fontTools/nfntLib.py, - Lib/fontTools/ttLib/sfnt.py, - Lib/fontTools/ttLib/tables/G_P_K_G_.py, - Lib/fontTools/ttLib/tables/_c_m_a_p.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/tables/_h_m_t_x.py, - Lib/fontTools/ttLib/tables/_l_o_c_a.py, - Lib/fontTools/ttLib/test/ttBrowser.py, setup.py: converted usage - of Numeric to numpy - -2008-03-04 15:04 jvr - - * Lib/fontTools/ttLib/test/ttBrowser.py: note this file is - deprecated - -2008-03-04 15:02 jvr - - * Lib/fontTools/ttLib/tables/_v_m_t_x.py: removed some redundant - imports - -2008-03-04 14:47 jvr - - * Lib/fontTools/misc/arrayTools.py: - moved Numeric import to top - - converted tests to doctest - -2008-03-01 17:26 jvr - - * Doc/ChangeLog.txt: updated change log - -2008-03-01 17:22 jvr - - * MANIFEST: updated file list - -2008-03-01 17:20 jvr - - * Lib/fontTools/__init__.py, setup.py: post 2.1 version numbering - -2008-03-01 17:03 jvr - - * Doc/bugs.txt: see sf tracker - -2008-03-01 16:43 jvr - - * Lib/fontTools/ttLib/tables/_g_a_s_p.py: the gasp portion of patch - 1675210: support for ClearType - -2008-03-01 15:31 jvr - - * Lib/fontTools/ttLib/tables/_h_m_t_x.py: fixed oversight in - 
sys.byteorder transition - -2008-03-01 11:43 jvr - - * Lib/fontTools/ttLib/__init__.py, Lib/fontTools/ttLib/sfnt.py, - Lib/fontTools/ttLib/tables/G_P_K_G_.py, - Lib/fontTools/ttLib/tables/T_S_I__5.py, - Lib/fontTools/ttLib/tables/_c_m_a_p.py, - Lib/fontTools/ttLib/tables/_c_v_t.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/tables/_h_m_t_x.py, - Lib/fontTools/ttLib/tables/_l_o_c_a.py, - Lib/fontTools/ttLib/tables/_p_o_s_t.py: Use sys.byteorder, - getting rid of ttLib.endian - -2008-03-01 11:34 jvr - - * Lib/fontTools/misc/macCreatorType.py, Lib/fontTools/t1Lib.py, - Lib/fontTools/ttx.py: - removed support for Python 2.2 on MacOS - 10.2 - - worked around a bug in GetCreatorType() on intel Macs - -2008-03-01 09:42 jvr - - * Lib/fontTools/ttx.py: Expose ignoreDecompileErrors as a command - line option (-e, to set - ignoreDecompileErrors to to false) - -2008-03-01 09:30 jvr - - * Lib/fontTools/ttLib/__init__.py: Make a hidden feature - accessible: optionally ignore decompilation errors, - falling back to DefaultTable, retaining the binary data. It's a - bit - dangerous to enable this by default, since it can lead to hiding - other - errors or bugs (in the font or fonttools itself). 
- -2008-02-29 14:43 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: - skip subtables of - length zero - - minor tweak in cmap 4 logic - contributed by rroberts - -2008-02-19 18:49 jvr - - * Lib/fontTools/ttLib/tables/T_S_I__1.py: fixed problem with - private VTT table, found by Peter Bilak - -2008-01-28 07:11 pabs3 - - * Doc/ChangeLog.txt: Update changelog - -2008-01-28 07:09 pabs3 - - * Lib/fontTools/__init__.py: Woops, missed a version number - -2008-01-28 04:59 pabs3 - - * MANIFEST: Add a MANIFEST file so that we don't forget files in - the source distribution - -2008-01-28 04:22 pabs3 - - * Doc/ChangeLog.txt: Update the changelog from the CVS history - -2008-01-28 04:19 pabs3 - - * setup.py: Get ready to release version 2.1 - -2008-01-28 04:00 pabs3 - - * setup.py: Just use fonttools as the tarball name. - -2007-11-14 07:06 pabs3 - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: Fix 1762552: traceback on - amd64 with DejaVuSans.ttf - -2007-10-22 09:31 jvr - - * Lib/fontTools/ttLib/tables/O_S_2f_2.py: added 'support' for OS/2 - version 4: can anyone verify this is correct? I can't seem to - find an OS/2 v4 spec... - -2007-08-25 06:19 pabs3 - - * Lib/fontTools/misc/textTools.py: Patch #1296028 from Tomasz - Wegrzanowski: improve performance with CJK fonts - -2006-10-21 14:12 jvr - - * Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/tables/otBase.py, - Lib/fontTools/ttLib/tables/otConverters.py, - Lib/fontTools/ttLib/tables/otData.py, - Lib/fontTools/ttLib/tables/otTables.py, Lib/fontTools/ttx.py: - Misc patches from rroberts: - - fontTools/ttx.py - # support virtual GIDs, support handling some GSUB offset - overflows. - - fontTools/ttlib/__init__.py - # 1) make getReverseGlyphMap a public function; I find a reverse - map - to often be useful - # 2) support virtual glyphs, e.g. references to GID's that are - not in the font. 
- # Added the TTFont argument allowVID (default 0) to turn this off - and on; - # added the arg requireReal ( default 0) so as to get the obvious - default behaviour when - # allowVID is 0 or 1, but to allow requiring a true GID when - allowVID is 1. - - fontTools/ttlib/tables/otBase.py - fontTools/ttlib/tables/otConverters.py - fontTools/ttlib/tables/otData.py - fontTools/ttlib/tables/otTables.py - # 1) speed optimization - # - collapse for loops - # - do not decompile extension lookups until a record is - requested - from within the lookup. - # 2) handling offset overflows - # 3) support of extension lookups - # 4) Fixed FetauresParam converter class def so as to survive a - font - that has this offset non-NULL. - # This fixes a stack dump. - # The data will now just get ignored - -2006-10-21 13:54 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: patches from rroberts: - - # 1) Switched to using Numeric module arrays rather than array - modules arrays, - # because of a memory leak in array.array when handling elements - > 1 byte. - # 2) speed optimizations: - # - For loops are collapsed when possible - # - the data for a subtable is parsed only if a mapping value is - requested - # - if two subtables share the same data offset, then on - decompilation, they will - # share the same cmap dict, and not get decompiled twice. Same - for compiling. - # - as before, two tables with the same contents will get - compiled to a single - # data block in the font. - # 3) added (self.platformID, self.platEncID) == (3, 10) to the - list - of subtables that - # get Unicode comments. - # 4) allow item reference by GID as well as by name. I did this - when - experimenting to see if using GID references only would speed the - compile; it didn't but it seemed useful enough to leave in. - # 5) Fixed compile to/from XML: for cmap type unknown ( aka cmap - format 10, in practice.) 
- -2006-10-21 13:41 jvr - - * Lib/fontTools/cffLib.py: Some edits from rroberts: - # 1) speed optimizations - # 2) fixed parseCharset0 to support CFF-CID fonts. - # 3) fixed CharsetConverter.read to work a font that actually has - one - of the pre-canned encodings. - # This fixes a stack dump. - # I did not try to support using these encodings when writing a - font, - # as the cases will be so rare as to not justify the processing - overhead for all other fonts. - - (Read: I took out some of your loop optimizations since I believe - they - made the code a lot less clear. I also have my doubts whether - they were - actually performance improvements.) - -2006-10-21 13:29 jvr - - * Lib/fontTools/ttLib/tables/V_O_R_G_.py: Speed optimizations from - rroberts - -2006-10-21 13:27 jvr - - * Doc/documentation.html, Lib/fontTools/ttLib/tables/G_M_A_P_.py, - Lib/fontTools/ttLib/tables/G_P_K_G_.py, - Lib/fontTools/ttLib/tables/M_E_T_A_.py, - Lib/fontTools/ttLib/tables/S_I_N_G_.py, - Lib/fontTools/ttLib/tables/__init__.py: Some non-official OT - tables from rrboerts. He wrote: - - There are also some new files, for SING glyphlet support, that - you - may or may not want to add, because they are not in the OpenType - spec. - - M_E_T_A_.py # SING glyphlet meta data table. see - 'http://partners.adobe.com/public/developer/opentype/gdk/topic.html" - S_I_N_G_.py # SING glyphlet basic info. See same web site as for - META - data table. - - G_M_A_P_.py # Summary of sing glyphlet info that has been stuck - into - a parent font. Not documented anywhere yet. - G_P_K_G_.py # Opaque wrapper for SING glyphlet info; travels with - application document. Is also stuck into augmented parent font. 
- Not - documented anywhere yet - -2006-10-21 13:16 jvr - - * Lib/fontTools/nfntLib.py: make this module work with semi-recent - MacPython - -2006-02-25 21:39 jvr - - * Lib/fontTools/misc/psCharStrings.py: support the deprecated - dotsection T2 operator - -2006-01-25 15:24 fcoiffie - - * Lib/fontTools/ttLib/tables/_k_e_r_n.py: In some bad fonts, kern - table is incomplete (it only contains a version number). In this - case, the code accept a table without kernTables. - -2006-01-25 15:21 fcoiffie - - * Lib/fontTools/ttLib/tables/L_T_S_H_.py: LTSH length can be - different of numGlyphs as the table length must be 4-bytes - aligned (assertion changed to (len(data) % numGlyphs) < 4) - -2006-01-25 15:12 fcoiffie - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: The dates are stored in - 8-bytes fields (Microsoft reference document) but Macintosh dates - are only coded with 4-bytes. In some fonts, these date fields are - badly coded and 8-bytes are used. So, a ValueError occurs. - -2006-01-12 14:04 fcoiffie - - * Lib/fontTools/ttLib/tables/O_S_2f_2.py, - Lib/fontTools/ttLib/tables/_p_o_s_t.py: Unsigned long data field - must be packed with "L" instead of "l" (sometimes an - OverflowError can occur) - -2005-06-24 09:35 jvr - - * Lib/fontTools/fondLib.py: Hmm, fondLib has been broken on - Python.framework for a while: I used - the native struct alignment, which is different on OSX. Changed - all - struct calls to explicitly use big endian (ready for x86...), - which - also fixes the alignment issue. - -2005-05-07 08:41 jvr - - * Lib/fontTools/misc/psCharStrings.py: some flex hint fixes from - rroberts - -2005-04-10 13:18 jvr - - * Lib/fontTools/pens/basePen.py: avoid glyphSet.get(): not all - glyphsets in use implement it. 
- -2005-03-08 09:50 jvr - - * Lib/fontTools/pens/basePen.py, Lib/fontTools/ttLib/__init__.py: - BasePen should not fail if a base glyph does not exist in the - glyph set; added get() method to _TTGlyphSet class - -2005-03-08 09:40 jvr - - * Lib/fontTools/pens/pointInsidePen.py: added _endPath method; - without it, we'd fail on open paths (which requires pen.endPath() - to be called instead of pen.closePath()) - -2005-02-25 22:31 jvr - - * Lib/fontTools/misc/bezierTools.py: use highly unscientific - epsilon value - -2005-02-25 12:51 jvr - - * Lib/fontTools/misc/bezierTools.py: more doco, reformatted __all__ - -2005-02-25 12:40 jvr - - * Lib/fontTools/misc/bezierTools.py: reworked test code and - results, to make the results more readable - -2005-02-25 12:28 jvr - - * Lib/fontTools/misc/bezierTools.py: Refactored splitting logic; - added splitQuadraticAtT() and splitCubicAtT() - -2005-02-25 10:47 jvr - - * Lib/fontTools/misc/bezierTools.py: show/test that _testrepr() - reprs Numeric arrays nicely, too - -2005-02-25 10:42 jvr - - * Lib/fontTools/misc/bezierTools.py: added a comment - -2005-02-25 10:40 jvr - - * Lib/fontTools/misc/bezierTools.py: factored out param -> points - conversion - -2005-02-25 10:33 jvr - - * Lib/fontTools/misc/bezierTools.py: renamed and rewrote _tuplify() - to _testrepr(), added tests for splitCubic() - -2005-02-25 10:11 jvr - - * Lib/fontTools/misc/bezierTools.py: some refactoring, some - doctests - -2005-02-23 22:15 jvr - - * Doc/ChangeLog.txt: hm, it's been more than a year and a half - since I regenerated ChangeLog.txt... - -2005-02-23 21:22 jvr - - * Lib/fontTools/misc/psCharStrings.py: This patch fixes two things - - in T2 charstrings, a byte code of 255 is followed by a 16.16 - fixed - point number, not a 4-byte int as in T1. Noted by rroberts. - - some integers were not correctly encoded in the T1 compiler. 
- -2005-02-11 19:36 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: fixed problem with empty - ClassDef, as well as added some more defenses for possible empty - tables - -2005-01-25 19:06 jvr - - * Lib/fontTools/t1Lib.py: expose onlyHeader keyword arg to generic - read() func - -2005-01-24 10:18 jvr - - * Lib/fontTools/ttLib/tables/T_S_I__0.py: uh, and the other one - -2005-01-24 10:06 jvr - - * Lib/fontTools/ttLib/__init__.py: fixed buglet in GlyphSet support - code - -2005-01-24 10:05 jvr - - * Lib/fontTools/ttLib/tables/T_S_I__0.py: fixed 2.4 compat issue - -2005-01-17 21:34 jvr - - * Lib/xmlWriter.py: - rename file to fileOrPath - - check for capability, not type, so XMLWriter can hanlde unicode - filenames (on OS-es that support them). - -2004-12-24 16:07 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: fix for new 2.4 hex() - behavior - -2004-12-24 15:59 jvr - - * Lib/fontTools/ttx.py: fix for new 2.4 hex() behavior - -2004-12-14 07:55 jvr - - * Windows/README.TXT, Windows/fonttools-win-setup.iss, - Windows/mcmillan.bat: Updates from Adam Twardoch. Thanks! - -2004-11-16 10:37 jvr - - * Lib/fontTools/ttLib/__init__.py, Lib/fontTools/ttLib/sfnt.py: - Refactored and enhanced table order support: - - Rewrote sorting function, it was really quite buggy. - - Added reorderFontTables() functions, which reorders the - tables in a font at the sfnt level. - - TTFont.save() will now by default rewrite the font in the - optimized order. This is done through a temp file since - our dependency checking logic gets in the way of writing - the tables in a predefined order directly (if table A depends - on B, table B will always be compiled and written first, so - this prevents A from showing up in the file before B). - - sfnt.py: - - removed closeStream option from SFNTWriter.close(); it's better - done by the caller (TTFont). 
- -2004-11-16 09:12 jvr - - * Lib/fontTools/ttLib/__init__.py: tweak & bugfix - -2004-09-26 18:32 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: make sure that a cmap - subtable instance always has a language attr, so __cmp__ can't - fail - -2004-09-25 10:56 jvr - - * Lib/fontTools/ttLib/tables/_k_e_r_n.py: Fix for [ 808370 ] - Dumping Legendum.otf fails on 'kern' table - Work around buggy kern table. - -2004-09-25 10:31 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: [ 637398 ] Failure while - parsing OpenType file - Deal with empty Coverage table: it will be None so won't have a - .glyphs - attribute. - -2004-09-25 10:01 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: workaround for bug [ - 766694 ] Error from invalid date - -2004-09-25 09:12 jvr - - * Lib/fontTools/ttLib/tables/_n_a_m_e.py: bug #784690: simple - workaround for buggy name table - -2004-09-25 09:06 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: - Refactored XML writing, - removed lots of code duplicaiton - - Only output unicode names as comments if we're in fact dealing - with - a unicode cmap subtable (and this is -- in theory -- independent - of - cmap format) - -2004-09-25 08:24 jvr - - * MetaTools/roundTrip.py, Tools/ttx: whoops, rolling back - accidental #! commits - -2004-09-25 08:12 jvr - - * Lib/fontTools/agl.py: "Downgraded" AGL list to the "Adobe Glyph - List For New Fonts", which is - most appropriate here. There may be a use for the "big" AGL, but - that will - have to become a new module. - -2004-09-25 07:47 jvr - - * Lib/fontTools/unicode.py, MetaTools/roundTrip.py, Tools/ttx: [ - 845172 ] Updating to Unicode 4.0.0 - Instead of using a list internally, I now use a dict, since the - unicode - mapping is quite sparse (lots of unused slots). - -2004-09-25 07:35 jvr - - * LICENSE.txt, Lib/fontTools/ttx.py: Patch #845551 from Anthony - Fok: - - two minor typos - - changed copyright year in LICENSE (and it's 2004 now...) 
- -2004-09-25 07:30 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py, - Lib/fontTools/ttLib/tables/_l_o_c_a.py: Patch #845571 from - Anthony Fok: - - better exception msg in loca table - - renamed "version" to "language" in cmap - - made cmap 12 work (untested by me) - -2004-09-25 07:19 jvr - - * Lib/fontTools/ttLib/tables/O_S_2f_2.py: whoops, forgot one part - os OS/2 version 3 support - -2004-09-24 18:33 jvr - - * Lib/fontTools/ttLib/tables/O_S_2f_2.py: added support for OS/2 - table #3 - -2003-10-14 20:30 jvr - - * Lib/fontTools/pens/pointInsidePen.py: Fixed subtle bug in curve - intersection logic: due to floating point errors, - sometimes a legitimate solution is ever so slightly over 1.0. - Those used to - be filtered out; now checking for 1.0 + 1e-10. - -2003-09-22 13:12 jvr - - * Lib/fontTools/ttLib/tables/otData.py, - Lib/fontTools/ttLib/tables/otTables.py: ReverseChainSingleSubst - support from Yannis H. (must get that - generate-otdata-from-the-docs working again) - -2003-09-22 07:09 jvr - - * Lib/fontTools/ttLib/tables/otData.py: bug from the spec leaked - into here; pointed out by Yannis H. - -2003-09-18 07:33 jvr - - * Doc/install.txt: checked in with unix line endings -- this - probably needs proper review - -2003-09-17 17:32 jvr - - * Lib/fontTools/misc/psCharStrings.py: - Implemented the flex - operators for T2 - - Changed a whole bunch of XXX traps into NotImplementedErrors - -2003-09-16 11:30 jvr - - * Lib/fontTools/misc/transform.py: more doctests - -2003-09-16 11:01 jvr - - * Lib/fontTools/misc/transform.py: Added lots of doco and doctests. - -2003-09-16 10:14 jvr - - * Lib/fontTools/pens/transformPen.py: debogofied doc string, added - another one - -2003-09-16 09:48 jvr - - * Lib/fontTools/misc/psCharStrings.py: - Properly support the pen - protocol for open sub paths in Type 1 - -2003-09-15 12:26 jvr - - * Lib/fontTools/pens/reportLabPen.py: Added Pen for - reportlab.graphics. 
- -2003-09-11 07:11 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: this should have been - part of the previous path by some fixes from klchxbec - -2003-09-09 23:29 jvr - - * Lib/fontTools/pens/transformPen.py: Correctly deal with the - TT-no-on-curve special case. - -2003-09-07 09:41 jvr - - * Lib/fontTools/pens/basePen.py: Factored out the SuperBezier and - TT-implied-point algorithms, as the - may be useful separately from pens. - -2003-09-06 16:00 jvr - - * Lib/fontTools/pens/basePen.py: - added endPath() method to the - Pen protocol, as a counterpart for - closePath() for *open* sub paths. This allows pen implementations - to reliably detect the end of a sub path. - - improved various doc strings. - -2003-09-05 14:18 jvr - - * Lib/fontTools/pens/basePen.py: ensure that the current point is - always set correctly - -2003-09-02 19:23 jvr - - * Lib/fontTools/ttLib/tables/otBase.py: tweaked doc string - -2003-09-01 16:10 jvr - - * Doc/documentation.html: more acks - -2003-09-01 15:09 jvr - - * Lib/fontTools/ttLib/tables/_h_h_e_a.py: Ha, a reserved field got - eaten. Noticed by Yannis Haralambous. - -2003-08-30 06:46 jvr - - * Doc/ChangeLog.txt: *** empty log message *** - -2003-08-29 19:29 jvr - - * Lib/fontTools/misc/arrayTools.py: - renamed all l,t,r,b tuff to - xMin, yMin, xMax, yMax - - added ome more doc strings - - added some minimal test code - -2003-08-29 08:05 jvr - - * Lib/fontTools/misc/psCharStrings.py: T2: I'm not sure if the - seac-variant of the endchar operator may be - combined with actual outlines, but if it is, we need to do the - closePath - before the components are added. - -2003-08-28 20:43 jvr - - * Lib/fontTools/misc/psCharStrings.py: added deprecated - endchar/seac support for T2 charstrings - -2003-08-28 19:30 jvr - - * Lib/fontTools/pens/basePen.py: - added support for quadratic - contours that have NO on-curve points. - - more doco and comments. 
- -2003-08-28 19:03 jvr - - * Lib/fontTools/pens/pointInsidePen.py: more doco and comments - -2003-08-28 18:23 jvr - - * Lib/fontTools/ttLib/__init__.py: workaround for buggy 2.2 mac - support - -2003-08-28 18:14 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: whitespace nits - -2003-08-28 18:04 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: Another patch from - rroberts. He writes: - """It adds full support for cmap format 2, which is what - the Adobe CJK fonts use for the Mac cmap subtable.""" - -2003-08-28 17:59 jvr - - * setup.py: whoops, forgot to add the .pens subpacke to the - distutils script :-( noticed by rroberts. - -2003-08-28 08:51 jvr - - * Lib/fontTools/pens/pointInsidePen.py: Is the point inside or - outside the outline? - -2003-08-26 19:20 jvr - - * Lib/fontTools/misc/bezierTools.py: - Fixed ZeroDivisionError in - solveCubic(). The solution is mathematically - dubious (I don't think 0.0/0.0 == 0.0...) but the result seems to - be - correct. - - Documented that soleCubic() and solveQuadratic() are not - guaranteed to - return the roots in order, and nor that they are guaranteed to - not return - duplicate roots. - -2003-08-26 19:00 jvr - - * Lib/fontTools/ttLib/__init__.py: Set .width in _TTGlyph.__init__ - after all: these are just this wrapper - objects, _TTGlyphSet doesn't cache them, so setting .width in - .draw() - is confusing to say the least. - -2003-08-26 18:20 jvr - - * Lib/fontTools/pens/cocoaPen.py: new Cocoa=specific drawing pen - -2003-08-26 18:19 jvr - - * Lib/fontTools/ttLib/__init__.py: fixed AttributeError in - _TTGlyphSet.keys() - -2003-08-26 12:02 jvr - - * Doc/documentation.html: typo pointed out by Adam T. 
- -2003-08-25 21:19 jvr - - * MetaTools/buildTableList.py: output don't-edit note - -2003-08-25 21:19 jvr - - * Lib/fontTools/ttLib/tables/__init__.py: document that this file - is generated - -2003-08-25 21:16 jvr - - * Doc/documentation.html: add rroberts to the Acknowledgements - section; updated some years - -2003-08-25 17:53 jvr - - * Lib/fontTools/t1Lib.py: add the generic getGlyphSet() API to - T1Font as well. - -2003-08-25 13:20 jvr - - * Lib/fontTools/ttLib/__init__.py: comment typo fix, reflow - -2003-08-25 13:18 jvr - - * Doc/ChangeLog.txt: lots of stuff - -2003-08-25 13:15 jvr - - * Lib/fontTools/ttLib/__init__.py: TTFont now has a getGlyphSet() - method, which will return a generic - GlyphSet. A GlyphSet is simply a dict-like object mapping glyph - names - to glyphs. The glyphs in a GlyphSet have a .draw(pen) method and - a - .width attribute. This provides a generic interface for drawing - glyphs - or extracting outlines, and works both for CFF-based fonts and TT - fonts. - - See also fontTools.pens.basePen for a description of what makes a - Pen - a Pen. - -2003-08-25 12:23 jvr - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: add some more dict-like - stuff to the glyf table - -2003-08-25 07:37 jvr - - * Lib/fontTools/cffLib.py: small nits - -2003-08-24 19:56 jvr - - * Lib/fontTools/cffLib.py, Lib/fontTools/misc/psCharStrings.py, - Lib/fontTools/t1Lib.py: Refactored outline extraction for - CharStrings. The interface to - T{1,2}OutlineExtractor is not backwards compatible and this - change - basically makes them private classes: CharStrings now have a - .draw() - method that takes a Pen object (see fontTools.pens.*), so you - never - have to deal with the extractor objects yourself. Only lightly - tested. 
- -2003-08-24 19:52 jvr - - * Lib/fontTools/ttx.py: don't use macfs, it's deprecated - -2003-08-24 17:23 jvr - - * Lib/fontTools/pens/basePen.py: small tweak - -2003-08-24 16:25 jvr - - * Lib/fontTools/misc/psCharStrings.py: added and tweaked some - asserts - -2003-08-24 16:17 jvr - - * Lib/fontTools/misc/transform.py: remove trailing whitespace - -2003-08-24 09:48 jvr - - * Lib/fontTools/pens/transformPen.py: ugh, lineTo != moveTo... - -2003-08-23 20:24 jvr - - * Lib/fontTools/pens/basePen.py, Lib/fontTools/pens/boundsPen.py: - wrapped some long lines - -2003-08-23 20:19 jvr - - * Lib/fontTools/pens, Lib/fontTools/pens/__init__.py, - Lib/fontTools/pens/basePen.py, Lib/fontTools/pens/boundsPen.py, - Lib/fontTools/pens/transformPen.py: Pen stuff, see - http://just.letterror.com/cgi-bin/wypy?PenProtocol - Only lightly tested, component support is not tested at all. - -2003-08-22 20:21 jvr - - * Lib/fontTools/ttLib/tables/otData.py: some fixes from klchxbec - -2003-08-22 20:02 jvr - - * Lib/fontTools/ttLib/tables/V_O_R_G_.py: VORG support by rroberts. - -2003-08-22 19:53 jvr - - * Lib/fontTools/cffLib.py: Lots of CID work by rroberts. - -2003-08-22 19:44 jvr - - * Lib/fontTools/ttLib/__init__.py: - attempted to sort tables in - order recommended by spec. - TODO: need to fix table dependency order to complete this. - (Read: would you mind posting a bug report regarding this?) 
- -2003-08-22 19:38 jvr - - * Lib/fontTools/ttLib/sfnt.py: support for CEF fonts: don't depend - on the head table being available - -2003-08-22 19:34 jvr - - * Lib/fontTools/ttLib/tables/_m_a_x_p.py: recalc numGlyphs upon - writing - -2003-08-22 18:56 jvr - - * Lib/fontTools/ttLib/macUtils.py: update macUtils to current day - MacPython - -2003-08-22 18:53 jvr - - * Lib/fontTools/nfntLib.py: attempt to set the value for fRectWidth - 'more correctly' - -2003-08-22 18:52 jvr - - * Lib/fontTools/ttLib/__init__.py: some modernizations - -2003-08-22 18:50 jvr - - * Lib/fontTools/ttx.py: Jaguar Python 2.2 workaround - -2003-08-22 14:56 jvr - - * Lib/fontTools/misc/transform.py: this module has been included in - so many (in house) packages that it's time it gets a more central - place. - -2003-06-29 19:25 jvr - - * Lib/fontTools/misc/bezierTools.py: - optimized a couple of - invariant expressions - - made sure solveCubic() also works when called with integer - arguments - -2003-06-29 18:32 jvr - - * Lib/fontTools/misc/bezierTools.py: splitLine(): make sure the - split is between the end points - -2003-06-29 18:25 jvr - - * Lib/fontTools/misc/bezierTools.py: new module bezierTools.py - -2003-06-29 18:18 jvr - - * Lib/fontTools/misc/arrayTools.py: two new functions - -2003-06-07 15:15 jvr - - * Lib/fontTools/t1Lib.py: avoid FSSpec on MacOS - -2003-05-24 12:50 jvr - - * Lib/fontTools/afmLib.py: add default _attrs value so pickling AFM - objects doesn't blow up - -2003-05-24 12:34 jvr - - * Lib/fontTools/t1Lib.py: updated for MacPython2.3 - -2003-02-23 19:40 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: Fix for bug #691744; - calc_mac_epoch_diff() was broken when the timezone - was GMT (or perhaps other situations; it's not entirely clear). - -2003-02-08 10:45 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: cmap format 12 support, - donated by rroberts: thanks! 
- -2003-01-25 18:20 jvr - - * Lib/fontTools/ttLib/tables/_n_a_m_e.py: second try to work around - bogus stringOffset value - -2003-01-25 11:15 jvr - - * Lib/fontTools/ttLib/tables/_n_a_m_e.py: renamed stringoffset to - stringOffset as per spec - -2003-01-25 11:14 jvr - - * Lib/fontTools/ttLib/tables/_n_a_m_e.py: gracefully handle bogus - stringOffset values (thanks to Anthony Fok) - -2003-01-10 22:34 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: clean up - checkSumAdjustment XML output: suppress trialing 'L' - -2003-01-10 22:23 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: fix from Owen Taylor that - fixes my previous patch; thanks! - -2003-01-03 21:29 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: detab table string - literal - -2003-01-03 21:23 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: make two fields unsigned, - to conform to the spec but also to suppress Python 2.3 warnings - for hex(negativenumber). - -2003-01-03 21:01 jvr - - * setup.py: edited meta info, added trove classification - -2003-01-03 20:57 jvr - - * Lib/fontTools/ttLib/sfnt.py: suppres Python 2.3 warning - -2003-01-03 20:56 jvr - - * Lib/fontTools/cffLib.py: Added support for the Encoding field. - (Adam, please let me know if this - works for you.) - -2003-01-03 20:54 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: make sure Coverage - instances have a 'glyphs' attribute - -2003-01-03 20:52 jvr - - * Lib/fontTools/ttLib/tables/O_S_2f_2.py: allow OS/2 tables - containing too much data - -2002-11-26 14:09 jvr - - * Lib/fontTools/afmLib.py: allow negative advance widths - -2002-10-29 15:51 jvr - - * Doc/ChangeLog.txt: it's been a while. - -2002-10-29 15:49 jvr - - * Lib/fontTools/fondLib.py: try Carbon.Res first. 
- -2002-10-27 19:47 jvr - - * Lib/fontTools/ttLib/standardGlyphOrder.py: revised comment, added - note about MS disagreement, removed alignment tabs - -2002-10-27 09:11 jvr - - * Windows/fonttools-win-setup.iss, Windows/fonttools-win-setup.txt: - Patches from Adam: - So, meanwhile, attached is a slightly improved isntaller. Now, - .ttx files - are registered as XML files so you can use Internet Explorer etc. - to view - them. Also, I'm creating a shortcut in the "SendTo" structure, so - the user - can click with RMB on the TTF, OTF or TTX file, then select Send - To / TTX. - Works with multiple files, very nice. - -2002-10-08 14:22 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: fixed previous fix: it - assumed 4 bytes of data, which is wrong - -2002-10-07 21:34 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: Handle negative long time - values gracefully instead of looping infinitely. Reported by - Jessica P. Hekman - -2002-09-24 20:50 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: deal gracefully with - empty coverage tables; fixes bug 611509 - -2002-09-16 08:21 jvr - - * Doc/changes.txt: note about maxp change - -2002-09-16 08:10 jvr - - * Lib/fontTools/ttLib/tables/_m_a_x_p.py: minor cleanups - -2002-09-16 08:06 jvr - - * Lib/fontTools/ttLib/tables/_m_a_x_p.py: interpret any version - value as 0x0001000 if it's not 0x00005000 (workaround for buggy - font) - -2002-09-14 15:31 jvr - - * Doc/changes.txt, Lib/fontTools/ttx.py: ugh, the zfill string - method doesn't exist in 2.2.1 and earlier :-( - -2002-09-13 13:17 jvr - - * Doc/changes.txt, Doc/documentation.html: small changes, first bux - fix note since 2.0b1 - -2002-09-13 12:07 jvr - - * Lib/fontTools/__init__.py, Windows/fonttools-win-setup.iss: - bumped version to 2.0b2 - -2002-09-13 12:04 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: whoops, reversed key and - value - -2002-09-13 00:17 jvr - - * Doc/bugs.txt: mention control chars in name table - -2002-09-12 23:15 jvr - - * Doc/ChangeLog.txt: last 
commit for 2.0b1, I hope... - -2002-09-12 23:14 jvr - - * Tools/ttx: macfreeze import hints - -2002-09-12 22:59 jvr - - * Lib/fontTools/ttx.py: add simple support for Mac Suitcases, when - running on MacOS - -2002-09-12 22:22 jvr - - * Doc/bugs.txt, Doc/documentation.html: converging towards 2.0b1 - -2002-09-12 22:03 jvr - - * Mac/README.txt: note about rustiness - -2002-09-12 21:48 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: added manual - implementation of LigatureSubst to get nicer XML output - -2002-09-12 21:21 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: added manual - implementation of AlternateSubst to get nicer XML output - -2002-09-12 20:51 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: added manual - implementation of ClassDef to get nicer XML output as well as to - get rid of GlyphID dependencies - -2002-09-12 20:05 jvr - - * Lib/fontTools/ttx.py: doh! fixed wrong indentation, now does - batch jobs again... - -2002-09-12 19:54 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: added manual - implementation of SingleSubst to get nicer XML output as well as - to get rid of GlyphID dependencies - -2002-09-12 19:14 jvr - - * MetaTools/roundTrip.py: break out of loop when cancelled - -2002-09-12 19:07 jvr - - * Lib/fontTools/ttLib/tables/_n_a_m_e.py: don't barf on empty name - tables (!) 
- -2002-09-12 18:53 jvr - - * MetaTools/roundTrip.py: adapted doc string to reality; removed -v - options - -2002-09-12 18:34 jvr - - * MetaTools/roundTrip.py: brand new round trip tool - -2002-09-12 17:33 jvr - - * Lib/fontTools/ttx.py, Tools/ttx: moved all ttx code to new - fontTools.ttx module - -2002-09-12 17:22 jvr - - * Tools/ttx: refactored slightly, preparing for miving most of this - code to fontTools.ttx.py - -2002-09-12 17:09 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: renamed table to rawTable - -2002-09-12 17:02 jvr - - * Tools/ttroundtrip: new version will appear in MetaTools - -2002-09-12 17:01 jvr - - * Tools/ttcompile, Tools/ttdump, Tools/ttlist: these tools have - been replaced by the multi-purpose ttx tool - -2002-09-12 16:47 jvr - - * Lib/fontTools/ttLib/tables/otTables.py: added manual - implementation for the Coverage subtable to get rid of GlyphID - dependencies - -2002-09-12 16:45 jvr - - * Lib/fontTools/ttLib/tables/otBase.py, - Lib/fontTools/ttLib/tables/otConverters.py: minor refactoring - -2002-09-10 20:47 jvr - - * Doc/ChangeLog.txt: updating - -2002-09-10 20:43 jvr - - * Windows/fonttools-win-setup.txt: improved readme - -2002-09-10 20:42 jvr - - * MetaTools/buildTableList.py: also add tables to - documentation.html - -2002-09-10 20:41 jvr - - * Doc/documentation.html: added listing of all supported tables - -2002-09-10 20:35 jvr - - * Doc/ChangeLog.txt, Doc/changes.txt, Doc/documentation.html: - updating - -2002-09-10 20:23 jvr - - * Tools/ttx: include version in help text - -2002-09-10 20:14 jvr - - * Tools/ttx: catch SystemExit separately, factored out windows - keypress stuff - -2002-09-10 19:42 jvr - - * mktarball.py: moved to MetaTools - -2002-09-10 19:41 jvr - - * MetaTools/makeTarball.py: new name and location of mktarball.py - -2002-09-10 19:26 jvr - - * Lib/fontTools/ttLib/tables/otBase.py: refactored slightly to make - later specializations easier - -2002-09-10 17:25 jvr - - * Tools/ttx: trickery to keep the DOS window 
open if there was - exception - -2002-09-10 15:37 jvr - - * Windows/fonttools-win-setup.iss: adapt to renamed doco - -2002-09-10 14:10 jvr - - * Doc/changes.txt: more notes about 2.0b1 - -2002-09-10 13:39 jvr - - * Doc/ChangeLog.txt: mit freundlichen Gruessen an Werner Lemberg - ;-) - -2002-09-10 13:25 jvr - - * MetaTools/buildChangeLog.py, MetaTools/logmerge.py: 2 scripts to - generate ChangeLog file, logmerge.py is stolen from the python - distro - -2002-09-10 13:14 jvr - - * Tools/ttx: mjmja - -2002-09-10 13:13 jvr - - * Doc/documentation.html, Doc/install.txt, README.txt: updated - documentation, split user and developer doco - -2002-09-10 13:09 jvr - - * MetaTools/buildTableList.py: tool to generate - fontTools/ttLib/tables/__init__.py - -2002-09-10 13:08 jvr - - * Lib/fontTools/ttLib/tables/__init__.py: __init__.py is now - generated my MetaTools/builtTableList.py - -2002-09-10 11:55 jvr - - * Doc/documentation.html, Doc/index.html: renamed index.html to - documentation.html - -2002-09-10 11:38 jvr - - * Lib/fontTools/__init__.py: prepare for 2.0b1 - -2002-09-10 11:36 jvr - - * MetaTools, MetaTools/build_otData.py, MetaTools/doco.diff: tool - to generate the otData.py module - -2002-09-10 11:27 jvr - - * Windows/README.TXT: minor nit, cleaner, and works for me - -2002-09-10 11:23 jvr - - * Windows/README.TXT, Windows/fonttools-win-setup.iss, - Windows/fonttools-win-setup.txt: adapted to new ttx app (not yet - tested) - -2002-09-10 09:22 jvr - - * setup.py: ttx it is, the other tools are now obsolete - -2002-09-10 09:16 jvr - - * Tools/ttx: added doc string, fiddled with options, made file name - creation a little more sensible - -2002-09-10 08:47 jvr - - * Tools/ttx: new command line tool 'ttx', replaces ttdump, - ttcompile and ttlist - -2002-09-09 21:28 jvr - - * Windows/README.TXT: changed py2exe recommended options: removed - -O2, added encodings package (needed for compilation) - -2002-09-09 21:23 jvr - - * Windows/README.TXT: more fonttools->TTX renaming 
- -2002-09-09 21:22 jvr - - * Windows/fonttools-win-setup.iss, Windows/fonttools.ico, - Windows/ttx.ico: some fonttools->TTX renaming - -2002-09-09 21:12 jvr - - * Windows/README.TXT: fixed item numbering - -2002-09-09 21:12 jvr - - * Windows/README.TXT: fixed naming of the Win folder - -2002-09-09 20:38 uid55619 - - * ttx_shellext_win32.py: obsolete, see Windows subdirectory - -2002-09-09 20:09 uid55619 - - * Windows/fonttools-win-setup.txt: another dummy checkin - -2002-09-09 20:05 uid55619 - - * Windows/fonttools-win-setup.txt: dummy checkin - -2002-09-09 19:58 uid55619 - - * Windows, Windows/README.TXT, Windows/fonttools-win-setup.iss, - Windows/fonttools-win-setup.txt, Windows/fonttools.ico: Adam's - new windows installer stuff. - -2002-09-09 18:17 jvr - - * Lib/xmlWriter.py: by default, specify an encoding when creating - XML files - -2002-09-09 14:19 jvr - - * Lib/fontTools/ttLib/xmlImport.py: use latin-1 as the default - encoding when parsing XML files - -2002-09-09 14:18 jvr - - * Lib/fontTools/cffLib.py: make 8-bit chars work in CFF Notice and - Copyright fields - -2002-09-05 19:46 jvr - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: align glyphs on 4-byte - boundaries, seems the current recommendation by MS - -2002-09-05 19:35 jvr - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: allow 4-byte alignment of - glyph data - -2002-08-30 17:52 jvr - - * Lib/fontTools/ttLib/tables/otData.py: fixed spelling consistency - bug. Note to self: report as bug in OT doco. - -2002-07-29 21:39 jvr - - * Lib/fontTools/t1Lib.py: break before adding the data.. - -2002-07-29 21:33 jvr - - * Lib/fontTools/t1Lib.py: added only-read-the-header feature to - readLWFN(), similar to readPFB() - -2002-07-23 17:56 jvr - - * Tools/ttroundtrip: added -v (verbose) option to ttroundtrip, - causing stdout of ttdump and ttcompile not to be tossed. not all - that useful due to buffering. 
- -2002-07-23 16:44 jvr - - * Lib/fontTools/ttLib/__init__.py, Lib/fontTools/ttLib/macUtils.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/xmlImport.py: some (modified) progress bar - support - -2002-07-23 16:42 jvr - - * Lib/fontTools/cffLib.py: some progress bar support - -2002-07-23 16:41 jvr - - * Lib/xmlWriter.py: some preliminary progress bar support - -2002-07-23 14:54 jvr - - * Lib/fontTools/t1Lib.py: back out pfa 'fix'; it reverses a bug fix - from last year... - -2002-07-23 09:26 jvr - - * Lib/fontTools/misc/eexec.py: 'python' implementation of hex - functions - -2002-07-23 09:25 jvr - - * Lib/fontTools/t1Lib.py: fixed handling of PFA files by being less - smart about figuring out the end of the eexec part - -2002-07-23 08:43 jvr - - * Lib/fontTools/ttLib/tables/otBase.py: reordered/regrouped some - methods for clarity - -2002-07-23 08:19 jvr - - * Lib/fontTools/ttLib/tables/otBase.py: don't use __len__ for - arbitrary length method - -2002-07-23 07:51 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: clarified cmap4 - optimization strategy - -2002-07-22 22:39 jvr - - * Lib/fontTools/ttLib/tables/otBase.py: duh, I don't even _need_ to - track referers with the current scheme - -2002-07-22 22:22 jvr - - * Lib/fontTools/ttLib/tables/otBase.py: minor changes - -2002-07-22 22:13 jvr - - * Lib/fontTools/ttLib/tables/otBase.py: completely revamped - optimization strategy: now even _shrinks_ certain Adobe and MS - OTL tables. - -2002-07-21 20:05 jvr - - * Lib/fontTools/ttLib/sfnt.py: Wow, the master checksum in the - 'head' table was never written to file correctly on little-endian - platforms :-(. Fixed. - -2002-07-20 21:57 jvr - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: Optimized cmap format 4 - compile function: now creates more compact binary. The code is - horrible, but then again cmap format 4 is beyond horrible... - -2002-07-13 08:15 jvr - - * setup.py: oops. 
- -2002-07-12 19:20 jvr - - * Lib/fontTools/t1Lib.py: don't test for os.name, as the mac stuff - now all works under darwin/posix as well - -2002-07-11 18:19 jvr - - * setup.py: added py2exe support (yes, that was basically all there - was to it...) - -2002-07-11 18:17 jvr - - * Lib/fontTools/ttLib/__init__.py: make dynamic table import work - when importing from a zip file (for py2exe) - -2002-07-04 17:17 jvr - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: repair ttcompile -b - -2002-07-04 17:17 jvr - - * Lib/fontTools/ttLib/tables, - Lib/fontTools/ttLib/tables/.cvsignore: ignore .pyc files - -2002-07-01 09:11 jvr - - * setup.py: gracefully skip C extension if it can't be built - -2002-06-06 19:59 jvr - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: increment progress less - frequently, it was too costly... - -2002-06-06 19:58 jvr - - * Lib/fontTools/ttLib/macUtils.py: MacPython 2.2 compat - -2002-06-04 19:11 jvr - - * Lib/fontTools/misc/psLib.py: finally upgraded psLib to use re - instead of the long obsolete regex module. - -2002-06-04 19:10 jvr - - * Lib/fontTools/misc/psOperators.py: nits - -2002-06-04 19:08 jvr - - * Lib/fontTools/ttLib/tables/__init__.py: add dummy import - function, so modulefinder can find our tables. - -2002-05-25 16:08 jvr - - * Tools/ttcompile, Tools/ttdump: hm, forgot to remove the -d option - from the getopt format string - -2002-05-25 16:08 jvr - - * Tools/ttroundtrip: mucking with the usage string - -2002-05-25 16:04 jvr - - * Tools/ttroundtrip: allow some ttdump options; not -s as that - makes diffing that much harder - -2002-05-25 15:28 jvr - - * Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/xmlImport.py: It still wasn't right; I think - the glyph order mess is now sufficiently cleaned up; at least - compiling the result of ttdump -x glyf works again. 
- -2002-05-25 14:56 jvr - - * Lib/fontTools/ttLib/__init__.py: make sure the glyph order is - loaded when importing XML as the TTX file may not contain it - (ttdump -t/ttcompile -i). - -2002-05-25 08:22 jvr - - * Lib/fontTools/ttLib/__init__.py: whoops, the new GlyphOrder table - stuff broke ttdump -s - -2002-05-24 18:44 jvr - - * Tools/ttroundtrip: slight doc rewording - -2002-05-24 18:36 jvr - - * Tools/ttroundtrip: test script: batch roundtripper - -2002-05-24 17:42 jvr - - * Lib/fontTools/ttLib/tables/_h_d_m_x.py: gross hack to allow ; in - glyph names (I don't think it _is_ allowed, but hey, I've got - this font here...) - -2002-05-24 16:52 jvr - - * Lib/fontTools/ttLib/tables/T_S_I__0.py: don't blow up on orphaned - VTT index tables - -2002-05-24 14:42 jvr - - * Lib/fontTools/ttLib/tables/ttProgram.py: fixed ttdump -i mode - -2002-05-24 11:55 jvr - - * Lib/fontTools/cffLib.py, Lib/fontTools/misc/psCharStrings.py: - added support for raw bytecode: this happens unintentionally for - subrs that aren't referenced, but it's good to have anyway, in - case we want to switch T2 decompilation off. - -2002-05-24 10:38 jvr - - * Lib/fontTools/cffLib.py: whoops, make charset format 2 work - also.. - -2002-05-24 10:35 jvr - - * Lib/fontTools/cffLib.py: implemented compiling charset format 1 - and 2 - -2002-05-24 09:58 jvr - - * Lib/fontTools/cffLib.py, Lib/fontTools/misc/psCharStrings.py, - Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/tables/C_F_F_.py: CFF/T2 <-> XML - roundtripping has begun! - -2002-05-23 21:50 jvr - - * Lib/fontTools/cffLib.py, Lib/fontTools/misc/psCharStrings.py: - first working version of CFF/T2 compiler; needs - cleanup/refactoring, and doesn't import from XML yet; hardly - tested. 
- -2002-05-23 09:42 jvr - - * Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/xmlImport.py: big change: the glyph order is - now dumped as a separate table and not as part of glyf (which - didn't make much sense to begin with, but can't work at all in - the case of CFF...) - -2002-05-22 20:15 jvr - - * Lib/fontTools/ttLib/__init__.py: refactored saveXML() method - -2002-05-18 20:07 jvr - - * Lib/fontTools/cffLib.py: remove format 3 charset switch; add - newline after ROS - -2002-05-17 20:04 jvr - - * Lib/fontTools/cffLib.py: renaming, refactoring. - -2002-05-17 19:58 jvr - - * Lib/fontTools/cffLib.py: tweaked the XML output somewhat, reorder - the topdict fields, etc. - -2002-05-17 18:37 jvr - - * Lib/fontTools/misc/psCharStrings.py: fixed ctnrmask problem: - hints weren't counted correctly - -2002-05-17 18:36 jvr - - * Lib/fontTools/cffLib.py: more CID support, some refactoring, - stuff. - -2002-05-17 07:08 jvr - - * Lib/fontTools/cffLib.py: only debug if DEBUG... - -2002-05-17 07:07 jvr - - * Lib/fontTools/misc/psCharStrings.py: first stab at compiling T2 - CharStrings - -2002-05-17 07:06 jvr - - * Lib/fontTools/cffLib.py: tweaking, added some debug info - -2002-05-16 18:38 jvr - - * Lib/fontTools/cffLib.py: make decompiling charstrings work again - -2002-05-16 18:17 jvr - - * Lib/fontTools/cffLib.py: major refactoring, now evaluates - everything lazily, so it should be really fast if you only need - (say) the glyph order. 
- -2002-05-16 18:15 jvr - - * Lib/fontTools/ttLib/tables/C_F_F_.py: some changes to adapt to - new cffLib.py - -2002-05-16 18:13 jvr - - * Lib/fontTools/misc/psCharStrings.py: (nit) - -2002-05-16 18:12 jvr - - * Lib/fontTools/ttLib/tables/otConverters.py: whoops, compile was - broken due to Fixed 'fix' - -2002-05-15 07:50 jvr - - * Lib/fontTools/ttLib/__init__.py: ignore tables we don't have upon - saving as XML: this is indispensible for batch processing - -2002-05-15 07:41 jvr - - * Lib/fontTools/cffLib.py: more work in progress - -2002-05-15 07:40 jvr - - * Lib/fontTools/misc/psCharStrings.py: added delta array support to - DictDecompiler - -2002-05-14 13:51 jvr - - * Lib/fontTools/cffLib.py: more CID hackery - -2002-05-14 13:49 jvr - - * Lib/fontTools/misc/psCharStrings.py: fix argument type order - -2002-05-14 12:37 jvr - - * Lib/fontTools/cffLib.py: more rearranging, some fixes of the - previous version - -2002-05-14 12:22 jvr - - * Lib/fontTools/cffLib.py: resturcturing, reformatting - -2002-05-14 12:09 jvr - - * Lib/fontTools/ttLib/sfnt.py: fixed typo in comment - -2002-05-13 18:13 jvr - - * Lib/fontTools/ttLib/tables/_h_h_e_a.py: this wasn't meant te be - checked in yet. - -2002-05-13 18:10 jvr - - * Lib/fontTools/ttLib/tables/otConverters.py: added Fixed type - -2002-05-13 18:08 jvr - - * Lib/fontTools/ttLib/tables/_h_h_e_a.py, - Lib/fontTools/ttLib/tables/otBase.py: more cosmetics - -2002-05-13 16:21 jvr - - * Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/tables/table_API_readme.txt, - Lib/fontTools/ttLib/xmlImport.py: a whole bunch of renames, - purely stylistic - -2002-05-13 16:19 jvr - - * Lib/fontTools/cffLib.py, Lib/fontTools/misc/psCharStrings.py: - moved some stuff to cffLib - -2002-05-13 11:26 jvr - - * Lib/fontTools/ttLib/__init__.py: don't get glyph names from CFF - it it's a CID-keyed font; invent glyph name on the spot if - glyphID is too high (dubious change..). 
- -2002-05-13 11:25 jvr - - * Lib/fontTools/cffLib.py, Lib/fontTools/ttLib/tables/C_F_F_.py: - use a StringIO stream instead slicing strings all the time; don't - barf on CID-keyed fonts (but CID support is by no means there - yet!) - -2002-05-13 11:21 jvr - - * Lib/fontTools/ttLib/sfnt.py: use spaces for alignment - -2002-05-12 18:46 jvr - - * Tools/ttcompile, Tools/ttdump: test whether final argument is a - directory - -2002-05-12 17:14 jvr - - * Lib/fontTools/ttLib/__init__.py, Lib/fontTools/ttLib/sfnt.py, - Lib/fontTools/ttLib/tables/_c_m_a_p.py, - Lib/fontTools/ttLib/tables/_k_e_r_n.py: renamed several items to - use camelCase - -2002-05-12 17:02 jvr - - * Lib/fontTools/ttLib/sfnt.py: Applied patch from Owen Taylor that - allows zero-length tables to be ignored. Added comment why. - -2002-05-12 12:58 jvr - - * Doc/index.html: note about PyXML - -2002-05-12 12:48 jvr - - * Doc/index.html: reworded glyph name section - -2002-05-12 12:24 jvr - - * Doc/changes.txt: notes about recent changes - -2002-05-11 21:52 jvr - - * Tools/ttcompile: case typo - -2002-05-11 21:44 jvr - - * Lib/fontTools/ttLib/xmlImport.py: minor restructuring - -2002-05-11 21:38 jvr - - * Lib/fontTools/ttLib/xmlImport.py: added support for the new - ttdump -s output: read file references from mini-ttx file - -2002-05-11 21:18 jvr - - * Lib/fontTools/ttLib/__init__.py: change how saveXML with - splitTable=True works: it no longer creates a directory, and - outputs a small file that references the individual table files - -2002-05-11 21:16 jvr - - * Tools/ttcompile, Tools/ttdump: changed the command line interface - of ttdump and ttcompile ***incompatibly***; changed ttdump -s - considerably: now outputs a small file containing references to - the individual table file; -d is gone; etc. 
- -2002-05-11 10:21 jvr - - * Lib/fontTools/ttLib/tables/otBase.py, - Lib/fontTools/ttLib/tables/otConverters.py, - Lib/fontTools/ttLib/tables/otTables.py: results of morning-after - code review, added some doc strings, etc. - -2002-05-11 01:03 jvr - - * Lib/fontTools/ttLib/tables/otCommon.py: removed non-functioning - lame-ass previous attempt - -2002-05-11 00:59 jvr - - * Lib/fontTools/ttLib/tables/B_A_S_E_.py, - Lib/fontTools/ttLib/tables/G_D_E_F_.py, - Lib/fontTools/ttLib/tables/G_P_O_S_.py, - Lib/fontTools/ttLib/tables/G_S_U_B_.py, - Lib/fontTools/ttLib/tables/J_S_T_F_.py, - Lib/fontTools/ttLib/tables/otBase.py, - Lib/fontTools/ttLib/tables/otConverters.py, - Lib/fontTools/ttLib/tables/otData.py, - Lib/fontTools/ttLib/tables/otTables.py: Completely revamped OT - support; this time it works and is complete. XML output is not - yet as pretty as can be. - -2002-05-10 19:52 jvr - - * Lib/fontTools/ttLib/tables/_k_e_r_n.py: fix unknown subtable - format and handling of Apple fonts - -2002-05-10 19:03 jvr - - * Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/tables/O_S_2f_2.py, - Lib/fontTools/ttLib/tables/_c_m_a_p.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/tables/_k_e_r_n.py, - Lib/fontTools/ttLib/tables/_p_o_s_t.py: a few cosmetic/style - changes - -2002-05-10 19:02 jvr - - * Lib/fontTools/ttLib/tables/G_S_U_B_.py, - Lib/fontTools/ttLib/tables/otCommon.py: checking in last edits to - the old OT support; this stuff will be replaced by brand new code - soon. - -2002-05-05 11:29 jvr - - * Lib/fontTools/ttLib/__init__.py: Work around bootstrapping - problem in TTFont._getGlyphNamesFromCmap(): - If the cmap parser was the one to cause _getGlyphNamesFromCmap() - to be - called, no unicode cmap was found as it was just starting to get - loaded. This resulted in different glyph names, depending on when - the cmap parser was invoked. - Also added a bunch of comment describing what this method does. 
- -2002-05-05 09:55 jvr - - * Tools/ttlist: cosmetic change: I forgot I dislike backticks... - -2002-05-05 09:48 jvr - - * Lib/fontTools/ttLib/__init__.py: renamed _getTableData() to - getTableData(); optimized getGlyphOrder() somewhat. - -2002-05-04 22:04 jvr - - * Lib/fontTools/ttLib/__init__.py, Lib/fontTools/ttLib/sfnt.py: - added support for deleting tables: del f[tag] - -2002-05-04 22:03 jvr - - * Lib/fontTools/ttLib/tables/_p_o_s_t.py: use dict for extraNames - lookups, getting rid of quadratic behavior - -2002-05-03 19:38 jvr - - * Lib/fontTools/t1Lib.py: MacPython 2.2 compatibility fix. - -2002-05-03 19:35 jvr - - * Tools/ttcompile, Tools/ttdump: some reformatting of the usage - msg. - -2002-05-03 19:10 jvr - - * Doc/index.html: more TTX... - -2002-05-03 19:03 jvr - - * LICENSE.txt, Lib/fontTools/__init__.py, README.txt: typos, - version update, date update - -2002-05-03 19:02 jvr - - * Doc/index.html: cleaned up command line tool section, updated to - current state. - -2002-05-03 18:58 jvr - - * ttCompile.py, ttDump.py, ttList.py: these moved to Tools/ - -2002-05-03 18:57 jvr - - * setup.py: install the scripts from Tools/ - -2002-05-03 18:55 jvr - - * Tools, Tools/ttcompile, Tools/ttdump, Tools/ttlist: new place and - names for scripts/tools - -2002-05-03 17:05 jvr - - * ttCompile.py, ttDump.py: minor fiddling with usage. - -2002-05-03 17:05 jvr - - * ttList.py: minimal table lister tool - -2002-05-03 17:01 jvr - - * Lib/fontTools/ttLib/tables/C_F_F_.py, - Lib/fontTools/ttLib/tables/G_P_O_S_.py, - Lib/fontTools/ttLib/tables/G_S_U_B_.py, - Lib/fontTools/ttLib/tables/otCommon.py: Work in progress on CFF, - GPOS and GSUB. Since it's only partly working, it's diasabled by - default. 
- -2002-05-03 14:33 jvr - - * Lib/fontTools/ttLib/tables/C_F_F_.py: use composition rather than - inheritance; \ - -2002-05-03 08:59 jvr - - * setup.py: import expat instead of xmlproc, as that's what we're - using now - -2002-05-02 20:54 jvr - - * Lib/fontTools/ttLib/xmlImport.py: only keep the orginal table - around in two special cases.\n this fixes a problem with - importing individual tables. - -2002-05-02 15:26 jvr - - * Lib/fontTools/misc/eexec.py: eexecOp may be a global module or a - submodule. - -2002-05-02 15:23 jvr - - * Lib/fontTools/ttLib/__init__.py: use version from - fontTools.__init__.py - -2002-05-02 15:16 jvr - - * Lib/fontTools/ttLib/xmlImport.py: re-added progress support, to - be tested - -2002-05-02 10:53 jvr - - * Lib/fontTools/ttLib/tables/_h_m_t_x.py: whoops, lastIndex can't - be smaller than 1 - -2002-05-02 08:11 jvr - - * Doc/changes.txt, Doc/index.html: updated installation - instructions and changes.txt - -2002-05-01 21:32 jvr - - * Lib/fontTools/ttLib/xmlImport.py: rearranged a bit, removed - redundant imports - -2002-05-01 21:06 jvr - - * Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/tables/_h_d_m_x.py, - Lib/fontTools/ttLib/tables/_h_m_t_x.py, - Lib/fontTools/ttLib/tables/_n_a_m_e.py, - Lib/fontTools/ttLib/xmlImport.py: Complety revised the XML import - code: - - use expat instead of xmlproc - - minor fixes here and there - - Fixed bug in hmtx/vmtx code that only occured if all advances - were equal. - - FontTools now officially requires Python 2.0 or up, due to exapt - and unicode. - -2002-03-12 14:34 jvr - - * Lib/fontTools/afmLib.py: Charnames can contain a period anywhere, - not just at the start. - -2002-01-17 09:36 jvr - - * Lib/fontTools/ttLib/tables/_p_o_s_t.py: another buggy font - workaround; sped up unpackPStrings somewhat - -2002-01-07 08:44 jvr - - * Lib/fontTools/ttLib/tables/ttProgram.py: Hm, these instructions - had their stack pop-count reversed. - Thanks to L. 
Peter Deutsch for finding this. - -2001-11-28 12:22 jvr - - * Lib/fontTools/t1Lib.py: At the expense of some speed, find the - end of an excrypted portion - more acurately. This fixes an obscure problem with Fog 4 fonts. - -2001-11-05 19:32 jvr - - * Lib/fontTools/ttLib/tables/_p_o_s_t.py: fixed post table format 1 - error: even though the glyph order is fixed, - that doesn't mean all glyphs in the standard order are there. - -2001-08-16 18:14 jvr - - * Src/eexecOp/eexecOpmodule.c: Ugh. The previous change broke under - 1.5.2. Work around it, and clean - up some more hwile we're at it. - -2001-08-16 16:34 jvr - - * Src/eexecOp/eexecOpmodule.c: Hm, using "h" format strings for - unsigned shorts broke in Python 2.1 - -2001-08-16 11:02 jvr - - * Lib/fontTools/misc/psCharStrings.py: behave nicely when *any* - subpath doesn't start with a moveto. - -2001-08-16 10:35 jvr - - * Lib/fontTools/misc/psCharStrings.py: behave nicely when the font - doesn't do an initial moveto. - -2001-08-16 10:34 jvr - - * Lib/fontTools/t1Lib.py: fixed saveAs() - -2001-08-15 09:26 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: style constency - -2001-08-15 07:01 jvr - - * Lib/fontTools/ttLib/tables/O_S_2f_2.py: spec changed: three - fields are now unsigned instead of signed, - and due to the the wonderful hungarian notation the field - names changed as well... 
(So this change is not b/w compatible) - -2001-08-15 07:00 jvr - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: dump macStyle as binary - -2001-08-14 06:43 jvr - - * Src/eexecOp/eexecOpmodule.c: copyright notice updated (but mostly - to test the CVS log msg mail) - -2001-08-10 22:17 jvr - - * Lib/fontTools/ttLib/macUtils.py: work around MacPython 2.1 - incompatibility - -2001-08-10 22:16 jvr - - * Mac/TTX.py: grab version from fontTools.__init__ - -2001-08-10 20:28 jvr - - * Doc/changes.txt: 1.0b1 release notes - -2001-08-10 20:27 jvr - - * Doc/index.html: added note about distutils needed for Python < - 2.0 - -2001-08-10 16:49 jvr - - * Lib/fontTools/__init__.py: added version variable - -2001-08-10 16:49 jvr - - * mktarball.py: add version to tarball filename - -2001-08-10 08:58 jvr - - * LICENSE.txt: Let's be vague about where I live(d)... - -2001-08-10 08:55 jvr - - * LEGAL.txt, LICENSE.txt: renamed LEGAL.txt to LICENSE.txt, to - match the doco... - -2001-08-10 08:54 jvr - - * Doc/index.html: updated to the current state of affairs. - -2001-08-10 00:04 jvr - - * ttx_shellext_win32.py: cleaned up regtext for clarity. Should be - a 100% cosmetic change, - but I was unable to test it (Hi Adam ;-) - -2001-08-09 23:45 jvr - - * ttCompile.py: print a banner for each output file - -2001-08-09 23:39 jvr - - * ttCompile.py: reworked command line options - -2001-08-09 23:03 jvr - - * setup.py: added warning about dependency on NumPy and PyXML - -2001-08-09 21:39 jvr - - * ttDump.py: - fixed -d (forgot to add it to the getopt arg) - - added comment that -s implies -f - -2001-08-09 21:31 jvr - - * ttDump.py: implemented -d (specify output dir) option - -2001-08-09 21:06 jvr - - * ttDump.py: first step of changing the command line usage to - something more - sensible: it is now possible to do batches, as in - ./ttDump.py *.ttf - This is not b/w compatible. (The new -d option is not yet - implemented) - -2001-08-09 19:39 jvr - - * Src/eexecOp/eexecOpmodule.c: And one more... 
- -2001-08-09 19:36 jvr - - * Src/eexecOp/eexecOpmodule.c: Hm, made func defs ANSI compliant. - -2001-08-09 19:18 jvr - - * setup.py: eexecOp.{so|pyd|slb} goes into fontTools/misc/ - -2001-08-09 19:13 jvr - - * Lib/interfaceChecker.py: old cruft - -2001-08-09 19:09 jvr - - * LEGAL.txt, README.txt: minor things: the real doco needs real - work... - -2001-08-09 19:06 jvr - - * Src/eexecOp/Makefile, Src/eexecOp/Makefile.pre.in, - Src/eexecOp/README.txt, Src/eexecOp/Setup.in, - Src/eexecOp/eexecOp.ppc.prj, Src/eexecOp/eexecOp.ppc.prj.exp: - removed obsolete unix makefile support: distutils takes care - of that now (see setup.py in fonttools/). - -2001-08-09 19:00 jvr - - * Lib/fontTools/misc/eexec.py: eexecOp may also be a global module. - -2001-08-09 18:47 jvr - - * install.py, setup.py: Removed obsolete install.py script, and - replaced it with a - proper setup.py, offering full distutils support. So far only - tested under MacOS. - -2001-07-30 19:05 Just - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: removed dependency on old - transformation class - -2001-07-30 19:04 Just - - * Lib/fontTools/t1Lib.py: updated OpenResFile() to FSpOpenResFile() - and CreateResFile() to - FSpCreateResFile() for carbon compatiblilty. - -2001-06-27 23:09 Just - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: Don't take month and day - names from calendar.py: a buggy Metrowerks strftime() made this - crash hard in Python 2.2. - -2001-06-24 15:14 Just - - * Lib/fontTools/misc/psCharStrings.py, Lib/fontTools/misc/psLib.py: - don't print extra error info to stdout - -2001-06-24 15:12 Just - - * Lib/fontTools/t1Lib.py: renamed a bunch of things to use - CamelCase - -2001-06-24 15:11 Just - - * Lib/fontTools/afmLib.py: improved API for creating AFM files from - scratch - -2001-06-24 15:10 Just - - * Lib/fontTools/fondLib.py: fixed style strings bug, as triggered - by the Thorndale font. 
- -2001-04-30 14:40 Just - - * Lib/fontTools/afmLib.py: - added support for composite info - - write attributes in a decent order - -2001-04-20 18:39 Just - - * Lib/fontTools/misc/eexec.py, Lib/fontTools/misc/psLib.py: minor - cleanups - -2001-03-09 12:42 Just - - * Lib/fontTools/nfntLib.py: New & improved, but dead slow. Reads - and writes. - -2001-02-23 21:58 Just - - * Lib/fontTools/ttLib/__init__.py: don't allow duplicate glyph - names when building names from cmap/agl - -2001-01-13 13:48 Just - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: workaround for currupt - (?) cmap subtable - -2000-11-03 10:29 Just - - * Lib/fontTools/ttLib/tables/_h_m_t_x.py, - Lib/fontTools/ttLib/tables/_l_o_c_a.py: don't complain as loudly - with fonts that don't completely adhere to the spec - -2000-10-23 14:36 Just - - * Lib/fontTools/ttLib/tables/_n_a_m_e.py: workaround for odd-length - unicode strings (!) - -2000-10-18 22:27 Petr - - * Lib/fontTools/ttLib/tables/_m_a_x_p.py: doh! font bounding box - goes to the head table, not maxp itself. - -2000-10-13 14:19 Just - - * Mac/TTX.rsrc.hqx: updated version & copyright - -2000-10-13 14:18 Just - - * Mac/TTX.py, Mac/TTXMain.py: what was I thinking - -2000-10-13 13:51 Just - - * Lib/fontTools/ttLib/tables/O_S_2f_2.py: added workaround for - buggy Apple fonts - -2000-10-11 18:04 Just - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: added workaround for Py - 1.5.1 compatibility - -2000-10-03 10:34 Just - - * Lib/fontTools/fondLib.py: initialize styleStrings with empty - string instead of None's: this allows certain Apple fonts to be - handled correctly. 
- -2000-10-02 07:51 Just - - * Lib/fontTools/ttLib/__init__.py, Lib/fontTools/ttLib/sfnt.py: - improved support for writing to (in memory) streams - -2000-08-23 12:34 Just - - * Lib/fontTools/ttLib/sfnt.py, - Lib/fontTools/ttLib/tables/_p_o_s_t.py: minor fix - -2000-08-23 12:34 Just - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: made calculating bounding - box handle empty coordinate arrays gracefully - -2000-08-23 12:33 Just - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: removed an assert that - was too strict - -2000-08-23 12:31 Just - - * Lib/fontTools/ttLib/__init__.py: minor changes - -2000-07-03 18:45 Just - - * Lib/fontTools/misc/homeResFile.py: module to find the home file - of a resource (handy for finding suitcase files when all you have - is a resource) - -2000-06-29 18:35 Just - - * Lib/fontTools/ttLib/tables/T_S_I_J_.py: another OT source table - -2000-06-08 18:38 Just - - * Lib/fontTools/ttLib/tables/O_S_2f_2.py: formatting - -2000-06-07 19:13 Just - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: Allow long-aligned glyph - records (as is in fact recommended by the latest MS spec, but - almost nobody seems to do it...) - -2000-06-07 18:25 Just - - * Lib/fontTools/ttLib/tables/_c_m_a_p.py: Fixed cmap optimizer bug: - needs more testing! - -2000-06-07 18:08 Just - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: Fixed getCoordinates() so - it works correctly with "empty" components. - -2000-06-07 18:07 Just - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: The "flags" field is an - unsigned short, not a byte - -2000-05-26 13:08 Just - - * Lib/fontTools/agl.py: Roozbeh Pournader found a working version - to one of the broken URLs in the Adobe document. 
- -2000-04-29 08:13 Just - - * ttx_shellext_win32.py: updated to conform to the latest - configuration (Adam Twardoch) - -2000-04-01 21:44 just - - * Src/eexecOp/Makefile, Src/eexecOp/Makefile.pre.in, - Src/eexecOp/Setup.in: unix Makefile and Setup - -2000-03-28 10:38 Just - - * Lib/fontTools/t1Lib.py: some minor improvements - -2000-03-28 10:37 Just - - * Lib/fontTools/cffLib.py: don't barf if there are no subroutines - -2000-03-28 10:33 Just - - * Lib/fontTools/unicode.py: updated to Unicode 3.0 by Antoine Leca. - -2000-03-15 20:57 Just - - * Mac/README.txt: mac readme file - -2000-03-15 20:56 Just - - * Doc/bugs.txt, Doc/changes.txt: known bugs and last changes - -2000-03-15 20:56 Just - - * Doc/index.html: finally written some more doco - -2000-03-15 20:55 Just - - * LEGAL.txt: updated license - -2000-03-15 20:55 Just - - * README.txt: new minimal readme file - -2000-03-14 23:21 Just - - * Lib/fontTools/ttLib/test/__init__.py: added doc strings to empty - __init__.py files: WinZip apparently skips empty files. Doh! - -2000-03-14 23:21 Just - - * Lib/fontTools/ttLib/test/ttBrowser.py: fixed multi-arg .append() - call, for Python 1.6 compatibility. - -2000-03-14 23:03 Just - - * Lib/fontTools/ttLib/tables/_n_a_m_e.py: - some method name - changes - - check for 3,0 platform/encoding wide strings - -2000-03-14 23:02 Just - - * Lib/fontTools/ttLib/tables/_k_e_r_n.py: minor fixes. Note: - format2 is not implemented correctly! - -2000-03-14 23:01 Just - - * Lib/fontTools/__init__.py, Lib/fontTools/encodings/__init__.py, - Lib/fontTools/misc/__init__.py, - Lib/fontTools/ttLib/tables/__init__.py: added doc strings to - empty __init__.py files: WinZip apparently skips empty files. - Doh! - -2000-03-14 23:00 Just - - * Lib/fontTools/ttLib/xmlImport.py: fixed multi-arg .append() call, - for Python 1.6 compatibility. - -2000-03-14 22:59 Just - - * Lib/fontTools/ttLib/macUtils.py: changes reflecting a method name - change in the kern table. 
- -2000-02-21 21:30 Just - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: be relaxed about zero - padding the input data to 4-byte boundaries - -2000-02-21 21:14 Just - - * Lib/fontTools/ttLib/tables/_h_e_a_d.py: another 64-bit fix - -2000-02-16 14:59 Just - - * ttffile.reg, ttx_shellext_win32.py: New Windows shell extension - by Adam Twardoch. - -2000-02-13 17:36 just - - * mktarball.py: some improvements; can optionally specify dest dir - -2000-02-13 17:00 Just - - * Lib/fontTools/ttLib/tables/_l_o_c_a.py: first patch to make ttLib - 64-bit clean - -2000-02-13 16:23 Just - - * Lib/fontTools/ttLib/__init__.py: disable decompilation exception - catching: it causes too many debugging nightmares. - -2000-02-04 19:19 Just - - * Lib/fontTools/agl.py: Added note about the incorrect old URL in - the Adobe text, and reverted the text to what it was: it still is - the latest officially released document, and I'd rather include - it as-is. - -2000-02-04 18:58 Erik - - * Lib/fontTools/agl.py: new URL for adobe's glyphlist doco - -2000-02-01 15:54 Just - - * Lib/fontTools/ttLib/tables/ttProgram.py: reinstated accidentally - deleted regex. - -2000-02-01 15:53 Just - - * ttDump.py: added -i option. This will enable TT instruction - disassembly. - -2000-02-01 15:32 Just - - * Lib/fontTools/ttLib/tables/_n_a_m_e.py: added an assert, plus - some (commented out) test code for bad unicode strings - -2000-02-01 15:31 Just - - * Lib/fontTools/ttLib/tables/_f_p_g_m.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/tables/_p_r_e_p.py: added support for - instruction disassembly - -2000-02-01 15:30 Just - - * Lib/fontTools/ttLib/tables/ttProgram.py: - added assembler: we've - got a full round trip now! 
- - added toXML() and fromXML() methods - -2000-02-01 15:29 Just - - * Lib/fontTools/ttLib/__init__.py: added support for instruction - disassembly in saveXML() - -2000-02-01 15:28 Just - - * Lib/fontTools/misc/textTools.py: fixed buglet in num2binary() - -2000-01-31 14:33 Just - - * Lib/fontTools/misc/psCharStrings.py: fixed broken import - -2000-01-31 14:31 Just - - * Lib/interfaceChecker.py: minor doc string change - -2000-01-26 19:32 Just - - * Lib/fontTools/misc/arrayTools.py: Two new functions: - - vectorLength(vector): calculate the length of a vector - - asInt16(): round and cast any array (or number) to 16 bit ints - -2000-01-26 19:20 Just - - * Lib/interfaceChecker.py: Support for interface checking. - Experimental. - -2000-01-23 19:10 Just - - * Lib/fontTools/misc/arrayTools.py: new functions: unionRect() and - rectCenter() - -2000-01-22 00:26 Just - - * Lib/fontTools/misc/arrayTools.py: added intRect() function. Turn - any rect into a rect using ints only. - -2000-01-20 11:08 Just - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: - added methods to get - composite component info conveniently - -2000-01-19 20:44 Just - - * Lib/fontTools/cffLib.py: various changes: - - the Transformation class is now a little cleaner & smarter - - pens now have a reference to a font - - pens have a new method called drawGlyph(), which is needed for - composites. - -2000-01-19 12:37 Just - - * Mac/TTXMain.py: use ".ttx" extension instead of ".xml". TTX is - now the name of the format, not just the app. Still needs work, - though. - -2000-01-18 22:29 Just - - * Lib/fontTools/misc/arrayTools.py: added a bunch of rectangle - tools that mimic some Qd.*Rect functions, like Qd.InsetRect. - -2000-01-17 18:58 Just - - * Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/tables/table_API_readme.txt, - Lib/fontTools/ttLib/test/ttBrowser.py: use ".ttx" extension - instead of ".xml". TTX is not the name of the format, not the - app... 
- -2000-01-17 18:49 Just - - * tt2xml.py, ttCompile.py, ttDump.py, ttffile.reg, xml2tt.py: - renamed tt2xml.pt to ttDump.py and xml2tt.py to ttCompile.py - -2000-01-16 22:14 Just - - * Lib/fontTools/cffLib.py, Lib/fontTools/misc/psCharStrings.py, - Lib/fontTools/misc/psLib.py, Lib/fontTools/misc/psOperators.py, - Lib/fontTools/psCharStrings.py, Lib/fontTools/psLib.py, - Lib/fontTools/psOperators.py, Lib/fontTools/t1Lib.py: Moved - psCharStrings.py, psLib.py and psOperators.py to fontTools.misc, - since they're not "toplevel" font tools. - -2000-01-16 20:37 Just - - * Lib/fontTools/misc/arrayTools.py: yet another reorganization - round... - -2000-01-12 19:15 Just - - * Lib/fontTools/psLib.py, Lib/fontTools/t1Lib.py: Changes to use - the new fontTools.misc.eexec module instead of the old eexec - module. - -2000-01-12 19:15 Just - - * Lib/fontTools/misc/eexec.py: added fontTools.misc.eexec and a - MacOS/PPC shared lib (eexecOp) that provides the C - implementation. - -2000-01-12 19:13 Just - - * Src, Src/eexecOp, Src/eexecOp/README.txt, - Src/eexecOp/eexecOp.ppc.prj, Src/eexecOp/eexecOp.ppc.prj.exp, - Src/eexecOp/eexecOpmodule.c: added eexecOp, C implementation of - the new fontTools.misc.eexec module. - -2000-01-05 20:45 Just - - * Lib/fontTools/ttLib/tables/asciiTable.py: Remove null bytes - before dumping to XML. This seems neccesary, but I'm not sure if - this breaks compilation. - -2000-01-05 20:44 Just - - * Lib/fontTools/ttLib/tables/T_S_I_V_.py: added TSIV table - -2000-01-05 20:43 Just - - * Lib/fontTools/ttLib/__init__.py: - Added skiptTables argument to - TTFont.saveXML(), to support -x option of tt2xml.py - - Fixed typo - -2000-01-05 20:41 Just - - * tt2xml.py, ttDump.py: added -x option, to exclude a - specific table. 
- -2000-01-04 14:03 Just - - * Lib/fontTools/ttLib/tables/T_S_I_B_.py, - Lib/fontTools/ttLib/tables/T_S_I_D_.py, - Lib/fontTools/ttLib/tables/T_S_I_P_.py, - Lib/fontTools/ttLib/tables/T_S_I_S_.py, - Lib/fontTools/ttLib/tables/asciiTable.py: Added private VOLT (?) - tables: TSIB, TSID, TSIP, TSIS. Easy, since they're plain ascii - tables. - -2000-01-04 14:02 Just - - * Lib/fontTools/ttLib/tables/T_S_I__1.py: added some initializer in - case the table is empty. - -2000-01-04 14:01 Just - - * Lib/fontTools/ttLib/tables/otCommon.py: while the OT modules are - in progress, disable decompilation and fall back to hex dumps. - -2000-01-04 13:51 Just - - * Lib/xmlWriter.py: set XML file type to BBEdit when on MacOS - -2000-01-03 23:01 Just - - * Lib/fontTools/ttLib/tables/otCommon.py: cleaned up error message - for failing version test - -2000-01-03 23:00 Just - - * Lib/fontTools/ttLib/tables/L_T_S_H_.py: added error messages to - the assert statements - -2000-01-03 23:00 Just - - * Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/tables/DefaultTable.py, - Lib/fontTools/ttLib/xmlImport.py: Added code to fall back to the - DefaultTable (and therefore to hex XML dumps) when an exception - occurs during decompilation. - -1999-12-29 13:10 Just - - * tt2xml.py, ttDump.py: When using -s (splitting files), save the - xml files in a separate directory instead of in the same dir as - the font file. - -1999-12-29 13:09 Just - - * ttCompile.py, xml2tt.py: Revert current directory after glob'ing - for *.xml files - -1999-12-29 13:07 Just - - * Lib/fontTools/ttLib/tables/_n_a_m_e.py: Treat platformID=3, - platEncID=0 also as Unicode strings. 
- -1999-12-29 13:06 Just - - * Lib/fontTools/ttLib/__init__.py: - fixed broken agl import - - changed TTFOnt.saveXML + splitTables<>0 behavior: now expects a - path to a directory - -1999-12-27 20:02 Just - - * Lib/fontTools/ttLib/xmlImport.py: XMLApplication: don't create a - new table when a table with that tag already exist in the TTFont - object. - -1999-12-27 19:52 Just - - * ttCompile.py, xml2tt.py: Added support to merge multiple XML - files into one font (the opposite of tt2xml.py's -s option). - Improved doc string. - -1999-12-27 19:49 Just - - * tt2xml.py, ttDump.py: Added -s option to split each table into a - separate XML file. - -1999-12-27 19:48 Just - - * Lib/fontTools/ttLib/__init__.py: Added optional splitTables - argument to TTFont.saveXML(). Set to non-zero, this will cause - each table to be dumped to an idividual XML file. - -1999-12-27 15:40 Just - - * ttCompile.py, xml2tt.py: print final (timing) message when in - verbose mode - -1999-12-27 15:39 Just - - * Lib/fontTools/nfntLib.py: slight cleanup. - -1999-12-23 15:16 Just - - * Lib/fontTools/ttLib/__init__.py: edited TTFont.__init__ doc - string, the recalcBBoxes explanation should now be clearer and - more complete. - -1999-12-23 14:44 Just - - * Lib/fontTools/ttLib/test/ttBrowser.py: added browseTTFont() - function. - -1999-12-23 14:44 Just - - * Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/tables/_h_h_e_a.py, - Lib/fontTools/ttLib/tables/_m_a_x_p.py: don't recalc a number of - things then TTFont().recalcBBoxes is off. This allows us to - compact glyphs right after they've been parsed from XML, which - should greatly reduce memory usage, and therefore should speedup - compiling large fonts. - -1999-12-23 12:32 Just - - * Lib/fontTools/ttLib/tables/_h_d_m_x.py: oops: header struct is - big endian of course! noted by Werner Lemberg. 
- -1999-12-23 12:31 Just - - * ttCompile.py, xml2tt.py: fixed dumb error: recalcBBoxes goes into - the TTFont constructor, not int TTFont.save. - -1999-12-20 23:37 Just - - * Mac, Mac/TTX.py, Mac/TTX.rsrc.hqx, Mac/TTXMain.py: populating Mac - subdirectory - -1999-12-20 23:36 Just - - * TTX.py, TTX.rsrc: moved Mac subdirectory - -1999-12-20 22:02 Just - - * Lib/fontTools/cffLib.py: dummy checkin -- testing - -1999-12-20 21:59 Just - - * Lib/fontTools/cffLib.py: added $Id$ tag. - -1999-12-18 23:56 just - - * mktarball.py: fixed dir dependency - -1999-12-18 23:28 Just - - * mktarball.py: doco and cleanup - -1999-12-18 23:25 just - - * mktarball.py: working version - -1999-12-18 23:10 Just - - * mktarball.py: oops, syntax error - -1999-12-18 23:09 Just - - * mktarball.py: debugging... - -1999-12-18 23:05 Just - - * mktarball.py: added distribution script - -1999-12-18 22:56 Just - - * Lib/fontTools/encodings/MacRoman.py: dummy commit, testing - -1999-12-18 21:32 Just - - * Lib/fontTools/ttLib/__init__.py: moved an import statement - -1999-12-18 21:30 Just - - * Lib/fontTools/psOperators.py: now uses - fontTools.encodings.StandardEncoding instead of defining its own - -1999-12-18 21:29 Just - - * Lib/fontTools/encodings, Lib/fontTools/encodings/MacRoman.py, - Lib/fontTools/encodings/StandardEncoding.py, - Lib/fontTools/encodings/__init__.py: added some encoding files in - a new subpackage: fontTools.encodings - -1999-12-18 18:18 Just - - * Doc, Doc/index.html: added Doc directory and initial hmtl doc - file. Still pretty much empty... - -1999-12-18 18:12 Just - - * ttCompile.py, xml2tt.py: - changed some variable names - - added -b command line argument: sets recalcBBoxes to false - -1999-12-18 18:11 Just - - * tt2xml.py, ttDump.py: changed some variable names. 
- -1999-12-18 18:08 Just - - * Lib/fontTools/ttLib/tables/_g_l_y_f.py: added recalcBBoxes - argument to Glyph.compile() - -1999-12-18 18:06 Just - - * Lib/fontTools/ttLib/__init__.py: added recalcBBoxes argument to - TTFont.__init__() - -1999-12-17 12:54 Just - - * Lib/fontTools/ttLib/__init__.py: minor cleanup of some doc - strings - -1999-12-17 12:51 Just - - * Lib/fontTools/ttLib/__init__.py: added __release__ symbol, - changed __version__ to $Id$. - -1999-12-17 12:42 Just - - * install.py: changed the name of the .pth file to FontTools.pth - -1999-12-17 12:37 Just - - * Lib/fontTools/ttLib/test/ttBrowser.py: added doc string saying - that this module is Mac-only. - -1999-12-17 12:36 Just - - * Lib/fontTools/ttLib/test/__init__.py: forgot to check in the - __init__.py file for fontTools.ttLib.test - -1999-12-17 11:57 Just - - * Lib/fontTools/ttLib/tables/_k_e_r_n.py: fixed broken ttLib.sfnt - import statement - -1999-12-16 22:04 Just - - * LEGAL.txt, README.txt, TTX.py, TTX.rsrc, install.py, tt2xml.py, - ttCompile.py, ttDump.py, ttffile.reg, xml2tt.py: Added the TTX - main program, the command line programs and additional files. 
- -1999-12-16 21:34 Just - - * Lib, Lib/fontTools, Lib/fontTools/__init__.py, - Lib/fontTools/afmLib.py, Lib/fontTools/agl.py, - Lib/fontTools/cffLib.py, Lib/fontTools/fondLib.py, - Lib/fontTools/misc, Lib/fontTools/misc/__init__.py, - Lib/fontTools/misc/textTools.py, Lib/fontTools/nfntLib.py, - Lib/fontTools/psCharStrings.py, Lib/fontTools/psLib.py, - Lib/fontTools/psOperators.py, Lib/fontTools/t1Lib.py, - Lib/fontTools/ttLib, Lib/fontTools/ttLib/__init__.py, - Lib/fontTools/ttLib/macUtils.py, Lib/fontTools/ttLib/sfnt.py, - Lib/fontTools/ttLib/standardGlyphOrder.py, - Lib/fontTools/ttLib/tables, Lib/fontTools/ttLib/tables/C_F_F_.py, - Lib/fontTools/ttLib/tables/D_S_I_G_.py, - Lib/fontTools/ttLib/tables/DefaultTable.py, - Lib/fontTools/ttLib/tables/G_P_O_S_.py, - Lib/fontTools/ttLib/tables/G_S_U_B_.py, - Lib/fontTools/ttLib/tables/L_T_S_H_.py, - Lib/fontTools/ttLib/tables/O_S_2f_2.py, - Lib/fontTools/ttLib/tables/T_S_I__0.py, - Lib/fontTools/ttLib/tables/T_S_I__1.py, - Lib/fontTools/ttLib/tables/T_S_I__2.py, - Lib/fontTools/ttLib/tables/T_S_I__3.py, - Lib/fontTools/ttLib/tables/T_S_I__5.py, - Lib/fontTools/ttLib/tables/__init__.py, - Lib/fontTools/ttLib/tables/_c_m_a_p.py, - Lib/fontTools/ttLib/tables/_c_v_t.py, - Lib/fontTools/ttLib/tables/_f_p_g_m.py, - Lib/fontTools/ttLib/tables/_g_a_s_p.py, - Lib/fontTools/ttLib/tables/_g_l_y_f.py, - Lib/fontTools/ttLib/tables/_h_d_m_x.py, - Lib/fontTools/ttLib/tables/_h_e_a_d.py, - Lib/fontTools/ttLib/tables/_h_h_e_a.py, - Lib/fontTools/ttLib/tables/_h_m_t_x.py, - Lib/fontTools/ttLib/tables/_k_e_r_n.py, - Lib/fontTools/ttLib/tables/_l_o_c_a.py, - Lib/fontTools/ttLib/tables/_m_a_x_p.py, - Lib/fontTools/ttLib/tables/_n_a_m_e.py, - Lib/fontTools/ttLib/tables/_p_o_s_t.py, - Lib/fontTools/ttLib/tables/_p_r_e_p.py, - Lib/fontTools/ttLib/tables/_v_h_e_a.py, - Lib/fontTools/ttLib/tables/_v_m_t_x.py, - Lib/fontTools/ttLib/tables/otCommon.py, - Lib/fontTools/ttLib/tables/table_API_readme.txt, - Lib/fontTools/ttLib/tables/ttProgram.py, - 
Lib/fontTools/ttLib/test, Lib/fontTools/ttLib/test/ttBrowser.py, - Lib/fontTools/ttLib/xmlImport.py, Lib/fontTools/unicode.py, - Lib/sstruct.py, Lib/xmlWriter.py, fonttools: Created a new - library directory called "FreeLib". All OpenSource RFMKII - components will reside there, fontTools being the flagship. - -1999-12-16 21:34 - - * .: Create standard layout - diff -Nru fonttools-2.4/Doc/documentation.html fonttools-3.0/Doc/documentation.html --- fonttools-2.4/Doc/documentation.html 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Doc/documentation.html 2015-08-31 17:57:15.000000000 +0000 @@ -42,7 +42,7 @@ The following tables are currently supported:
-BASE, CFF, DSIG, GDEF, GMAP, GPKG, GPOS, GSUB, JSTF, LTSH, META, OS/2, SING, TSI0, TSI1, TSI2, TSI3, TSI5, TSIB, TSID, TSIJ, TSIP, TSIS, TSIV, VORG, cmap, cvt, fpgm, gasp, glyf, hdmx, head, hhea, hmtx, kern, loca, maxp, name, post, prep, vhea and vmtx +BASE, CBDT, CBLC, CFF, COLR, CPAL, DSIG, EBDT, EBLC, FFTM, GDEF, GMAP, GPKG, GPOS, GSUB, JSTF, LTSH, MATH, META, OS/2, SING, SVG, TSI0, TSI1, TSI2, TSI3, TSI5, TSIB, TSID, TSIJ, TSIP, TSIS, TSIV, VDMX, VORG, avar, cmap, cvt, feat, fpgm, fvar, gasp, glyf, gvar, hdmx, head, hhea, hmtx, kern, loca, ltag, maxp, meta, name, post, prep, sbix, vhea and vmtx
Other tables are dumped as hexadecimal data. diff -Nru fonttools-2.4/Doc/install.txt fonttools-3.0/Doc/install.txt --- fonttools-2.4/Doc/install.txt 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Doc/install.txt 2015-08-31 17:57:15.000000000 +0000 @@ -68,9 +68,9 @@ contribute, you can also subscribe to the fonttools-checkins mailing list. -Anonymous SVN-access +Anonymous VCS access -The FontTools sources are also accessible through SVN, see: +The FontTools sources are also accessible here: http://sourceforge.net/projects/fonttools/ Let me know if you'd like to become a co-developer. @@ -95,10 +95,6 @@ fontTools.agl -- Interface to the Adobe Glyph List: maps unicode values to glyph names and back. - Mac-specific - fontTools.fondLib -- A reader/writer class for Mac FOND resources. - fontTools.nfntLib -- Reads Mac NFNT bitmap font resources. - Thank-you's diff -Nru fonttools-2.4/.gitignore fonttools-3.0/.gitignore --- fonttools-2.4/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/.gitignore 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,5 @@ +MANIFEST +build +dist +*.pyc +*~ diff -Nru fonttools-2.4/Lib/fontTools/afmLib.py fonttools-3.0/Lib/fontTools/afmLib.py --- fonttools-2.4/Lib/fontTools/afmLib.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/afmLib.py 2015-08-31 17:57:15.000000000 +0000 @@ -4,12 +4,9 @@ # It does not implement the full spec (Adobe Technote 5004, Adobe Font Metrics # File Format Specification). Still, it should read most "common" AFM files. 
+from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * import re -import string -import types - -__version__ = "$Id: afmLib.py,v 1.6 2003-05-24 12:50:47 jvr Exp $" - # every single line starts with a "word" identifierRE = re.compile("^([A-Za-z]+).*") @@ -17,47 +14,47 @@ # regular expression to parse char lines charRE = re.compile( "(-?\d+)" # charnum - "\s*;\s*WX\s+" # ; WX + "\s*;\s*WX\s+" # ; WX "(-?\d+)" # width - "\s*;\s*N\s+" # ; N - "([.A-Za-z0-9_]+)" # charname - "\s*;\s*B\s+" # ; B + "\s*;\s*N\s+" # ; N + "([.A-Za-z0-9_]+)" # charname + "\s*;\s*B\s+" # ; B "(-?\d+)" # left - "\s+" # + "\s+" "(-?\d+)" # bottom - "\s+" # + "\s+" "(-?\d+)" # right - "\s+" # + "\s+" "(-?\d+)" # top - "\s*;\s*" # ; + "\s*;\s*" # ; ) # regular expression to parse kerning lines kernRE = re.compile( - "([.A-Za-z0-9_]+)" # leftchar - "\s+" # - "([.A-Za-z0-9_]+)" # rightchar - "\s+" # + "([.A-Za-z0-9_]+)" # leftchar + "\s+" + "([.A-Za-z0-9_]+)" # rightchar + "\s+" "(-?\d+)" # value - "\s*" # + "\s*" ) # regular expressions to parse composite info lines of the form: # Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ; compositeRE = re.compile( - "([.A-Za-z0-9_]+)" # char name - "\s+" # + "([.A-Za-z0-9_]+)" # char name + "\s+" "(\d+)" # number of parts - "\s*;\s*" # + "\s*;\s*" ) componentRE = re.compile( "PCC\s+" # PPC - "([.A-Za-z0-9_]+)" # base char name - "\s+" # + "([.A-Za-z0-9_]+)" # base char name + "\s+" "(-?\d+)" # x offset - "\s+" # + "\s+" "(-?\d+)" # y offset - "\s*;\s*" # + "\s*;\s*" ) preferredAttributeOrder = [ @@ -80,13 +77,14 @@ ] -class error(Exception): pass +class error(Exception): + pass + +class AFM(object): -class AFM: - _attrs = None - + _keywords = ['StartFontMetrics', 'EndFontMetrics', 'StartCharMetrics', @@ -98,7 +96,7 @@ 'StartComposites', 'EndComposites', ] - + def __init__(self, path=None): self._attrs = {} self._chars = {} @@ -108,19 +106,19 @@ self._composites = {} if path is not None: self.read(path) - + def 
read(self, path): lines = readlines(path) for line in lines: - if not string.strip(line): + if not line.strip(): continue m = identifierRE.match(line) if m is None: - raise error, "syntax error in AFM file: " + `line` - + raise error("syntax error in AFM file: " + repr(line)) + pos = m.regs[1][1] word = line[:pos] - rest = string.strip(line[pos:]) + rest = line[pos:].strip() if word in self._keywords: continue if word == "C": @@ -131,56 +129,56 @@ self.parsecomposite(rest) else: self.parseattr(word, rest) - + def parsechar(self, rest): m = charRE.match(rest) if m is None: - raise error, "syntax error in AFM file: " + `rest` + raise error("syntax error in AFM file: " + repr(rest)) things = [] for fr, to in m.regs[1:]: things.append(rest[fr:to]) charname = things[2] del things[2] - charnum, width, l, b, r, t = map(string.atoi, things) + charnum, width, l, b, r, t = (int(thing) for thing in things) self._chars[charname] = charnum, width, (l, b, r, t) - + def parsekernpair(self, rest): m = kernRE.match(rest) if m is None: - raise error, "syntax error in AFM file: " + `rest` + raise error("syntax error in AFM file: " + repr(rest)) things = [] for fr, to in m.regs[1:]: things.append(rest[fr:to]) leftchar, rightchar, value = things - value = string.atoi(value) + value = int(value) self._kerning[(leftchar, rightchar)] = value - + def parseattr(self, word, rest): if word == "FontBBox": - l, b, r, t = map(string.atoi, string.split(rest)) + l, b, r, t = [int(thing) for thing in rest.split()] self._attrs[word] = l, b, r, t elif word == "Comment": self._comments.append(rest) else: try: - value = string.atoi(rest) + value = int(rest) except (ValueError, OverflowError): self._attrs[word] = rest else: self._attrs[word] = value - + def parsecomposite(self, rest): m = compositeRE.match(rest) if m is None: - raise error, "syntax error in AFM file: " + `rest` + raise error("syntax error in AFM file: " + repr(rest)) charname = m.group(1) ncomponents = int(m.group(2)) rest = 
rest[m.regs[0][1]:] components = [] - while 1: + while True: m = componentRE.match(rest) if m is None: - raise error, "syntax error in AFM file: " + `rest` + raise error("syntax error in AFM file: " + repr(rest)) basechar = m.group(1) xoffset = int(m.group(2)) yoffset = int(m.group(3)) @@ -190,72 +188,64 @@ break assert len(components) == ncomponents self._composites[charname] = components - + def write(self, path, sep='\r'): import time lines = [ "StartFontMetrics 2.0", - "Comment Generated by afmLib, version %s; at %s" % - (string.split(__version__)[2], - time.strftime("%m/%d/%Y %H:%M:%S", + "Comment Generated by afmLib; at %s" % ( + time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time())))] - + # write comments, assuming (possibly wrongly!) they should # all appear at the top for comment in self._comments: lines.append("Comment " + comment) - + # write attributes, first the ones we know about, in # a preferred order attrs = self._attrs for attr in preferredAttributeOrder: - if attrs.has_key(attr): + if attr in attrs: value = attrs[attr] if attr == "FontBBox": value = "%s %s %s %s" % value lines.append(attr + " " + str(value)) # then write the attributes we don't know about, # in alphabetical order - items = attrs.items() - items.sort() + items = sorted(attrs.items()) for attr, value in items: if attr in preferredAttributeOrder: continue lines.append(attr + " " + str(value)) - + # write char metrics - lines.append("StartCharMetrics " + `len(self._chars)`) - items = map(lambda (charname, (charnum, width, box)): - (charnum, (charname, width, box)), - self._chars.items()) - - def myCmp(a, b): - """Custom compare function to make sure unencoded chars (-1) + lines.append("StartCharMetrics " + repr(len(self._chars))) + items = [(charnum, (charname, width, box)) for charname, (charnum, width, box) in self._chars.items()] + + def myKey(a): + """Custom key function to make sure unencoded chars (-1) end up at the end of the list after sorting.""" if a[0] == -1: a = 
(0xffff,) + a[1:] # 0xffff is an arbitrary large number - if b[0] == -1: - b = (0xffff,) + b[1:] - return cmp(a, b) - items.sort(myCmp) - + return a + items.sort(key=myKey) + for charnum, (charname, width, (l, b, r, t)) in items: lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" % (charnum, width, charname, l, b, r, t)) lines.append("EndCharMetrics") - + # write kerning info lines.append("StartKernData") - lines.append("StartKernPairs " + `len(self._kerning)`) - items = self._kerning.items() - items.sort() # XXX is order important? + lines.append("StartKernPairs " + repr(len(self._kerning))) + items = sorted(self._kerning.items()) for (leftchar, rightchar), value in items: lines.append("KPX %s %s %d" % (leftchar, rightchar, value)) lines.append("EndKernPairs") lines.append("EndKernData") - + if self._composites: - composites = self._composites.items() - composites.sort() + composites = sorted(self._composites.items()) lines.append("StartComposites %s" % len(self._composites)) for charname, components in composites: line = "CC %s %s ;" % (charname, len(components)) @@ -263,82 +253,82 @@ line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset) lines.append(line) lines.append("EndComposites") - + lines.append("EndFontMetrics") - + writelines(path, lines, sep) - + def has_kernpair(self, pair): - return self._kerning.has_key(pair) - + return pair in self._kerning + def kernpairs(self): - return self._kerning.keys() - + return list(self._kerning.keys()) + def has_char(self, char): - return self._chars.has_key(char) - + return char in self._chars + def chars(self): - return self._chars.keys() - + return list(self._chars.keys()) + def comments(self): return self._comments - + def addComment(self, comment): self._comments.append(comment) - + def addComposite(self, glyphName, components): self._composites[glyphName] = components - + def __getattr__(self, attr): - if self._attrs.has_key(attr): + if attr in self._attrs: return self._attrs[attr] else: - raise 
AttributeError, attr - + raise AttributeError(attr) + def __setattr__(self, attr, value): # all attrs *not* starting with "_" are consider to be AFM keywords if attr[:1] == "_": self.__dict__[attr] = value else: self._attrs[attr] = value - + def __delattr__(self, attr): # all attrs *not* starting with "_" are consider to be AFM keywords if attr[:1] == "_": try: del self.__dict__[attr] except KeyError: - raise AttributeError, attr + raise AttributeError(attr) else: try: del self._attrs[attr] except KeyError: - raise AttributeError, attr - + raise AttributeError(attr) + def __getitem__(self, key): - if type(key) == types.TupleType: + if isinstance(key, tuple): # key is a tuple, return the kernpair return self._kerning[key] else: # return the metrics instead return self._chars[key] - + def __setitem__(self, key, value): - if type(key) == types.TupleType: + if isinstance(key, tuple): # key is a tuple, set kernpair self._kerning[key] = value else: # set char metrics self._chars[key] = value - + def __delitem__(self, key): - if type(key) == types.TupleType: + if isinstance(key, tuple): # key is a tuple, del kernpair del self._kerning[key] else: # del char metrics del self._chars[key] - + def __repr__(self): if hasattr(self, "FullName"): return '' % self.FullName @@ -356,15 +346,14 @@ sep = sep + '\r' # mac or dos if '\n' in data: sep = sep + '\n' # unix or dos - return string.split(data, sep) + return data.split(sep) def writelines(path, lines, sep='\r'): f = open(path, 'wb') for line in lines: f.write(line + sep) f.close() - - + if __name__ == "__main__": import EasyDialogs @@ -373,16 +362,15 @@ afm = AFM(path) char = 'A' if afm.has_char(char): - print afm[char] # print charnum, width and boundingbox + print(afm[char]) # print charnum, width and boundingbox pair = ('A', 'V') if afm.has_kernpair(pair): - print afm[pair] # print kerning value for pair - print afm.Version # various other afm entries have become attributes - print afm.Weight + print(afm[pair]) # print 
kerning value for pair + print(afm.Version) # various other afm entries have become attributes + print(afm.Weight) # afm.comments() returns a list of all Comment lines found in the AFM - print afm.comments() + print(afm.comments()) #print afm.chars() #print afm.kernpairs() - print afm + print(afm) afm.write(path + ".muck") - diff -Nru fonttools-2.4/Lib/fontTools/agl.py fonttools-3.0/Lib/fontTools/agl.py --- fonttools-2.4/Lib/fontTools/agl.py 2013-06-22 14:25:29.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/agl.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,103 +1,119 @@ # The table below is taken from # http://www.adobe.com/devnet/opentype/archives/aglfn.txt +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + _aglText = """\ -# ################################################################################### -# Copyright (c) 2003,2005,2006,2007 Adobe Systems Incorporated -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this documentation file to use, copy, publish, distribute, -# sublicense, and/or sell copies of the documentation, and to permit -# others to do the same, provided that: -# - No modification, editing or other alteration of this document is -# allowed; and -# - The above copyright notice and this permission notice shall be -# included in all copies of the documentation. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this documentation file, to create their own derivative works -# from the content of this document to use, copy, publish, distribute, -# sublicense, and/or sell the derivative works, and to permit others to do -# the same, provided that the derived work is not represented as being a -# copy or version of this document. 
-# -# Adobe shall not be liable to any party for any loss of revenue or profit -# or for indirect, incidental, special, consequential, or other similar -# damages, whether based on tort (including without limitation negligence -# or strict liability), contract or other legal or equitable grounds even -# if Adobe has been advised or had reason to know of the possibility of -# such damages. The Adobe materials are provided on an "AS IS" basis. -# Adobe specifically disclaims all express, statutory, or implied -# warranties relating to the Adobe materials, including but not limited to -# those concerning merchantability or fitness for a particular purpose or -# non-infringement of any third party rights regarding the Adobe -# materials. -# ################################################################################### +# ----------------------------------------------------------- +# Copyright 2003, 2005-2008, 2010 Adobe Systems Incorporated. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or +# without modification, are permitted provided that the +# following conditions are met: +# +# Redistributions of source code must retain the above +# copyright notice, this list of conditions and the following +# disclaimer. +# +# Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials +# provided with the distribution. +# +# Neither the name of Adobe Systems Incorporated nor the names +# of its contributors may be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ----------------------------------------------------------- # Name: Adobe Glyph List For New Fonts -# Table version: 1.6 -# Date: 30 Januaury 2006 +# Table version: 1.7 +# Date: November 6, 2008 +# URL: http://sourceforge.net/adobe/aglfn/ # # Description: # -# The Adobe Glyph List For New Fonts (AGLFN) is meant to provide a list of -# base glyph names which are compatible with the AGL specification at -# http://partners.adobe.com/asn/developer/type/unicodegn.html. -# and which can be used as described in section 6 of that document. -# -# This list comprises the set of glyph names from the AGLv2,0 which map -# to via the AGL rules to the semanticly correct Unicode value. For -# example, Asmall is omitted as the AGL maps this to the Unicode -# Private Use Area value F761, rather than to the Unicode value for the -# character "A". "ffi" is also omitted, as the AGL maps this to the -# Alphabetic Presentation Forms Area value FB03, rather than -# decomposing it to the three-value Unicode sequence 0066,0066,0069. -# See section 7.1 of the Unicode Standard 4.0 on this issue. -# "arrowvertex" is omitted becuase this now has a real Unicode -# character value, and the AGL is now incorrect in mapping this to the -# Private Use Area value F8E6. -# -# If you do not find an appropriate name for your glyph in this list, -# then please refer to section 6 of the document: -# http://partners.adobe.com/asn/developer/typeforum/unicodegn.html. 
-# -# The Unicode values and names are given for convenience. -# -# Format: Semicolon-delimited fields: -# -# (1) Standard UV or CUS UV. (4 uppercase hexadecimal digits) -# -# (2) Glyph name. (upper- and lowercase letters, digits) -# +# AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph +# names that are recommended for new fonts, which are compatible with +# the AGL (Adobe Glyph List) Specification, and which should be used +# as described in Section 6 of that document. AGLFN comprises the set +# of glyph names from AGL that map via the AGL Specification rules to +# the semantically correct UV (Unicode Value). For example, "Asmall" +# is omitted because AGL maps this glyph name to the PUA (Private Use +# Area) value U+F761, rather than to the UV that maps from the glyph +# name "A." Also omitted is "ffi," because AGL maps this to the +# Alphabetic Presentation Forms value U+FB03, rather than decomposing +# it into the following sequence of three UVs: U+0066, U+0066, and +# U+0069. The name "arrowvertex" has been omitted because this glyph +# now has a real UV, and AGL is now incorrect in mapping it to the PUA +# value U+F8E6. If you do not find an appropriate name for your glyph +# in this list, then please refer to Section 6 of the AGL +# Specification. +# +# Format: three semicolon-delimited fields: +# (1) Standard UV or CUS UV--four uppercase hexadecimal digits +# (2) Glyph name--upper/lowercase letters and digits # (3) Character names: Unicode character names for standard UVs, and -# descriptive names for CUS UVs. (uppercase letters, hyphen, space) -# -# The entries are sorted by glyph name in increasing ASCII order; entries -# with the same glyph name are sorted in decreasing priority order. +# descriptive names for CUS UVs--uppercase letters, hyphen, and +# space # -# Lines starting with "#" are comments; blank lines should be ignored. 
-# -# 1.6 [30 January 2006] -# - Completed work intended in 1.5 -# -# 1.5 [23 November 2005] -# - removed duplicated block at end of file -# - changed mappings: -# 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA -# 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA -# 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU -# - corrected statement above about why ffi is omitted. - -# 1.4 [24 September 2003] Changed version to 1.4, to avoid confusion -# with the AGL 1.3 -# fixed spelling errors in the header -# fully removed arrowvertex, as it is mapped only to a PUA Unicode value in some fonts. -# -# 1.1 [17 April 2003] Renamed [Tt]cedilla back to [Tt]commaaccent: -# -# 1.0 [31 Jan 2003] Original version. Derived from the AGLv1.2 by: -# - removing the PUA area codes -# - removing duplicate Unicode mappings, and -# - renaming tcommaaccent to tcedilla and Tcommaaccent to Tcedilla +# The records are sorted by glyph name in increasing ASCII order, +# entries with the same glyph name are sorted in decreasing priority +# order, the UVs and Unicode character names are provided for +# convenience, lines starting with "#" are comments, and blank lines +# should be ignored. +# +# Revision History: +# +# 1.7 [6 November 2008] +# - Reverted to the original 1.4 and earlier mappings for Delta, +# Omega, and mu. +# - Removed mappings for "afii" names. These should now be assigned +# "uni" names. +# - Removed mappings for "commaaccent" names. These should now be +# assigned "uni" names. +# +# 1.6 [30 January 2006] +# - Completed work intended in 1.5. +# +# 1.5 [23 November 2005] +# - Removed duplicated block at end of file. +# - Changed mappings: +# 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA +# 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA +# 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU +# - Corrected statement above about why "ffi" is omitted. 
+# +# 1.4 [24 September 2003] +# - Changed version to 1.4, to avoid confusion with the AGL 1.3. +# - Fixed spelling errors in the header. +# - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode +# value in some fonts. +# +# 1.1 [17 April 2003] +# - Renamed [Tt]cedilla back to [Tt]commaaccent. +# +# 1.0 [31 January 2003] +# - Original version. +# - Derived from the AGLv1.2 by: +# removing the PUA area codes; +# removing duplicate Unicode mappings; and +# renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla" # 0041;A;LATIN CAPITAL LETTER A 00C6;AE;LATIN CAPITAL LETTER AE @@ -126,7 +142,7 @@ 0044;D;LATIN CAPITAL LETTER D 010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON 0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE -0394;Delta;GREEK CAPITAL LETTER DELTA +2206;Delta;INCREMENT 0045;E;LATIN CAPITAL LETTER E 00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE 0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE @@ -150,7 +166,6 @@ 011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE 01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON 011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX -0122;Gcommaaccent;LATIN CAPITAL LETTER G WITH CEDILLA 0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE 0048;H;LATIN CAPITAL LETTER H 25CF;H18533;BLACK CIRCLE @@ -178,12 +193,10 @@ 0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX 004B;K;LATIN CAPITAL LETTER K 039A;Kappa;GREEK CAPITAL LETTER KAPPA -0136;Kcommaaccent;LATIN CAPITAL LETTER K WITH CEDILLA 004C;L;LATIN CAPITAL LETTER L 0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE 039B;Lambda;GREEK CAPITAL LETTER LAMDA 013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON -013B;Lcommaaccent;LATIN CAPITAL LETTER L WITH CEDILLA 013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT 0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE 004D;M;LATIN CAPITAL LETTER M @@ -191,7 +204,6 @@ 004E;N;LATIN CAPITAL LETTER N 0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE 0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON -0145;Ncommaaccent;LATIN CAPITAL LETTER N WITH 
CEDILLA 00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE 039D;Nu;GREEK CAPITAL LETTER NU 004F;O;LATIN CAPITAL LETTER O @@ -204,7 +216,7 @@ 01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN 0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE 014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON -03A9;Omega;GREEK CAPITAL LETTER OMEGA +2126;Omega;OHM SIGN 038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS 039F;Omicron;GREEK CAPITAL LETTER OMICRON 038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS @@ -219,7 +231,6 @@ 0052;R;LATIN CAPITAL LETTER R 0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE 0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON -0156;Rcommaaccent;LATIN CAPITAL LETTER R WITH CEDILLA 211C;Rfraktur;BLACK-LETTER CAPITAL R 03A1;Rho;GREEK CAPITAL LETTER RHO 0053;S;LATIN CAPITAL LETTER S @@ -267,13 +278,11 @@ 0160;Scaron;LATIN CAPITAL LETTER S WITH CARON 015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA 015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX -0218;Scommaaccent;LATIN CAPITAL LETTER S WITH COMMA BELOW 03A3;Sigma;GREEK CAPITAL LETTER SIGMA 0054;T;LATIN CAPITAL LETTER T 03A4;Tau;GREEK CAPITAL LETTER TAU 0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE 0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON -0162;Tcommaaccent;LATIN CAPITAL LETTER T WITH CEDILLA 0398;Theta;GREEK CAPITAL LETTER THETA 00DE;Thorn;LATIN CAPITAL LETTER THORN 0055;U;LATIN CAPITAL LETTER U @@ -319,241 +328,6 @@ 00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS 00E6;ae;LATIN SMALL LETTER AE 01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE -2015;afii00208;HORIZONTAL BAR -0410;afii10017;CYRILLIC CAPITAL LETTER A -0411;afii10018;CYRILLIC CAPITAL LETTER BE -0412;afii10019;CYRILLIC CAPITAL LETTER VE -0413;afii10020;CYRILLIC CAPITAL LETTER GHE -0414;afii10021;CYRILLIC CAPITAL LETTER DE -0415;afii10022;CYRILLIC CAPITAL LETTER IE -0401;afii10023;CYRILLIC CAPITAL LETTER IO -0416;afii10024;CYRILLIC CAPITAL LETTER ZHE -0417;afii10025;CYRILLIC CAPITAL LETTER ZE -0418;afii10026;CYRILLIC CAPITAL LETTER I 
-0419;afii10027;CYRILLIC CAPITAL LETTER SHORT I -041A;afii10028;CYRILLIC CAPITAL LETTER KA -041B;afii10029;CYRILLIC CAPITAL LETTER EL -041C;afii10030;CYRILLIC CAPITAL LETTER EM -041D;afii10031;CYRILLIC CAPITAL LETTER EN -041E;afii10032;CYRILLIC CAPITAL LETTER O -041F;afii10033;CYRILLIC CAPITAL LETTER PE -0420;afii10034;CYRILLIC CAPITAL LETTER ER -0421;afii10035;CYRILLIC CAPITAL LETTER ES -0422;afii10036;CYRILLIC CAPITAL LETTER TE -0423;afii10037;CYRILLIC CAPITAL LETTER U -0424;afii10038;CYRILLIC CAPITAL LETTER EF -0425;afii10039;CYRILLIC CAPITAL LETTER HA -0426;afii10040;CYRILLIC CAPITAL LETTER TSE -0427;afii10041;CYRILLIC CAPITAL LETTER CHE -0428;afii10042;CYRILLIC CAPITAL LETTER SHA -0429;afii10043;CYRILLIC CAPITAL LETTER SHCHA -042A;afii10044;CYRILLIC CAPITAL LETTER HARD SIGN -042B;afii10045;CYRILLIC CAPITAL LETTER YERU -042C;afii10046;CYRILLIC CAPITAL LETTER SOFT SIGN -042D;afii10047;CYRILLIC CAPITAL LETTER E -042E;afii10048;CYRILLIC CAPITAL LETTER YU -042F;afii10049;CYRILLIC CAPITAL LETTER YA -0490;afii10050;CYRILLIC CAPITAL LETTER GHE WITH UPTURN -0402;afii10051;CYRILLIC CAPITAL LETTER DJE -0403;afii10052;CYRILLIC CAPITAL LETTER GJE -0404;afii10053;CYRILLIC CAPITAL LETTER UKRAINIAN IE -0405;afii10054;CYRILLIC CAPITAL LETTER DZE -0406;afii10055;CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I -0407;afii10056;CYRILLIC CAPITAL LETTER YI -0408;afii10057;CYRILLIC CAPITAL LETTER JE -0409;afii10058;CYRILLIC CAPITAL LETTER LJE -040A;afii10059;CYRILLIC CAPITAL LETTER NJE -040B;afii10060;CYRILLIC CAPITAL LETTER TSHE -040C;afii10061;CYRILLIC CAPITAL LETTER KJE -040E;afii10062;CYRILLIC CAPITAL LETTER SHORT U -0430;afii10065;CYRILLIC SMALL LETTER A -0431;afii10066;CYRILLIC SMALL LETTER BE -0432;afii10067;CYRILLIC SMALL LETTER VE -0433;afii10068;CYRILLIC SMALL LETTER GHE -0434;afii10069;CYRILLIC SMALL LETTER DE -0435;afii10070;CYRILLIC SMALL LETTER IE -0451;afii10071;CYRILLIC SMALL LETTER IO -0436;afii10072;CYRILLIC SMALL LETTER ZHE -0437;afii10073;CYRILLIC SMALL LETTER ZE 
-0438;afii10074;CYRILLIC SMALL LETTER I -0439;afii10075;CYRILLIC SMALL LETTER SHORT I -043A;afii10076;CYRILLIC SMALL LETTER KA -043B;afii10077;CYRILLIC SMALL LETTER EL -043C;afii10078;CYRILLIC SMALL LETTER EM -043D;afii10079;CYRILLIC SMALL LETTER EN -043E;afii10080;CYRILLIC SMALL LETTER O -043F;afii10081;CYRILLIC SMALL LETTER PE -0440;afii10082;CYRILLIC SMALL LETTER ER -0441;afii10083;CYRILLIC SMALL LETTER ES -0442;afii10084;CYRILLIC SMALL LETTER TE -0443;afii10085;CYRILLIC SMALL LETTER U -0444;afii10086;CYRILLIC SMALL LETTER EF -0445;afii10087;CYRILLIC SMALL LETTER HA -0446;afii10088;CYRILLIC SMALL LETTER TSE -0447;afii10089;CYRILLIC SMALL LETTER CHE -0448;afii10090;CYRILLIC SMALL LETTER SHA -0449;afii10091;CYRILLIC SMALL LETTER SHCHA -044A;afii10092;CYRILLIC SMALL LETTER HARD SIGN -044B;afii10093;CYRILLIC SMALL LETTER YERU -044C;afii10094;CYRILLIC SMALL LETTER SOFT SIGN -044D;afii10095;CYRILLIC SMALL LETTER E -044E;afii10096;CYRILLIC SMALL LETTER YU -044F;afii10097;CYRILLIC SMALL LETTER YA -0491;afii10098;CYRILLIC SMALL LETTER GHE WITH UPTURN -0452;afii10099;CYRILLIC SMALL LETTER DJE -0453;afii10100;CYRILLIC SMALL LETTER GJE -0454;afii10101;CYRILLIC SMALL LETTER UKRAINIAN IE -0455;afii10102;CYRILLIC SMALL LETTER DZE -0456;afii10103;CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I -0457;afii10104;CYRILLIC SMALL LETTER YI -0458;afii10105;CYRILLIC SMALL LETTER JE -0459;afii10106;CYRILLIC SMALL LETTER LJE -045A;afii10107;CYRILLIC SMALL LETTER NJE -045B;afii10108;CYRILLIC SMALL LETTER TSHE -045C;afii10109;CYRILLIC SMALL LETTER KJE -045E;afii10110;CYRILLIC SMALL LETTER SHORT U -040F;afii10145;CYRILLIC CAPITAL LETTER DZHE -0462;afii10146;CYRILLIC CAPITAL LETTER YAT -0472;afii10147;CYRILLIC CAPITAL LETTER FITA -0474;afii10148;CYRILLIC CAPITAL LETTER IZHITSA -045F;afii10193;CYRILLIC SMALL LETTER DZHE -0463;afii10194;CYRILLIC SMALL LETTER YAT -0473;afii10195;CYRILLIC SMALL LETTER FITA -0475;afii10196;CYRILLIC SMALL LETTER IZHITSA -04D9;afii10846;CYRILLIC SMALL LETTER SCHWA 
-200E;afii299;LEFT-TO-RIGHT MARK -200F;afii300;RIGHT-TO-LEFT MARK -200D;afii301;ZERO WIDTH JOINER -066A;afii57381;ARABIC PERCENT SIGN -060C;afii57388;ARABIC COMMA -0660;afii57392;ARABIC-INDIC DIGIT ZERO -0661;afii57393;ARABIC-INDIC DIGIT ONE -0662;afii57394;ARABIC-INDIC DIGIT TWO -0663;afii57395;ARABIC-INDIC DIGIT THREE -0664;afii57396;ARABIC-INDIC DIGIT FOUR -0665;afii57397;ARABIC-INDIC DIGIT FIVE -0666;afii57398;ARABIC-INDIC DIGIT SIX -0667;afii57399;ARABIC-INDIC DIGIT SEVEN -0668;afii57400;ARABIC-INDIC DIGIT EIGHT -0669;afii57401;ARABIC-INDIC DIGIT NINE -061B;afii57403;ARABIC SEMICOLON -061F;afii57407;ARABIC QUESTION MARK -0621;afii57409;ARABIC LETTER HAMZA -0622;afii57410;ARABIC LETTER ALEF WITH MADDA ABOVE -0623;afii57411;ARABIC LETTER ALEF WITH HAMZA ABOVE -0624;afii57412;ARABIC LETTER WAW WITH HAMZA ABOVE -0625;afii57413;ARABIC LETTER ALEF WITH HAMZA BELOW -0626;afii57414;ARABIC LETTER YEH WITH HAMZA ABOVE -0627;afii57415;ARABIC LETTER ALEF -0628;afii57416;ARABIC LETTER BEH -0629;afii57417;ARABIC LETTER TEH MARBUTA -062A;afii57418;ARABIC LETTER TEH -062B;afii57419;ARABIC LETTER THEH -062C;afii57420;ARABIC LETTER JEEM -062D;afii57421;ARABIC LETTER HAH -062E;afii57422;ARABIC LETTER KHAH -062F;afii57423;ARABIC LETTER DAL -0630;afii57424;ARABIC LETTER THAL -0631;afii57425;ARABIC LETTER REH -0632;afii57426;ARABIC LETTER ZAIN -0633;afii57427;ARABIC LETTER SEEN -0634;afii57428;ARABIC LETTER SHEEN -0635;afii57429;ARABIC LETTER SAD -0636;afii57430;ARABIC LETTER DAD -0637;afii57431;ARABIC LETTER TAH -0638;afii57432;ARABIC LETTER ZAH -0639;afii57433;ARABIC LETTER AIN -063A;afii57434;ARABIC LETTER GHAIN -0640;afii57440;ARABIC TATWEEL -0641;afii57441;ARABIC LETTER FEH -0642;afii57442;ARABIC LETTER QAF -0643;afii57443;ARABIC LETTER KAF -0644;afii57444;ARABIC LETTER LAM -0645;afii57445;ARABIC LETTER MEEM -0646;afii57446;ARABIC LETTER NOON -0648;afii57448;ARABIC LETTER WAW -0649;afii57449;ARABIC LETTER ALEF MAKSURA -064A;afii57450;ARABIC LETTER YEH -064B;afii57451;ARABIC 
FATHATAN -064C;afii57452;ARABIC DAMMATAN -064D;afii57453;ARABIC KASRATAN -064E;afii57454;ARABIC FATHA -064F;afii57455;ARABIC DAMMA -0650;afii57456;ARABIC KASRA -0651;afii57457;ARABIC SHADDA -0652;afii57458;ARABIC SUKUN -0647;afii57470;ARABIC LETTER HEH -06A4;afii57505;ARABIC LETTER VEH -067E;afii57506;ARABIC LETTER PEH -0686;afii57507;ARABIC LETTER TCHEH -0698;afii57508;ARABIC LETTER JEH -06AF;afii57509;ARABIC LETTER GAF -0679;afii57511;ARABIC LETTER TTEH -0688;afii57512;ARABIC LETTER DDAL -0691;afii57513;ARABIC LETTER RREH -06BA;afii57514;ARABIC LETTER NOON GHUNNA -06D2;afii57519;ARABIC LETTER YEH BARREE -06D5;afii57534;ARABIC LETTER AE -20AA;afii57636;NEW SHEQEL SIGN -05BE;afii57645;HEBREW PUNCTUATION MAQAF -05C3;afii57658;HEBREW PUNCTUATION SOF PASUQ -05D0;afii57664;HEBREW LETTER ALEF -05D1;afii57665;HEBREW LETTER BET -05D2;afii57666;HEBREW LETTER GIMEL -05D3;afii57667;HEBREW LETTER DALET -05D4;afii57668;HEBREW LETTER HE -05D5;afii57669;HEBREW LETTER VAV -05D6;afii57670;HEBREW LETTER ZAYIN -05D7;afii57671;HEBREW LETTER HET -05D8;afii57672;HEBREW LETTER TET -05D9;afii57673;HEBREW LETTER YOD -05DA;afii57674;HEBREW LETTER FINAL KAF -05DB;afii57675;HEBREW LETTER KAF -05DC;afii57676;HEBREW LETTER LAMED -05DD;afii57677;HEBREW LETTER FINAL MEM -05DE;afii57678;HEBREW LETTER MEM -05DF;afii57679;HEBREW LETTER FINAL NUN -05E0;afii57680;HEBREW LETTER NUN -05E1;afii57681;HEBREW LETTER SAMEKH -05E2;afii57682;HEBREW LETTER AYIN -05E3;afii57683;HEBREW LETTER FINAL PE -05E4;afii57684;HEBREW LETTER PE -05E5;afii57685;HEBREW LETTER FINAL TSADI -05E6;afii57686;HEBREW LETTER TSADI -05E7;afii57687;HEBREW LETTER QOF -05E8;afii57688;HEBREW LETTER RESH -05E9;afii57689;HEBREW LETTER SHIN -05EA;afii57690;HEBREW LETTER TAV -05F0;afii57716;HEBREW LIGATURE YIDDISH DOUBLE VAV -05F1;afii57717;HEBREW LIGATURE YIDDISH VAV YOD -05F2;afii57718;HEBREW LIGATURE YIDDISH DOUBLE YOD -05B4;afii57793;HEBREW POINT HIRIQ -05B5;afii57794;HEBREW POINT TSERE -05B6;afii57795;HEBREW POINT SEGOL 
-05BB;afii57796;HEBREW POINT QUBUTS -05B8;afii57797;HEBREW POINT QAMATS -05B7;afii57798;HEBREW POINT PATAH -05B0;afii57799;HEBREW POINT SHEVA -05B2;afii57800;HEBREW POINT HATAF PATAH -05B1;afii57801;HEBREW POINT HATAF SEGOL -05B3;afii57802;HEBREW POINT HATAF QAMATS -05C2;afii57803;HEBREW POINT SIN DOT -05C1;afii57804;HEBREW POINT SHIN DOT -05B9;afii57806;HEBREW POINT HOLAM -05BC;afii57807;HEBREW POINT DAGESH OR MAPIQ -05BD;afii57839;HEBREW POINT METEG -05BF;afii57841;HEBREW POINT RAFE -05C0;afii57842;HEBREW PUNCTUATION PASEQ -02BC;afii57929;MODIFIER LETTER APOSTROPHE -2105;afii61248;CARE OF -2113;afii61289;SCRIPT SMALL L -2116;afii61352;NUMERO SIGN -202C;afii61573;POP DIRECTIONAL FORMATTING -202D;afii61574;LEFT-TO-RIGHT OVERRIDE -202E;afii61575;RIGHT-TO-LEFT OVERRIDE -200C;afii61664;ZERO WIDTH NON-JOINER -066D;afii63167;ARABIC FIVE POINTED STAR -02BD;afii64937;MODIFIER LETTER REVERSED COMMA 00E0;agrave;LATIN SMALL LETTER A WITH GRAVE 2135;aleph;ALEF SYMBOL 03B1;alpha;GREEK SMALL LETTER ALPHA @@ -684,7 +458,6 @@ 011F;gbreve;LATIN SMALL LETTER G WITH BREVE 01E7;gcaron;LATIN SMALL LETTER G WITH CARON 011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX -0123;gcommaaccent;LATIN SMALL LETTER G WITH CEDILLA 0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE 00DF;germandbls;LATIN SMALL LETTER SHARP S 2207;gradient;NABLA @@ -730,13 +503,11 @@ 0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX 006B;k;LATIN SMALL LETTER K 03BA;kappa;GREEK SMALL LETTER KAPPA -0137;kcommaaccent;LATIN SMALL LETTER K WITH CEDILLA 0138;kgreenlandic;LATIN SMALL LETTER KRA 006C;l;LATIN SMALL LETTER L 013A;lacute;LATIN SMALL LETTER L WITH ACUTE 03BB;lambda;GREEK SMALL LETTER LAMDA 013E;lcaron;LATIN SMALL LETTER L WITH CARON -013C;lcommaaccent;LATIN SMALL LETTER L WITH CEDILLA 0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT 003C;less;LESS-THAN SIGN 2264;lessequal;LESS-THAN OR EQUAL TO @@ -754,7 +525,7 @@ 2642;male;MALE SIGN 2212;minus;MINUS SIGN 2032;minute;PRIME -03BC;mu;GREEK SMALL LETTER MU 
+00B5;mu;MICRO SIGN 00D7;multiply;MULTIPLICATION SIGN 266A;musicalnote;EIGHTH NOTE 266B;musicalnotedbl;BEAMED EIGHTH NOTES @@ -762,7 +533,6 @@ 0144;nacute;LATIN SMALL LETTER N WITH ACUTE 0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE 0148;ncaron;LATIN SMALL LETTER N WITH CARON -0146;ncommaaccent;LATIN SMALL LETTER N WITH CEDILLA 0039;nine;DIGIT NINE 2209;notelement;NOT AN ELEMENT OF 2260;notequal;NOT EQUAL TO @@ -837,7 +607,6 @@ 0155;racute;LATIN SMALL LETTER R WITH ACUTE 221A;radical;SQUARE ROOT 0159;rcaron;LATIN SMALL LETTER R WITH CARON -0157;rcommaaccent;LATIN SMALL LETTER R WITH CEDILLA 2286;reflexsubset;SUBSET OF OR EQUAL TO 2287;reflexsuperset;SUPERSET OF OR EQUAL TO 00AE;registered;REGISTERED SIGN @@ -850,7 +619,6 @@ 0161;scaron;LATIN SMALL LETTER S WITH CARON 015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA 015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX -0219;scommaaccent;LATIN SMALL LETTER S WITH COMMA BELOW 2033;second;DOUBLE PRIME 00A7;section;SECTION SIGN 003B;semicolon;SEMICOLON @@ -873,7 +641,6 @@ 03C4;tau;GREEK SMALL LETTER TAU 0167;tbar;LATIN SMALL LETTER T WITH STROKE 0165;tcaron;LATIN SMALL LETTER T WITH CARON -0163;tcommaaccent;LATIN SMALL LETTER T WITH CEDILLA 2234;therefore;THEREFORE 03B8;theta;GREEK SMALL LETTER THETA 03D1;theta1;GREEK THETA SYMBOL @@ -934,36 +701,38 @@ 017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE 0030;zero;DIGIT ZERO 03B6;zeta;GREEK SMALL LETTER ZETA +#END """ -AGLError = "AGLError" +class AGLError(Exception): + pass AGL2UV = {} UV2AGL = {} def _builddicts(): import re - + lines = _aglText.splitlines() - + parseAGL_RE = re.compile("([0-9A-F]{4});([A-Za-z_0-9.]+);.*?$") - + for line in lines: if not line or line[:1] == '#': continue m = parseAGL_RE.match(line) if not m: - raise AGLError, "syntax error in glyphlist.txt: %s" % repr(line[:20]) + raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20])) unicode = m.group(1) assert len(unicode) == 4 unicode = int(unicode, 16) glyphName = 
m.group(2) - if AGL2UV.has_key(glyphName): + if glyphName in AGL2UV: # the above table contains identical duplicates assert AGL2UV[glyphName] == unicode else: AGL2UV[glyphName] = unicode UV2AGL[unicode] = glyphName - + _builddicts() diff -Nru fonttools-2.4/Lib/fontTools/cffLib.py fonttools-3.0/Lib/fontTools/cffLib.py --- fonttools-2.4/Lib/fontTools/cffLib.py 2013-06-12 05:04:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/cffLib.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,13 +1,11 @@ """cffLib.py -- read/write tools for Adobe CFF fonts.""" -# -# $Id: cffLib.py,v 1.34 2008-03-07 19:56:17 jvr Exp $ -# - -import struct, sstruct -import string +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct from fontTools.misc import psCharStrings from fontTools.misc.textTools import safeEval +import struct DEBUG = 0 @@ -19,16 +17,16 @@ offSize: B """ -class CFFFontSet: - +class CFFFontSet(object): + def __init__(self): pass - + def decompile(self, file, otFont): sstruct.unpack(cffHeaderFormat, file.read(4), self) assert self.major == 1 and self.minor == 0, \ "unknown CFF format: %d.%d" % (self.major, self.minor) - + file.seek(self.hdrSize) self.fontNames = list(Index(file)) self.topDictIndex = TopDictIndex(file) @@ -36,23 +34,23 @@ self.GlobalSubrs = GlobalSubrsIndex(file) self.topDictIndex.strings = self.strings self.topDictIndex.GlobalSubrs = self.GlobalSubrs - + def __len__(self): return len(self.fontNames) - + def keys(self): return list(self.fontNames) - + def values(self): return self.topDictIndex - + def __getitem__(self, name): try: index = self.fontNames.index(name) except ValueError: - raise KeyError, name + raise KeyError(name) return self.topDictIndex[index] - + def compile(self, file, otFont): strings = IndexedStrings() writer = CFFWriter() @@ -65,21 +63,20 @@ writer.add(topCompiler) writer.add(strings.getCompiler()) writer.add(self.GlobalSubrs.getCompiler(strings, None)) - + for 
topDict in self.topDictIndex: if not hasattr(topDict, "charset") or topDict.charset is None: charset = otFont.getGlyphOrder() topDict.charset = charset - + for child in topCompiler.getChildren(strings): writer.add(child) - + writer.toFile(file) - + def toXML(self, xmlWriter, progress=None): - xmlWriter.newline() for fontName in self.fontNames: - xmlWriter.begintag("CFFFont", name=fontName) + xmlWriter.begintag("CFFFont", name=tostr(fontName)) xmlWriter.newline() font = self[fontName] font.toXML(xmlWriter, progress) @@ -91,9 +88,8 @@ self.GlobalSubrs.toXML(xmlWriter, progress) xmlWriter.endtag("GlobalSubrs") xmlWriter.newline() - xmlWriter.newline() - - def fromXML(self, (name, attrs, content)): + + def fromXML(self, name, attrs, content): if not hasattr(self, "GlobalSubrs"): self.GlobalSubrs = GlobalSubrsIndex() self.major = 1 @@ -112,31 +108,32 @@ for element in content: if isinstance(element, basestring): continue - topDict.fromXML(element) + name, attrs, content = element + topDict.fromXML(name, attrs, content) elif name == "GlobalSubrs": for element in content: if isinstance(element, basestring): continue name, attrs, content = element subr = psCharStrings.T2CharString() - subr.fromXML((name, attrs, content)) + subr.fromXML(name, attrs, content) self.GlobalSubrs.append(subr) -class CFFWriter: - +class CFFWriter(object): + def __init__(self): self.data = [] - + def add(self, table): self.data.append(table) - + def toFile(self, file): lastPosList = None count = 1 - while 1: + while True: if DEBUG: - print "CFFWriter.toFile() iteration:", count + print("CFFWriter.toFile() iteration:", count) count = count + 1 pos = 0 posList = [pos] @@ -153,7 +150,7 @@ break lastPosList = posList if DEBUG: - print "CFFWriter.toFile() writing to file." 
+ print("CFFWriter.toFile() writing to file.") begin = file.tell() posList = [0] for item in self.data: @@ -177,15 +174,15 @@ return offSize -class IndexCompiler: - +class IndexCompiler(object): + def __init__(self, items, strings, parent): self.items = self.getItems(items, strings) self.parent = parent - + def getItems(self, items, strings): return items - + def getOffsets(self): pos = 1 offsets = [pos] @@ -196,7 +193,7 @@ pos = pos + len(item) offsets.append(pos) return offsets - + def getDataLength(self): lastOffset = self.getOffsets()[-1] offSize = calcOffSize(lastOffset) @@ -207,7 +204,7 @@ lastOffset - 1 # size of object data ) return dataLength - + def toFile(self, file): offsets = self.getOffsets() writeCard16(file, len(self.items)) @@ -223,23 +220,23 @@ if hasattr(item, "toFile"): item.toFile(file) else: - file.write(item) + file.write(tobytes(item, encoding="latin1")) class IndexedStringsCompiler(IndexCompiler): - + def getItems(self, items, strings): return items.strings class TopDictIndexCompiler(IndexCompiler): - + def getItems(self, items, strings): out = [] for item in items: out.append(item.getCompiler(strings, self)) return out - + def getChildren(self, strings): children = [] for topDict in self.items: @@ -248,13 +245,13 @@ class FDArrayIndexCompiler(IndexCompiler): - + def getItems(self, items, strings): out = [] for item in items: out.append(item.getCompiler(strings, self)) return out - + def getChildren(self, strings): children = [] for fontDict in self.items: @@ -300,32 +297,30 @@ self.parent.rawDict["CharStrings"] = pos -class Index: - +class Index(object): + """This class represents what the CFF spec calls an INDEX.""" - + compilerClass = IndexCompiler - + def __init__(self, file=None): + self.items = [] name = self.__class__.__name__ if file is None: - self.items = [] return if DEBUG: - print "loading %s at %s" % (name, file.tell()) + print("loading %s at %s" % (name, file.tell())) self.file = file count = readCard16(file) - self.count = 
count - self.items = [None] * count if count == 0: - self.items = [] return + self.items = [None] * count offSize = readCard8(file) if DEBUG: - print " index count: %s offSize: %s" % (count, offSize) + print(" index count: %s offSize: %s" % (count, offSize)) assert offSize <= 4, "offSize too large: %s" % offSize self.offsets = offsets = [] - pad = '\0' * (4 - offSize) + pad = b'\0' * (4 - offSize) for index in range(count+1): chunk = file.read(offSize) chunk = pad + chunk @@ -334,11 +329,11 @@ self.offsetBase = file.tell() - 1 file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot if DEBUG: - print " end of %s at %s" % (name, file.tell()) - + print(" end of %s at %s" % (name, file.tell())) + def __len__(self): return len(self.items) - + def __getitem__(self, index): item = self.items[index] if item is not None: @@ -352,21 +347,21 @@ item = self.produceItem(index, data, file, offset, size) self.items[index] = item return item - + def produceItem(self, index, data, file, offset, size): return data - + def append(self, item): self.items.append(item) - + def getCompiler(self, strings, parent): return self.compilerClass(self, strings, parent) class GlobalSubrsIndex(Index): - + compilerClass = GlobalSubrsCompiler - + def __init__(self, file=None, globalSubrs=None, private=None, fdSelect=None, fdArray=None): Index.__init__(self, file) self.globalSubrs = globalSubrs @@ -375,7 +370,7 @@ self.fdSelect = fdSelect if fdArray: self.fdArray = fdArray - + def produceItem(self, index, data, file, offset, size): if self.private is not None: private = self.private @@ -384,7 +379,7 @@ else: private = None return psCharStrings.T2CharString(data, private=private, globalSubrs=self.globalSubrs) - + def toXML(self, xmlWriter, progress): xmlWriter.comment("The 'index' attribute is only for humans; it is ignored when parsed.") xmlWriter.newline() @@ -398,14 +393,14 @@ subr.toXML(xmlWriter) xmlWriter.endtag("CharString") xmlWriter.newline() - - def fromXML(self, (name, 
attrs, content)): - if name <> "CharString": + + def fromXML(self, name, attrs, content): + if name != "CharString": return subr = psCharStrings.T2CharString() - subr.fromXML((name, attrs, content)) + subr.fromXML(name, attrs, content) self.append(subr) - + def getItemAndSelector(self, index): sel = None if hasattr(self, 'fdSelect'): @@ -418,14 +413,14 @@ class TopDictIndex(Index): - + compilerClass = TopDictIndexCompiler - + def produceItem(self, index, data, file, offset, size): top = TopDict(self.strings, file, offset, self.GlobalSubrs) top.decompile(data) return top - + def toXML(self, xmlWriter, progress): for i in range(len(self)): xmlWriter.begintag("FontDict", index=i) @@ -436,22 +431,23 @@ class FDArrayIndex(TopDictIndex): - + compilerClass = FDArrayIndexCompiler - def fromXML(self, (name, attrs, content)): - if name <> "FontDict": + def fromXML(self, name, attrs, content): + if name != "FontDict": return fontDict = FontDict() for element in content: if isinstance(element, basestring): continue - fontDict.fromXML(element) + name, attrs, content = element + fontDict.fromXML(name, attrs, content) self.append(fontDict) class FDSelect: - def __init__(self, file = None, numGlyphs = None, format=None): + def __init__(self, file=None, numGlyphs=None, format=None): if file: # read data in from file self.format = readCard8(file) @@ -461,6 +457,7 @@ elif self.format == 3: gidArray = [None] * numGlyphs nRanges = readCard16(file) + fd = None prev = None for i in range(nRanges): first = readCard16(file) @@ -475,29 +472,28 @@ gidArray[glyphID] = fd self.gidArray = gidArray else: - assert 0, "unsupported FDSelect format: %s" % format + assert False, "unsupported FDSelect format: %s" % format else: # reading from XML. Make empty gidArray,, and leave format as passed in. - # format == None will result in the smallest representation being used. + # format is None will result in the smallest representation being used. 
self.format = format self.gidArray = [] - def __len__(self): return len(self.gidArray) - + def __getitem__(self, index): return self.gidArray[index] - + def __setitem__(self, index, fdSelectValue): self.gidArray[index] = fdSelectValue def append(self, fdSelectValue): self.gidArray.append(fdSelectValue) - -class CharStrings: - + +class CharStrings(object): + def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray): if file is not None: self.charStringsIndex = SubrsIndex(file, globalSubrs, private, fdSelect, fdArray) @@ -510,39 +506,41 @@ self.charStringsAreIndexed = 0 self.globalSubrs = globalSubrs self.private = private - if fdSelect != None: + if fdSelect is not None: self.fdSelect = fdSelect - if fdArray!= None: + if fdArray is not None: self.fdArray = fdArray - + def keys(self): - return self.charStrings.keys() - + return list(self.charStrings.keys()) + def values(self): if self.charStringsAreIndexed: return self.charStringsIndex else: - return self.charStrings.values() - + return list(self.charStrings.values()) + def has_key(self, name): - return self.charStrings.has_key(name) - + return name in self.charStrings + + __contains__ = has_key + def __len__(self): return len(self.charStrings) - + def __getitem__(self, name): charString = self.charStrings[name] if self.charStringsAreIndexed: charString = self.charStringsIndex[charString] return charString - + def __setitem__(self, name, charString): if self.charStringsAreIndexed: index = self.charStrings[name] self.charStringsIndex[index] = charString else: self.charStrings[name] = charString - + def getItemAndSelector(self, name): if self.charStringsAreIndexed: index = self.charStrings[name] @@ -553,10 +551,9 @@ else: raise KeyError("fdSelect array not yet defined.") return self.charStrings[name], sel - + def toXML(self, xmlWriter, progress): - names = self.keys() - names.sort() + names = sorted(self.keys()) i = 0 step = 10 numGlyphs = len(names) @@ -577,15 +574,15 @@ xmlWriter.newline() if not i % 
step and progress is not None: progress.setLabel("Dumping 'CFF ' table... (%s)" % name) - progress.increment(step / float(numGlyphs)) + progress.increment(step / numGlyphs) i = i + 1 - - def fromXML(self, (name, attrs, content)): + + def fromXML(self, name, attrs, content): for element in content: if isinstance(element, basestring): continue name, attrs, content = element - if name <> "CharString": + if name != "CharString": continue fdID = -1 if hasattr(self, "fdArray"): @@ -593,32 +590,32 @@ private = self.fdArray[fdID].Private else: private = self.private - + glyphName = attrs["name"] charString = psCharStrings.T2CharString( private=private, globalSubrs=self.globalSubrs) - charString.fromXML((name, attrs, content)) + charString.fromXML(name, attrs, content) if fdID >= 0: charString.fdSelectIndex = fdID self[glyphName] = charString def readCard8(file): - return ord(file.read(1)) + return byteord(file.read(1)) def readCard16(file): value, = struct.unpack(">H", file.read(2)) return value def writeCard8(file, value): - file.write(chr(value)) + file.write(bytechr(value)) def writeCard16(file, value): file.write(struct.pack(">H", value)) def packCard8(value): - return chr(value) + return bytechr(value) def packCard16(value): return struct.pack(">H", value) @@ -633,9 +630,9 @@ d = {} for op, name, arg, default, conv in table: if isinstance(op, tuple): - op = chr(op[0]) + chr(op[1]) + op = bytechr(op[0]) + bytechr(op[1]) else: - op = chr(op) + op = bytechr(op) d[name] = (op, arg) return d @@ -659,7 +656,7 @@ return d -class SimpleConverter: +class SimpleConverter(object): def read(self, parent, value): return value def write(self, parent, value): @@ -667,13 +664,30 @@ def xmlWrite(self, xmlWriter, name, value, progress): xmlWriter.simpletag(name, value=value) xmlWriter.newline() - def xmlRead(self, (name, attrs, content), parent): + def xmlRead(self, name, attrs, content, parent): return attrs["value"] +class ASCIIConverter(SimpleConverter): + def read(self, parent, 
value): + return tostr(value, encoding='ascii') + def write(self, parent, value): + return tobytes(value, encoding='ascii') + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=tounicode(value, encoding="ascii")) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("ascii")) + class Latin1Converter(SimpleConverter): - def xmlRead(self, (name, attrs, content), parent): - s = unicode(attrs["value"], "utf-8") - return s.encode("latin-1") + def read(self, parent, value): + return tostr(value, encoding='latin1') + def write(self, parent, value): + return tobytes(value, encoding='latin1') + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=tounicode(value, encoding="latin1")) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("latin1")) def parseNum(s): @@ -684,17 +698,17 @@ return value class NumberConverter(SimpleConverter): - def xmlRead(self, (name, attrs, content), parent): + def xmlRead(self, name, attrs, content, parent): return parseNum(attrs["value"]) class ArrayConverter(SimpleConverter): def xmlWrite(self, xmlWriter, name, value, progress): - value = map(str, value) - xmlWriter.simpletag(name, value=" ".join(value)) + value = " ".join(map(str, value)) + xmlWriter.simpletag(name, value=value) xmlWriter.newline() - def xmlRead(self, (name, attrs, content), parent): + def xmlRead(self, name, attrs, content, parent): values = attrs["value"].split() - return map(parseNum, values) + return [parseNum(value) for value in values] class TableConverter(SimpleConverter): def xmlWrite(self, xmlWriter, name, value, progress): @@ -703,12 +717,13 @@ value.toXML(xmlWriter, progress) xmlWriter.endtag(name) xmlWriter.newline() - def xmlRead(self, (name, attrs, content), parent): + def xmlRead(self, name, attrs, content, parent): ob = self.getClass()() for element in content: if 
isinstance(element, basestring): continue - ob.fromXML(element) + name, attrs, content = element + ob.fromXML(name, attrs, content) return ob class PrivateDictConverter(TableConverter): @@ -720,7 +735,7 @@ priv = PrivateDict(parent.strings, file, offset) file.seek(offset) data = file.read(size) - len(data) == size + assert len(data) == size priv.decompile(data) return priv def write(self, parent, value): @@ -751,18 +766,18 @@ return CharStrings(file, charset, globalSubrs, private, fdSelect, fdArray) def write(self, parent, value): return 0 # dummy value - def xmlRead(self, (name, attrs, content), parent): + def xmlRead(self, name, attrs, content, parent): if hasattr(parent, "ROS"): - # if it is a CID-keyed font, then the private Dict is extracted from the parent.FDArray + # if it is a CID-keyed font, then the private Dict is extracted from the parent.FDArray private, fdSelect, fdArray = None, parent.FDSelect, parent.FDArray else: - # if it is a name-keyed font, then the private dict is in the top dict, and there is no fdArray. + # if it is a name-keyed font, then the private dict is in the top dict, and there is no fdArray. private, fdSelect, fdArray = parent.Private, None, None charStrings = CharStrings(None, None, parent.GlobalSubrs, private, fdSelect, fdArray) - charStrings.fromXML((name, attrs, content)) + charStrings.fromXML(name, attrs, content) return charStrings -class CharsetConverter: +class CharsetConverter(object): def read(self, parent, value): isCID = hasattr(parent, "ROS") if value > 2: @@ -770,7 +785,7 @@ file = parent.file file.seek(value) if DEBUG: - print "loading charset at %s" % value + print("loading charset at %s" % value) format = readCard8(file) if format == 0: charset = parseCharset0(numGlyphs, file, parent.strings, isCID) @@ -780,9 +795,9 @@ raise NotImplementedError assert len(charset) == numGlyphs if DEBUG: - print " charset end at %s" % file.tell() + print(" charset end at %s" % file.tell()) else: # offset == 0 -> no charset data. 
- if isCID or not parent.rawDict.has_key("CharStrings"): + if isCID or "CharStrings" not in parent.rawDict: assert value == 0 # We get here only when processing fontDicts from the FDArray of CFF-CID fonts. Only the real topDict references the chrset. charset = None elif value == 0: @@ -801,13 +816,13 @@ ##xmlWriter.simpletag("charset") xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") xmlWriter.newline() - def xmlRead(self, (name, attrs, content), parent): + def xmlRead(self, name, attrs, content, parent): if 0: return safeEval(attrs["value"]) -class CharsetCompiler: - +class CharsetCompiler(object): + def __init__(self, strings, charset, parent): assert charset[0] == '.notdef' isCID = hasattr(parent.dictObj, "ROS") @@ -818,13 +833,13 @@ else: self.data = data0 self.parent = parent - + def setPos(self, pos, endPos): self.parent.rawDict["charset"] = pos - + def getDataLength(self): return len(self.data) - + def toFile(self, file): file.write(self.data) @@ -836,8 +851,8 @@ return strings.getSID(name) def packCharset0(charset, isCID, strings): - format = 0 - data = [packCard8(format)] + fmt = 0 + data = [packCard8(fmt)] if isCID: getNameID = getCIDfromName else: @@ -845,11 +860,11 @@ for name in charset[1:]: data.append(packCard16(getNameID(name,strings))) - return "".join(data) + return bytesjoin(data) def packCharset(charset, isCID, strings): - format = 1 + fmt = 1 ranges = [] first = None end = 0 @@ -857,48 +872,49 @@ getNameID = getCIDfromName else: getNameID = getSIDfromName - + for name in charset[1:]: SID = getNameID(name, strings) if first is None: first = SID - elif end + 1 <> SID: + elif end + 1 != SID: nLeft = end - first if nLeft > 255: - format = 2 + fmt = 2 ranges.append((first, nLeft)) first = SID end = SID - nLeft = end - first - if nLeft > 255: - format = 2 - ranges.append((first, nLeft)) - - data = [packCard8(format)] - if format == 1: + if end: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, 
nLeft)) + + data = [packCard8(fmt)] + if fmt == 1: nLeftFunc = packCard8 else: nLeftFunc = packCard16 for first, nLeft in ranges: data.append(packCard16(first) + nLeftFunc(nLeft)) - return "".join(data) + return bytesjoin(data) def parseCharset0(numGlyphs, file, strings, isCID): charset = [".notdef"] if isCID: for i in range(numGlyphs - 1): CID = readCard16(file) - charset.append("cid" + string.zfill(str(CID), 5) ) + charset.append("cid" + str(CID).zfill(5)) else: for i in range(numGlyphs - 1): SID = readCard16(file) charset.append(strings[SID]) return charset -def parseCharset(numGlyphs, file, strings, isCID, format): +def parseCharset(numGlyphs, file, strings, isCID, fmt): charset = ['.notdef'] count = 1 - if format == 1: + if fmt == 1: nLeftFunc = readCard8 else: nLeftFunc = readCard16 @@ -907,7 +923,7 @@ nLeft = nLeftFunc(file) if isCID: for CID in range(first, first+nLeft+1): - charset.append("cid" + string.zfill(str(CID), 5) ) + charset.append("cid" + str(CID).zfill(5)) else: for SID in range(first, first+nLeft+1): charset.append(strings[SID]) @@ -915,7 +931,7 @@ return charset -class EncodingCompiler: +class EncodingCompiler(object): def __init__(self, strings, encoding, parent): assert not isinstance(encoding, basestring) @@ -929,10 +945,10 @@ def setPos(self, pos, endPos): self.parent.rawDict["Encoding"] = pos - + def getDataLength(self): return len(self.data) - + def toFile(self, file): file.write(self.data) @@ -949,16 +965,16 @@ file = parent.file file.seek(value) if DEBUG: - print "loading Encoding at %s" % value - format = readCard8(file) - haveSupplement = format & 0x80 + print("loading Encoding at %s" % value) + fmt = readCard8(file) + haveSupplement = fmt & 0x80 if haveSupplement: - raise NotImplementedError, "Encoding supplements are not yet supported" - format = format & 0x7f - if format == 0: + raise NotImplementedError("Encoding supplements are not yet supported") + fmt = fmt & 0x7f + if fmt == 0: encoding = parseEncoding0(parent.charset, file, 
haveSupplement, parent.strings) - elif format == 1: + elif fmt == 1: encoding = parseEncoding1(parent.charset, file, haveSupplement, parent.strings) return encoding @@ -985,8 +1001,8 @@ xmlWriter.endtag(name) xmlWriter.newline() - def xmlRead(self, (name, attrs, content), parent): - if attrs.has_key("name"): + def xmlRead(self, name, attrs, content, parent): + if "name" in attrs: return attrs["name"] encoding = [".notdef"] * 256 for element in content: @@ -1022,7 +1038,7 @@ return encoding def packEncoding0(charset, encoding, strings): - format = 0 + fmt = 0 m = {} for code in range(len(encoding)): name = encoding[code] @@ -1032,19 +1048,19 @@ for name in charset[1:]: code = m.get(name) codes.append(code) - + while codes and codes[-1] is None: codes.pop() - data = [packCard8(format), packCard8(len(codes))] + data = [packCard8(fmt), packCard8(len(codes))] for code in codes: if code is None: code = 0 data.append(packCard8(code)) - return "".join(data) + return bytesjoin(data) def packEncoding1(charset, encoding, strings): - format = 1 + fmt = 1 m = {} for code in range(len(encoding)): name = encoding[code] @@ -1057,24 +1073,24 @@ code = m.get(name, -1) if first is None: first = code - elif end + 1 <> code: + elif end + 1 != code: nLeft = end - first ranges.append((first, nLeft)) first = code end = code nLeft = end - first ranges.append((first, nLeft)) - + # remove unencoded glyphs at the end. 
while ranges and ranges[-1][0] == -1: ranges.pop() - data = [packCard8(format), packCard8(len(ranges))] + data = [packCard8(fmt), packCard8(len(ranges))] for first, nLeft in ranges: if first == -1: # unencoded first = 0 data.append(packCard8(first) + packCard8(nLeft)) - return "".join(data) + return bytesjoin(data) class FDArrayConverter(TableConverter): @@ -1090,16 +1106,17 @@ def write(self, parent, value): return 0 # dummy value - def xmlRead(self, (name, attrs, content), parent): + def xmlRead(self, name, attrs, content, parent): fdArray = FDArrayIndex() for element in content: if isinstance(element, basestring): continue - fdArray.fromXML(element) + name, attrs, content = element + fdArray.fromXML(name, attrs, content) return fdArray -class FDSelectConverter: +class FDSelectConverter(object): def read(self, parent, value): file = parent.file @@ -1116,24 +1133,24 @@ xmlWriter.simpletag(name, [('format', value.format)]) xmlWriter.newline() - def xmlRead(self, (name, attrs, content), parent): - format = safeEval(attrs["format"]) + def xmlRead(self, name, attrs, content, parent): + fmt = safeEval(attrs["format"]) file = None numGlyphs = None - fdSelect = FDSelect(file, numGlyphs, format) + fdSelect = FDSelect(file, numGlyphs, fmt) return fdSelect - + def packFDSelect0(fdSelectArray): - format = 0 - data = [packCard8(format)] + fmt = 0 + data = [packCard8(fmt)] for index in fdSelectArray: data.append(packCard8(index)) - return "".join(data) + return bytesjoin(data) def packFDSelect3(fdSelectArray): - format = 3 + fmt = 3 fdRanges = [] first = None end = 0 @@ -1145,24 +1162,24 @@ fdRanges.append([i, fdIndex]) lastFDIndex = fdIndex sentinelGID = i + 1 - - data = [packCard8(format)] + + data = [packCard8(fmt)] data.append(packCard16( len(fdRanges) )) for fdRange in fdRanges: data.append(packCard16(fdRange[0])) data.append(packCard8(fdRange[1])) data.append(packCard16(sentinelGID)) - return "".join(data) + return bytesjoin(data) -class FDSelectCompiler: - +class 
FDSelectCompiler(object): + def __init__(self, fdSelect, parent): - format = fdSelect.format + fmt = fdSelect.format fdSelectArray = fdSelect.gidArray - if format == 0: + if fmt == 0: self.data = packFDSelect0(fdSelectArray) - elif format == 3: + elif fmt == 3: self.data = packFDSelect3(fdSelectArray) else: # choose smaller of the two formats @@ -1176,13 +1193,13 @@ fdSelect.format = 3 self.parent = parent - + def setPos(self, pos, endPos): self.parent.rawDict["FDSelect"] = pos - + def getDataLength(self): return len(self.data) - + def toFile(self, file): file.write(self.data) @@ -1191,51 +1208,50 @@ def xmlWrite(self, xmlWriter, name, value, progress): registry, order, supplement = value - xmlWriter.simpletag(name, [('Registry', registry), ('Order', order), + xmlWriter.simpletag(name, [('Registry', tostr(registry)), ('Order', tostr(order)), ('Supplement', supplement)]) xmlWriter.newline() - def xmlRead(self, (name, attrs, content), parent): + def xmlRead(self, name, attrs, content, parent): return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement'])) - topDictOperators = [ -# opcode name argument type default converter - ((12, 30), 'ROS', ('SID','SID','number'), None, ROSConverter()), - ((12, 20), 'SyntheticBase', 'number', None, None), - (0, 'version', 'SID', None, None), - (1, 'Notice', 'SID', None, Latin1Converter()), - ((12, 0), 'Copyright', 'SID', None, Latin1Converter()), - (2, 'FullName', 'SID', None, None), - ((12, 38), 'FontName', 'SID', None, None), - (3, 'FamilyName', 'SID', None, None), - (4, 'Weight', 'SID', None, None), - ((12, 1), 'isFixedPitch', 'number', 0, None), - ((12, 2), 'ItalicAngle', 'number', 0, None), - ((12, 3), 'UnderlinePosition', 'number', None, None), - ((12, 4), 'UnderlineThickness', 'number', 50, None), - ((12, 5), 'PaintType', 'number', 0, None), - ((12, 6), 'CharstringType', 'number', 2, None), - ((12, 7), 'FontMatrix', 'array', [0.001,0,0,0.001,0,0], None), - (13, 'UniqueID', 'number', None, None), - (5, 'FontBBox', 
'array', [0,0,0,0], None), - ((12, 8), 'StrokeWidth', 'number', 0, None), - (14, 'XUID', 'array', None, None), - ((12, 21), 'PostScript', 'SID', None, None), - ((12, 22), 'BaseFontName', 'SID', None, None), - ((12, 23), 'BaseFontBlend', 'delta', None, None), - ((12, 31), 'CIDFontVersion', 'number', 0, None), - ((12, 32), 'CIDFontRevision', 'number', 0, None), - ((12, 33), 'CIDFontType', 'number', 0, None), - ((12, 34), 'CIDCount', 'number', 8720, None), - (15, 'charset', 'number', 0, CharsetConverter()), - ((12, 35), 'UIDBase', 'number', None, None), - (16, 'Encoding', 'number', 0, EncodingConverter()), - (18, 'Private', ('number','number'), None, PrivateDictConverter()), - ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), - ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), - (17, 'CharStrings', 'number', None, CharStringsConverter()), +# opcode name argument type default converter + ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()), + ((12, 20), 'SyntheticBase', 'number', None, None), + (0, 'version', 'SID', None, None), + (1, 'Notice', 'SID', None, Latin1Converter()), + ((12, 0), 'Copyright', 'SID', None, Latin1Converter()), + (2, 'FullName', 'SID', None, None), + ((12, 38), 'FontName', 'SID', None, None), + (3, 'FamilyName', 'SID', None, None), + (4, 'Weight', 'SID', None, None), + ((12, 1), 'isFixedPitch', 'number', 0, None), + ((12, 2), 'ItalicAngle', 'number', 0, None), + ((12, 3), 'UnderlinePosition', 'number', None, None), + ((12, 4), 'UnderlineThickness', 'number', 50, None), + ((12, 5), 'PaintType', 'number', 0, None), + ((12, 6), 'CharstringType', 'number', 2, None), + ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), + (13, 'UniqueID', 'number', None, None), + (5, 'FontBBox', 'array', [0, 0, 0, 0], None), + ((12, 8), 'StrokeWidth', 'number', 0, None), + (14, 'XUID', 'array', None, None), + ((12, 21), 'PostScript', 'SID', None, None), + ((12, 22), 'BaseFontName', 'SID', None, None), + ((12, 23), 
'BaseFontBlend', 'delta', None, None), + ((12, 31), 'CIDFontVersion', 'number', 0, None), + ((12, 32), 'CIDFontRevision', 'number', 0, None), + ((12, 33), 'CIDFontType', 'number', 0, None), + ((12, 34), 'CIDCount', 'number', 8720, None), + (15, 'charset', 'number', 0, CharsetConverter()), + ((12, 35), 'UIDBase', 'number', None, None), + (16, 'Encoding', 'number', 0, EncodingConverter()), + (18, 'Private', ('number', 'number'), None, PrivateDictConverter()), + ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), + ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), + (17, 'CharStrings', 'number', None, CharStringsConverter()), ] # Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, @@ -1243,27 +1259,27 @@ privateDictOperators = [ -# opcode name argument type default converter - (6, 'BlueValues', 'delta', None, None), - (7, 'OtherBlues', 'delta', None, None), - (8, 'FamilyBlues', 'delta', None, None), - (9, 'FamilyOtherBlues', 'delta', None, None), - ((12, 9), 'BlueScale', 'number', 0.039625, None), - ((12, 10), 'BlueShift', 'number', 7, None), - ((12, 11), 'BlueFuzz', 'number', 1, None), - (10, 'StdHW', 'number', None, None), - (11, 'StdVW', 'number', None, None), - ((12, 12), 'StemSnapH', 'delta', None, None), - ((12, 13), 'StemSnapV', 'delta', None, None), - ((12, 14), 'ForceBold', 'number', 0, None), - ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated - ((12, 16), 'lenIV', 'number', None, None), # deprecated - ((12, 17), 'LanguageGroup', 'number', 0, None), - ((12, 18), 'ExpansionFactor', 'number', 0.06, None), - ((12, 19), 'initialRandomSeed', 'number', 0, None), - (20, 'defaultWidthX', 'number', 0, None), - (21, 'nominalWidthX', 'number', 0, None), - (19, 'Subrs', 'number', None, SubrsConverter()), +# opcode name argument type default converter + (6, 'BlueValues', 'delta', None, None), + (7, 'OtherBlues', 'delta', None, None), + (8, 'FamilyBlues', 'delta', None, None), + (9, 
'FamilyOtherBlues', 'delta', None, None), + ((12, 9), 'BlueScale', 'number', 0.039625, None), + ((12, 10), 'BlueShift', 'number', 7, None), + ((12, 11), 'BlueFuzz', 'number', 1, None), + (10, 'StdHW', 'number', None, None), + (11, 'StdVW', 'number', None, None), + ((12, 12), 'StemSnapH', 'delta', None, None), + ((12, 13), 'StemSnapV', 'delta', None, None), + ((12, 14), 'ForceBold', 'number', 0, None), + ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated + ((12, 16), 'lenIV', 'number', None, None), # deprecated + ((12, 17), 'LanguageGroup', 'number', 0, None), + ((12, 18), 'ExpansionFactor', 'number', 0.06, None), + ((12, 19), 'initialRandomSeed', 'number', 0, None), + (20, 'defaultWidthX', 'number', 0, None), + (21, 'nominalWidthX', 'number', 0, None), + (19, 'Subrs', 'number', None, SubrsConverter()), ] def addConverters(table): @@ -1276,9 +1292,9 @@ elif arg == "number": conv = NumberConverter() elif arg == "SID": - conv = SimpleConverter() + conv = ASCIIConverter() else: - assert 0 + assert False table[i] = op, name, arg, default, conv addConverters(privateDictOperators) @@ -1293,8 +1309,8 @@ operators = buildOperatorDict(privateDictOperators) -class DictCompiler: - +class DictCompiler(object): + def __init__(self, dictObj, strings, parent): assert isinstance(strings, IndexedStrings) self.dictObj = dictObj @@ -1311,17 +1327,17 @@ continue rawDict[name] = value self.rawDict = rawDict - + def setPos(self, pos, endPos): pass - + def getDataLength(self): return len(self.compile("getDataLength")) - + def compile(self, reason): if DEBUG: - print "-- compiling %s for %s" % (self.__class__.__name__, reason) - print "in baseDict: ", self + print("-- compiling %s for %s" % (self.__class__.__name__, reason)) + print("in baseDict: ", self) rawDict = self.rawDict data = [] for name in self.dictObj.order: @@ -1341,11 +1357,11 @@ arghandler = getattr(self, "arg_" + argType) data.append(arghandler(value)) data.append(op) - return "".join(data) - + return 
bytesjoin(data) + def toFile(self, file): file.write(self.compile("toFile")) - + def arg_number(self, num): return encodeNumber(num) def arg_SID(self, s): @@ -1354,7 +1370,7 @@ data = [] for num in value: data.append(encodeNumber(num)) - return "".join(data) + return bytesjoin(data) def arg_delta(self, value): out = [] last = 0 @@ -1364,7 +1380,7 @@ data = [] for num in out: data.append(encodeNumber(num)) - return "".join(data) + return bytesjoin(data) def encodeNumber(num): @@ -1375,9 +1391,9 @@ class TopDictCompiler(DictCompiler): - + opcodes = buildOpcodeDict(topDictOperators) - + def getChildren(self, strings): children = [] if hasattr(self.dictObj, "charset") and self.dictObj.charset: @@ -1395,7 +1411,6 @@ if len(fdSelect) == 0: # probably read in from XML; assume fdIndex in CharString data charStrings = self.dictObj.CharStrings for name in self.dictObj.charset: - charstring = charStrings[name] fdSelect.append(charStrings[name].fdSelectIndex) fdSelectComp = FDSelectCompiler(fdSelect, self) children.append(fdSelectComp) @@ -1421,9 +1436,9 @@ class FontDictCompiler(DictCompiler): - + opcodes = buildOpcodeDict(topDictOperators) - + def getChildren(self, strings): children = [] if hasattr(self.dictObj, "Private"): @@ -1434,14 +1449,14 @@ class PrivateDictCompiler(DictCompiler): - + opcodes = buildOpcodeDict(privateDictOperators) - + def setPos(self, pos, endPos): size = endPos - pos self.parent.rawDict["Private"] = size, pos self.pos = pos - + def getChildren(self, strings): children = [] if hasattr(self.dictObj, "Subrs"): @@ -1449,42 +1464,42 @@ return children -class BaseDict: - +class BaseDict(object): + def __init__(self, strings=None, file=None, offset=None): self.rawDict = {} if DEBUG: - print "loading %s at %s" % (self.__class__.__name__, offset) + print("loading %s at %s" % (self.__class__.__name__, offset)) self.file = file self.offset = offset self.strings = strings self.skipNames = [] - + def decompile(self, data): if DEBUG: - print " length %s is %s" % 
(self.__class__.__name__, len(data)) + print(" length %s is %s" % (self.__class__.__name__, len(data))) dec = self.decompilerClass(self.strings) dec.decompile(data) self.rawDict = dec.getDict() self.postDecompile() - + def postDecompile(self): pass - + def getCompiler(self, strings, parent): return self.compilerClass(self, strings, parent) - + def __getattr__(self, name): value = self.rawDict.get(name) if value is None: value = self.defaults.get(name) if value is None: - raise AttributeError, name + raise AttributeError(name) conv = self.converters[name] value = conv.read(self, value) setattr(self, name, value) return value - + def toXML(self, xmlWriter, progress): for name in self.order: if name in self.skipNames: @@ -1494,28 +1509,28 @@ continue conv = self.converters[name] conv.xmlWrite(xmlWriter, name, value, progress) - - def fromXML(self, (name, attrs, content)): + + def fromXML(self, name, attrs, content): conv = self.converters[name] - value = conv.xmlRead((name, attrs, content), self) + value = conv.xmlRead(name, attrs, content, self) setattr(self, name, value) class TopDict(BaseDict): - + defaults = buildDefaults(topDictOperators) converters = buildConverters(topDictOperators) order = buildOrder(topDictOperators) decompilerClass = TopDictDecompiler compilerClass = TopDictCompiler - + def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): BaseDict.__init__(self, strings, file, offset) self.GlobalSubrs = GlobalSubrs - + def getGlyphOrder(self): return self.charset - + def postDecompile(self): offset = self.rawDict.get("CharStrings") if offset is None: @@ -1523,7 +1538,7 @@ # get the number of glyphs beforehand. 
self.file.seek(offset) self.numGlyphs = readCard16(self.file) - + def toXML(self, xmlWriter, progress): if hasattr(self, "CharStrings"): self.decompileAllCharStrings(progress) @@ -1535,7 +1550,7 @@ self.skipNames = ['CIDFontVersion', 'CIDFontRevision', 'CIDFontType', 'CIDCount'] BaseDict.toXML(self, xmlWriter, progress) - + def decompileAllCharStrings(self, progress): # XXX only when doing ttdump -i? i = 0 @@ -1543,34 +1558,33 @@ try: charString.decompile() except: - print "Error in charstring ", i + print("Error in charstring ", i) import sys - type, value = sys. exc_info()[0:2] - raise type(value) + typ, value = sys.exc_info()[0:2] + raise typ(value) if not i % 30 and progress: progress.increment(0) # update i = i + 1 class FontDict(BaseDict): - + defaults = buildDefaults(topDictOperators) converters = buildConverters(topDictOperators) order = buildOrder(topDictOperators) decompilerClass = None compilerClass = FontDictCompiler - + def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): BaseDict.__init__(self, strings, file, offset) self.GlobalSubrs = GlobalSubrs - + def getGlyphOrder(self): return self.charset - + def toXML(self, xmlWriter, progress): self.skipNames = ['Encoding'] BaseDict.toXML(self, xmlWriter, progress) - class PrivateDict(BaseDict): @@ -1581,45 +1595,45 @@ compilerClass = PrivateDictCompiler -class IndexedStrings: - +class IndexedStrings(object): + """SID -> string mapping.""" - + def __init__(self, file=None): if file is None: strings = [] else: - strings = list(Index(file)) + strings = [tostr(s, encoding="latin1") for s in Index(file)] self.strings = strings - + def getCompiler(self): return IndexedStringsCompiler(self, None, None) - + def __len__(self): return len(self.strings) - + def __getitem__(self, SID): if SID < cffStandardStringCount: return cffStandardStrings[SID] else: return self.strings[SID - cffStandardStringCount] - + def getSID(self, s): if not hasattr(self, "stringMapping"): self.buildStringMapping() - if 
cffStandardStringMapping.has_key(s): + if s in cffStandardStringMapping: SID = cffStandardStringMapping[s] - elif self.stringMapping.has_key(s): + elif s in self.stringMapping: SID = self.stringMapping[s] else: SID = len(self.strings) + cffStandardStringCount self.strings.append(s) self.stringMapping[s] = SID return SID - + def getStrings(self): return self.strings - + def buildStringMapping(self): self.stringMapping = {} for index in range(len(self.strings)): @@ -1629,68 +1643,68 @@ # The 391 Standard Strings as used in the CFF format. # from Adobe Technical None #5176, version 1.0, 18 March 1998 -cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', - 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', - 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', - 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', - 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', - 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', - 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', - 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', - 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', - 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', - 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', - 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', - 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', - 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', - 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', - 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', - 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', - 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', - 'dotlessi', 
'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', - 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', - 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', - 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', - 'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', - 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', - 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', - 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute', - 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', - 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', - 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', - 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', - 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', - 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', - 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', - 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', - 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle', - 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', - 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', - 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', - 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', - 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', - 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', - 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', - 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', - 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', - 'Ysmall', 'Zsmall', 'colonmonetary', 
'onefitted', 'rupiah', 'Tildesmall', - 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', - 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', - 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', - 'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', - 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', - 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', - 'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior', - 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', - 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', - 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', - 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', - 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', - 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', - 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', - 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', - 'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002', - '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', +cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', + 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', + 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', + 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', + 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', + 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', + 'd', 'e', 'f', 'g', 
'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', + 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', + 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', + 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', + 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', + 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', + 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', + 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', + 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', + 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', + 'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', + 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', + 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', + 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', + 'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', + 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', + 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', + 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute', + 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', + 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', + 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', + 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', + 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', + 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', + 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', + 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', + 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 
'fouroldstyle', + 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', + 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', + 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', + 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', + 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', + 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', + 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', + 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', + 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', + 'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', + 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', + 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', + 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', + 'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', + 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', + 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', + 'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior', + 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', + 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', + 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', + 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', + 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', + 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', + 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', + 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', + 'Yacutesmall', 'Thornsmall', 
'Ydieresissmall', '001.000', '001.001', '001.002', + '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', 'Semibold' ] diff -Nru fonttools-2.4/Lib/fontTools/encodings/codecs.py fonttools-3.0/Lib/fontTools/encodings/codecs.py --- fonttools-2.4/Lib/fontTools/encodings/codecs.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/encodings/codecs.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,135 @@ +"""Extend the Python codecs module with a few encodings that are used in OpenType (name table) +but missing from Python. See https://github.com/behdad/fonttools/issues/236 for details.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import codecs +import encodings + +class ExtendCodec(codecs.Codec): + + def __init__(self, name, base_encoding, mapping): + self.name = name + self.base_encoding = base_encoding + self.mapping = mapping + self.reverse = {v:k for k,v in mapping.items()} + self.max_len = max(len(v) for v in mapping.values()) + self.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode) + codecs.register_error(name, self.error) + + def encode(self, input, errors='strict'): + assert errors == 'strict' + #return codecs.encode(input, self.base_encoding, self.name), len(input) + + # The above line could totally be all we needed, relying on the error + # handling to replace the unencodable Unicode characters with our extended + # byte sequences. + # + # However, there seems to be a design bug in Python (probably intentional): + # the error handler for encoding is supposed to return a **Unicode** character, + # that then needs to be encodable itself... Ugh. + # + # So we implement what codecs.encode() should have been doing: which is expect + # error handler to return bytes() to be added to the output. + # + # This seems to have been fixed in Python 3.3. We should try using that and + # use fallback only if that failed. 
+ # https://docs.python.org/3.3/library/codecs.html#codecs.register_error + + length = len(input) + out = b'' + while input: + try: + part = codecs.encode(input, self.base_encoding) + out += part + input = '' # All converted + except UnicodeEncodeError as e: + # Convert the correct part + out += codecs.encode(input[:e.start], self.base_encoding) + replacement, pos = self.error(e) + out += replacement + input = input[pos:] + return out, length + + def decode(self, input, errors='strict'): + assert errors == 'strict' + return codecs.decode(input, self.base_encoding, self.name), len(input) + + def error(self, e): + if isinstance(e, UnicodeDecodeError): + for end in range(e.start + 1, e.end + 1): + s = e.object[e.start:end] + if s in self.mapping: + return self.mapping[s], end + elif isinstance(e, UnicodeEncodeError): + for end in range(e.start + 1, e.start + self.max_len + 1): + s = e.object[e.start:end] + if s in self.reverse: + return self.reverse[s], end + e.encoding = self.name + raise e + + +_extended_encodings = { + "x_mac_japanese_ttx": ("shift_jis", { + b"\xFC": unichr(0x007C), + b"\x7E": unichr(0x007E), + b"\x80": unichr(0x005C), + b"\xA0": unichr(0x00A0), + b"\xFD": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), + "x_mac_trad_chinese_ttx": ("big5", { + b"\x80": unichr(0x005C), + b"\xA0": unichr(0x00A0), + b"\xFD": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), + "x_mac_korean_ttx": ("euc_kr", { + b"\x80": unichr(0x00A0), + b"\x81": unichr(0x20A9), + b"\x82": unichr(0x2014), + b"\x83": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), + "x_mac_simp_chinese_ttx": ("gb2312", { + b"\x80": unichr(0x00FC), + b"\xA0": unichr(0x00A0), + b"\xFD": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), +} + +_cache = {} + +def search_function(name): + name = encodings.normalize_encoding(name) # Rather undocumented... 
+ if name in _extended_encodings: + if name not in _cache: + base_encoding, mapping = _extended_encodings[name] + assert(name[-4:] == "_ttx") + # Python 2 didn't have any of the encodings that we are implementing + # in this file. Python 3 added aliases for the East Asian ones, mapping + # them "temporarily" to the same base encoding as us, with a comment + # suggesting that full implementation will appear some time later. + # As such, try the Python version of the x_mac_... first, if that is found, + # use *that* as our base encoding. This would make our encoding upgrade + # to the full encoding when and if Python finally implements that. + # http://bugs.python.org/issue24041 + base_encodings = [name[:-4], base_encoding] + for base_encoding in base_encodings: + try: + codecs.lookup(base_encoding) + except LookupError: + continue + _cache[name] = ExtendCodec(name, base_encoding, mapping) + break + return _cache[name].info + + return None + +codecs.register(search_function) diff -Nru fonttools-2.4/Lib/fontTools/encodings/codecs_test.py fonttools-3.0/Lib/fontTools/encodings/codecs_test.py --- fonttools-2.4/Lib/fontTools/encodings/codecs_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/encodings/codecs_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,25 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +import unittest +import fontTools.encodings.codecs # Not to be confused with "import codecs" + +class ExtendedCodecsTest(unittest.TestCase): + + def test_decode_mac_japanese(self): + self.assertEqual(b'x\xfe\xfdy'.decode("x_mac_japanese_ttx"), + unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)) + + def test_encode_mac_japanese(self): + self.assertEqual(b'x\xfe\xfdy', + (unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)).encode("x_mac_japanese_ttx")) + + def test_decode_mac_trad_chinese(self): + self.assertEqual(b'\x80'.decode("x_mac_trad_chinese_ttx"), 
+ unichr(0x5C)) + + def test_decode_mac_romanian(self): + self.assertEqual(b'x\xfb'.decode("mac_romanian"), + unichr(0x78)+unichr(0x02DA)) + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/encodings/__init__.py fonttools-3.0/Lib/fontTools/encodings/__init__.py --- fonttools-2.4/Lib/fontTools/encodings/__init__.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/encodings/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,3 +1,4 @@ -"""Empty __init__.py file to signal Python this directory is a package. -(It can't be completely empty since WinZip seems to skip empty files.) -""" +"""Empty __init__.py file to signal Python this directory is a package.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * diff -Nru fonttools-2.4/Lib/fontTools/encodings/MacRoman.py fonttools-3.0/Lib/fontTools/encodings/MacRoman.py --- fonttools-2.4/Lib/fontTools/encodings/MacRoman.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/encodings/MacRoman.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,37 +1,39 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + MacRoman = [ - 'NUL', 'Eth', 'eth', 'Lslash', 'lslash', 'Scaron', 'scaron', 'Yacute', - 'yacute', 'HT', 'LF', 'Thorn', 'thorn', 'CR', 'Zcaron', 'zcaron', 'DLE', 'DC1', - 'DC2', 'DC3', 'DC4', 'onehalf', 'onequarter', 'onesuperior', 'threequarters', - 'threesuperior', 'twosuperior', 'brokenbar', 'minus', 'multiply', 'RS', 'US', - 'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand', - 'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma', - 'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five', - 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal', - 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', - 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 
'U', 'V', 'W', 'X', 'Y', 'Z', - 'bracketleft', 'backslash', 'bracketright', 'asciicircum', 'underscore', - 'grave', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', - 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', - 'braceright', 'asciitilde', 'DEL', 'Adieresis', 'Aring', 'Ccedilla', 'Eacute', - 'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex', - 'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave', 'ecircumflex', - 'edieresis', 'iacute', 'igrave', 'icircumflex', 'idieresis', 'ntilde', - 'oacute', 'ograve', 'ocircumflex', 'odieresis', 'otilde', 'uacute', 'ugrave', - 'ucircumflex', 'udieresis', 'dagger', 'degree', 'cent', 'sterling', 'section', - 'bullet', 'paragraph', 'germandbls', 'registered', 'copyright', 'trademark', - 'acute', 'dieresis', 'notequal', 'AE', 'Oslash', 'infinity', 'plusminus', - 'lessequal', 'greaterequal', 'yen', 'mu', 'partialdiff', 'summation', - 'product', 'pi', 'integral', 'ordfeminine', 'ordmasculine', 'Omega', 'ae', - 'oslash', 'questiondown', 'exclamdown', 'logicalnot', 'radical', 'florin', - 'approxequal', 'Delta', 'guillemotleft', 'guillemotright', 'ellipsis', - 'nbspace', 'Agrave', 'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', - 'quotedblleft', 'quotedblright', 'quoteleft', 'quoteright', 'divide', 'lozenge', - 'ydieresis', 'Ydieresis', 'fraction', 'currency', 'guilsinglleft', - 'guilsinglright', 'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase', - 'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute', - 'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Oacute', - 'Ocircumflex', 'apple', 'Ograve', 'Uacute', 'Ucircumflex', 'Ugrave', 'dotlessi', - 'circumflex', 'tilde', 'macron', 'breve', 'dotaccent', 'ring', 'cedilla', + 'NUL', 'Eth', 'eth', 'Lslash', 'lslash', 'Scaron', 'scaron', 'Yacute', + 'yacute', 'HT', 'LF', 'Thorn', 'thorn', 'CR', 'Zcaron', 'zcaron', 'DLE', 'DC1', + 'DC2', 'DC3', 'DC4', 
'onehalf', 'onequarter', 'onesuperior', 'threequarters', + 'threesuperior', 'twosuperior', 'brokenbar', 'minus', 'multiply', 'RS', 'US', + 'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand', + 'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma', + 'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five', + 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal', + 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', + 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'bracketleft', 'backslash', 'bracketright', 'asciicircum', 'underscore', + 'grave', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', + 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', + 'braceright', 'asciitilde', 'DEL', 'Adieresis', 'Aring', 'Ccedilla', 'Eacute', + 'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex', + 'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave', 'ecircumflex', + 'edieresis', 'iacute', 'igrave', 'icircumflex', 'idieresis', 'ntilde', + 'oacute', 'ograve', 'ocircumflex', 'odieresis', 'otilde', 'uacute', 'ugrave', + 'ucircumflex', 'udieresis', 'dagger', 'degree', 'cent', 'sterling', 'section', + 'bullet', 'paragraph', 'germandbls', 'registered', 'copyright', 'trademark', + 'acute', 'dieresis', 'notequal', 'AE', 'Oslash', 'infinity', 'plusminus', + 'lessequal', 'greaterequal', 'yen', 'mu', 'partialdiff', 'summation', + 'product', 'pi', 'integral', 'ordfeminine', 'ordmasculine', 'Omega', 'ae', + 'oslash', 'questiondown', 'exclamdown', 'logicalnot', 'radical', 'florin', + 'approxequal', 'Delta', 'guillemotleft', 'guillemotright', 'ellipsis', + 'nbspace', 'Agrave', 'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', + 'quotedblleft', 'quotedblright', 'quoteleft', 'quoteright', 'divide', 'lozenge', + 'ydieresis', 'Ydieresis', 'fraction', 'currency', 'guilsinglleft', + 
'guilsinglright', 'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase', + 'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute', + 'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Oacute', + 'Ocircumflex', 'apple', 'Ograve', 'Uacute', 'Ucircumflex', 'Ugrave', 'dotlessi', + 'circumflex', 'tilde', 'macron', 'breve', 'dotaccent', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron' ] - diff -Nru fonttools-2.4/Lib/fontTools/encodings/StandardEncoding.py fonttools-3.0/Lib/fontTools/encodings/StandardEncoding.py --- fonttools-2.4/Lib/fontTools/encodings/StandardEncoding.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/encodings/StandardEncoding.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,3 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + StandardEncoding = [ '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', diff -Nru fonttools-2.4/Lib/fontTools/feaLib/ast.py fonttools-3.0/Lib/fontTools/feaLib/ast.py --- fonttools-2.4/Lib/fontTools/feaLib/ast.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/ast.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,98 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals + + +class FeatureFile(object): + def __init__(self): + self.statements = [] + + +class FeatureBlock(object): + def __init__(self, location, name, use_extension): + self.location = location + self.name, self.use_extension = name, use_extension + self.statements = [] + + +class LookupBlock(object): + def __init__(self, location, name, use_extension): + self.location = location + self.name, self.use_extension = name, use_extension + self.statements = [] + + +class GlyphClassDefinition(object): + def __init__(self, location, name, glyphs): + self.location = location + self.name = name + self.glyphs = glyphs + 
+ +class AlternateSubstitution(object): + def __init__(self, location, glyph, from_class): + self.location = location + self.glyph, self.from_class = (glyph, from_class) + + +class AnchorDefinition(object): + def __init__(self, location, name, x, y, contourpoint): + self.location = location + self.name, self.x, self.y, self.contourpoint = name, x, y, contourpoint + + +class LanguageStatement(object): + def __init__(self, location, language, include_default, required): + self.location = location + self.language = language + self.include_default = include_default + self.required = required + + +class LanguageSystemStatement(object): + def __init__(self, location, script, language): + self.location = location + self.script, self.language = (script, language) + + +class IgnoreSubstitutionRule(object): + def __init__(self, location, prefix, glyphs, suffix): + self.location = location + self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix) + + +class LookupReferenceStatement(object): + def __init__(self, location, lookup): + self.location, self.lookup = (location, lookup) + + +class ScriptStatement(object): + def __init__(self, location, script): + self.location = location + self.script = script + + +class SubtableStatement(object): + def __init__(self, location): + self.location = location + + +class SubstitutionRule(object): + def __init__(self, location, old, new): + self.location, self.old, self.new = (location, old, new) + self.old_prefix = [] + self.old_suffix = [] + self.lookups = [None] * len(old) + + +class ValueRecord(object): + def __init__(self, location, xPlacement, yPlacement, xAdvance, yAdvance): + self.location = location + self.xPlacement, self.yPlacement = (xPlacement, yPlacement) + self.xAdvance, self.yAdvance = (xAdvance, yAdvance) + + +class ValueRecordDefinition(object): + def __init__(self, location, name, value): + self.location = location + self.name = name + self.value = value diff -Nru fonttools-2.4/Lib/fontTools/feaLib/__init__.py 
fonttools-3.0/Lib/fontTools/feaLib/__init__.py --- fonttools-2.4/Lib/fontTools/feaLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +"""fontTools.feaLib -- a package for dealing with OpenType feature files.""" + +# The structure of OpenType feature files is defined here: +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html diff -Nru fonttools-2.4/Lib/fontTools/feaLib/lexer.py fonttools-3.0/Lib/fontTools/feaLib/lexer.py --- fonttools-2.4/Lib/fontTools/feaLib/lexer.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/lexer.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,203 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +import codecs +import os + + +class LexerError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + path, line, column = self.location + return "%s:%d:%d: %s" % (path, line, column, message) + else: + return message + + +class Lexer(object): + NUMBER = "NUMBER" + STRING = "STRING" + NAME = "NAME" + FILENAME = "FILENAME" + GLYPHCLASS = "GLYPHCLASS" + CID = "CID" + SYMBOL = "SYMBOL" + COMMENT = "COMMENT" + NEWLINE = "NEWLINE" + + CHAR_WHITESPACE_ = " \t" + CHAR_NEWLINE_ = "\r\n" + CHAR_SYMBOL_ = ";:-+'{}[]<>()=" + CHAR_DIGIT_ = "0123456789" + CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" + CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + CHAR_NAME_START_ = CHAR_LETTER_ + "_.\\" + CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_." 
+ + MODE_NORMAL_ = "NORMAL" + MODE_FILENAME_ = "FILENAME" + + def __init__(self, text, filename): + self.filename_ = filename + self.line_ = 1 + self.pos_ = 0 + self.line_start_ = 0 + self.text_ = text + self.text_length_ = len(text) + self.mode_ = Lexer.MODE_NORMAL_ + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # Python 3 + while True: + token_type, token, location = self.next_() + if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}: + return (token_type, token, location) + + def next_(self): + self.scan_over_(Lexer.CHAR_WHITESPACE_) + column = self.pos_ - self.line_start_ + 1 + location = (self.filename_, self.line_, column) + start = self.pos_ + text = self.text_ + limit = len(text) + if start >= limit: + raise StopIteration() + cur_char = text[start] + next_char = text[start + 1] if start + 1 < limit else None + + if cur_char == "\n": + self.pos_ += 1 + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "\r": + self.pos_ += (2 if next_char == "\n" else 1) + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "#": + self.scan_until_(Lexer.CHAR_NEWLINE_) + return (Lexer.COMMENT, text[start:self.pos_], location) + + if self.mode_ is Lexer.MODE_FILENAME_: + if cur_char != "(": + raise LexerError("Expected '(' before file name", location) + self.scan_until_(")") + cur_char = text[self.pos_] if self.pos_ < limit else None + if cur_char != ")": + raise LexerError("Expected ')' after file name", location) + self.pos_ += 1 + self.mode_ = Lexer.MODE_NORMAL_ + return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location) + + if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.CID, int(text[start + 1:self.pos_], 10), location) + if cur_char == "@": + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + 
glyphclass = text[start + 1:self.pos_] + if len(glyphclass) < 1: + raise LexerError("Expected glyph class name", location) + if len(glyphclass) > 30: + raise LexerError( + "Glyph class names must not be longer than 30 characters", + location) + return (Lexer.GLYPHCLASS, glyphclass, location) + if cur_char in Lexer.CHAR_NAME_START_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + token = text[start:self.pos_] + if token == "include": + self.mode_ = Lexer.MODE_FILENAME_ + return (Lexer.NAME, token, location) + if cur_char == "0" and next_char in "xX": + self.pos_ += 2 + self.scan_over_(Lexer.CHAR_HEXDIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 16), location) + if cur_char in Lexer.CHAR_DIGIT_: + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if cur_char in Lexer.CHAR_SYMBOL_: + self.pos_ += 1 + return (Lexer.SYMBOL, cur_char, location) + if cur_char == '"': + self.pos_ += 1 + self.scan_until_('"\r\n') + if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': + self.pos_ += 1 + return (Lexer.STRING, text[start + 1:self.pos_ - 1], location) + else: + raise LexerError("Expected '\"' to terminate string", location) + raise LexerError("Unexpected character: '%s'" % cur_char, location) + + def scan_over_(self, valid): + p = self.pos_ + while p < self.text_length_ and self.text_[p] in valid: + p += 1 + self.pos_ = p + + def scan_until_(self, stop_at): + p = self.pos_ + while p < self.text_length_ and self.text_[p] not in stop_at: + p += 1 + self.pos_ = p + + +class IncludingLexer(object): + def __init__(self, filename): + self.lexers_ = [self.make_lexer_(filename, (filename, 0, 0))] + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # 
Python 3 + while self.lexers_: + lexer = self.lexers_[-1] + try: + token_type, token, location = lexer.next() + except StopIteration: + self.lexers_.pop() + continue + if token_type is Lexer.NAME and token == "include": + fname_type, fname_token, fname_location = lexer.next() + if fname_type is not Lexer.FILENAME: + raise LexerError("Expected file name", fname_location) + semi_type, semi_token, semi_location = lexer.next() + if semi_type is not Lexer.SYMBOL or semi_token != ";": + raise LexerError("Expected ';'", semi_location) + curpath, _ = os.path.split(lexer.filename_) + path = os.path.join(curpath, fname_token) + if len(self.lexers_) >= 5: + raise LexerError("Too many recursive includes", + fname_location) + self.lexers_.append(self.make_lexer_(path, fname_location)) + continue + else: + return (token_type, token, location) + raise StopIteration() + + @staticmethod + def make_lexer_(filename, location): + try: + with codecs.open(filename, "rb", "utf-8") as f: + return Lexer(f.read(), filename) + except IOError as err: + raise LexerError(str(err), location) diff -Nru fonttools-2.4/Lib/fontTools/feaLib/lexer_test.py fonttools-3.0/Lib/fontTools/feaLib/lexer_test.py --- fonttools-2.4/Lib/fontTools/feaLib/lexer_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/lexer_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,160 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.lexer import IncludingLexer, Lexer, LexerError +import os +import unittest + + +def lex(s): + return [(typ, tok) for (typ, tok, _) in Lexer(s, "test.fea")] + + +class LexerErrorTest(unittest.TestCase): + def test_str(self): + err = LexerError("Squeak!", ("foo.fea", 23, 42)) + self.assertEqual(str(err), "foo.fea:23:42: Squeak!") + + def test_str_nolocation(self): + err = LexerError("Squeak!", None) + self.assertEqual(str(err), "Squeak!") + + +class LexerTest(unittest.TestCase): + def 
__init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_empty(self): + self.assertEqual(lex(""), []) + self.assertEqual(lex(" \t "), []) + + def test_name(self): + self.assertEqual(lex("a17"), [(Lexer.NAME, "a17")]) + self.assertEqual(lex(".notdef"), [(Lexer.NAME, ".notdef")]) + self.assertEqual(lex("two.oldstyle"), [(Lexer.NAME, "two.oldstyle")]) + self.assertEqual(lex("_"), [(Lexer.NAME, "_")]) + self.assertEqual(lex("\\table"), [(Lexer.NAME, "\\table")]) + + def test_cid(self): + self.assertEqual(lex("\\0 \\987"), [(Lexer.CID, 0), (Lexer.CID, 987)]) + + def test_glyphclass(self): + self.assertEqual(lex("@Vowel.sc"), [(Lexer.GLYPHCLASS, "Vowel.sc")]) + self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@(a)") + self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@ A") + self.assertRaisesRegex(LexerError, "not be longer than 30 characters", + lex, "@a123456789.a123456789.a123456789.x") + + def test_include(self): + self.assertEqual(lex("include (~/foo/bar baz.fea);"), [ + (Lexer.NAME, "include"), + (Lexer.FILENAME, "~/foo/bar baz.fea"), + (Lexer.SYMBOL, ";") + ]) + self.assertEqual(lex("include # Comment\n (foo) \n;"), [ + (Lexer.NAME, "include"), + (Lexer.FILENAME, "foo"), + (Lexer.SYMBOL, ";") + ]) + self.assertRaises(LexerError, lex, "include blah") + self.assertRaises(LexerError, lex, "include (blah") + + def test_number(self): + self.assertEqual(lex("123 -456"), + [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)]) + self.assertEqual(lex("0xCAFED00D"), [(Lexer.NUMBER, 0xCAFED00D)]) + self.assertEqual(lex("0xcafed00d"), [(Lexer.NUMBER, 0xCAFED00D)]) + + def test_symbol(self): + self.assertEqual(lex("a'"), [(Lexer.NAME, "a"), (Lexer.SYMBOL, "'")]) + self.assertEqual( + lex("foo - -2"), + 
[(Lexer.NAME, "foo"), (Lexer.SYMBOL, "-"), (Lexer.NUMBER, -2)]) + + def test_comment(self): + self.assertEqual(lex("# Comment\n#"), []) + + def test_string(self): + self.assertEqual(lex('"foo" "bar"'), + [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")]) + self.assertRaises(LexerError, lambda: lex('"foo\n bar"')) + + def test_bad_character(self): + self.assertRaises(LexerError, lambda: lex("123 \u0001")) + + def test_newline(self): + lines = lambda s: [loc[1] for (_, _, loc) in Lexer(s, "test.fea")] + self.assertEqual(lines("FOO\n\nBAR\nBAZ"), [1, 3, 4]) # Unix + self.assertEqual(lines("FOO\r\rBAR\rBAZ"), [1, 3, 4]) # Macintosh + self.assertEqual(lines("FOO\r\n\r\n BAR\r\nBAZ"), [1, 3, 4]) # Windows + self.assertEqual(lines("FOO\n\rBAR\r\nBAZ"), [1, 3, 4]) # mixed + + def test_location(self): + locs = lambda s: ["%s:%d:%d" % loc + for (_, _, loc) in Lexer(s, "test.fea")] + self.assertEqual(locs("a b # Comment\n12 @x"), [ + "test.fea:1:1", "test.fea:1:3", "test.fea:2:1", + "test.fea:2:4" + ]) + + def test_scan_over_(self): + lexer = Lexer("abbacabba12", "test.fea") + self.assertEqual(lexer.pos_, 0) + lexer.scan_over_("xyz") + self.assertEqual(lexer.pos_, 0) + lexer.scan_over_("abc") + self.assertEqual(lexer.pos_, 9) + lexer.scan_over_("abc") + self.assertEqual(lexer.pos_, 9) + lexer.scan_over_("0123456789") + self.assertEqual(lexer.pos_, 11) + + def test_scan_until_(self): + lexer = Lexer("foo'bar", "test.fea") + self.assertEqual(lexer.pos_, 0) + lexer.scan_until_("'") + self.assertEqual(lexer.pos_, 3) + lexer.scan_until_("'") + self.assertEqual(lexer.pos_, 3) + + +class IncludingLexerTest(unittest.TestCase): + @staticmethod + def getpath(filename): + path, _ = os.path.split(__file__) + return os.path.join(path, "testdata", filename) + + def test_include(self): + lexer = IncludingLexer(self.getpath("include4.fea")) + result = ['%s %s:%d' % (token, os.path.split(loc[0])[1], loc[1]) + for _, token, loc in lexer] + self.assertEqual(result, [ + "I4a include4.fea:1", + "I3a 
include3.fea:1", + "I2a include2.fea:1", + "I1a include1.fea:1", + "I0 include0.fea:1", + "I1b include1.fea:3", + "I2b include2.fea:3", + "I3b include3.fea:3", + "I4b include4.fea:3" + ]) + + def test_include_limit(self): + lexer = IncludingLexer(self.getpath("include6.fea")) + self.assertRaises(LexerError, lambda: list(lexer)) + + def test_include_self(self): + lexer = IncludingLexer(self.getpath("includeself.fea")) + self.assertRaises(LexerError, lambda: list(lexer)) + + def test_include_missing_file(self): + lexer = IncludingLexer(self.getpath("includemissingfile.fea")) + self.assertRaises(LexerError, lambda: list(lexer)) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/feaLib/parser.py fonttools-3.0/Lib/fontTools/feaLib/parser.py --- fonttools-2.4/Lib/fontTools/feaLib/parser.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/parser.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,466 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.lexer import Lexer, IncludingLexer +import fontTools.feaLib.ast as ast +import os +import re + + +class ParserError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + path, line, column = self.location + return "%s:%d:%d: %s" % (path, line, column, message) + else: + return message + + +class Parser(object): + def __init__(self, path): + self.doc_ = ast.FeatureFile() + self.anchors_ = SymbolTable() + self.glyphclasses_ = SymbolTable() + self.lookups_ = SymbolTable() + self.valuerecords_ = SymbolTable() + self.symbol_tables_ = { + self.anchors_, self.glyphclasses_, + self.lookups_, self.valuerecords_ + } + self.next_token_type_, self.next_token_ = (None, None) + self.next_token_location_ = None + self.lexer_ = IncludingLexer(path) + 
self.advance_lexer_() + + def parse(self): + statements = self.doc_.statements + while self.next_token_type_ is not None: + self.advance_lexer_() + if self.cur_token_type_ is Lexer.GLYPHCLASS: + statements.append(self.parse_glyphclass_definition_()) + elif self.is_cur_keyword_("anchorDef"): + statements.append(self.parse_anchordef_()) + elif self.is_cur_keyword_("languagesystem"): + statements.append(self.parse_languagesystem_()) + elif self.is_cur_keyword_("lookup"): + statements.append(self.parse_lookup_(vertical=False)) + elif self.is_cur_keyword_("feature"): + statements.append(self.parse_feature_block_()) + elif self.is_cur_keyword_("valueRecordDef"): + statements.append( + self.parse_valuerecord_definition_(vertical=False)) + else: + raise ParserError("Expected feature, languagesystem, " + "lookup, or glyph class definition", + self.cur_token_location_) + return self.doc_ + + def parse_anchordef_(self): + assert self.is_cur_keyword_("anchorDef") + location = self.cur_token_location_ + x, y = self.expect_number_(), self.expect_number_() + contourpoint = None + if self.next_token_ == "contourpoint": + self.expect_keyword_("contourpoint") + contourpoint = self.expect_number_() + name = self.expect_name_() + self.expect_symbol_(";") + anchordef = ast.AnchorDefinition(location, name, x, y, contourpoint) + self.anchors_.define(name, anchordef) + return anchordef + + def parse_glyphclass_definition_(self): + location, name = self.cur_token_location_, self.cur_token_ + self.expect_symbol_("=") + glyphs = self.parse_glyphclass_(accept_glyphname=False) + self.expect_symbol_(";") + if self.glyphclasses_.resolve(name) is not None: + raise ParserError("Glyph class @%s already defined" % name, + location) + glyphclass = ast.GlyphClassDefinition(location, name, glyphs) + self.glyphclasses_.define(name, glyphclass) + return glyphclass + + def parse_glyphclass_(self, accept_glyphname): + result = set() + if accept_glyphname and self.next_token_type_ is Lexer.NAME: + 
result.add(self.expect_name_()) + return result + if self.next_token_type_ is Lexer.GLYPHCLASS: + self.advance_lexer_() + gc = self.glyphclasses_.resolve(self.cur_token_) + if gc is None: + raise ParserError("Unknown glyph class @%s" % self.cur_token_, + self.cur_token_location_) + result.update(gc.glyphs) + return result + + self.expect_symbol_("[") + while self.next_token_ != "]": + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + if self.next_token_ == "-": + range_location_ = self.cur_token_location_ + range_start = self.cur_token_ + self.expect_symbol_("-") + range_end = self.expect_name_() + result.update(self.make_glyph_range_(range_location_, + range_start, + range_end)) + else: + result.add(self.cur_token_) + elif self.cur_token_type_ is Lexer.GLYPHCLASS: + gc = self.glyphclasses_.resolve(self.cur_token_) + if gc is None: + raise ParserError( + "Unknown glyph class @%s" % self.cur_token_, + self.cur_token_location_) + result.update(gc.glyphs) + else: + raise ParserError( + "Expected glyph name, glyph range, " + "or glyph class reference", + self.cur_token_location_) + self.expect_symbol_("]") + return result + + def parse_glyph_pattern_(self): + prefix, glyphs, lookups, suffix = ([], [], [], []) + while self.next_token_ not in {"by", "from", ";"}: + gc = self.parse_glyphclass_(accept_glyphname=True) + marked = False + if self.next_token_ == "'": + self.expect_symbol_("'") + marked = True + if marked: + glyphs.append(gc) + elif glyphs: + suffix.append(gc) + else: + prefix.append(gc) + + lookup = None + if self.next_token_ == "lookup": + self.expect_keyword_("lookup") + if not marked: + raise ParserError("Lookups can only follow marked glyphs", + self.cur_token_location_) + lookup_name = self.expect_name_() + lookup = self.lookups_.resolve(lookup_name) + if lookup is None: + raise ParserError('Unknown lookup "%s"' % lookup_name, + self.cur_token_location_) + if marked: + lookups.append(lookup) + + if not glyphs and not suffix: # eg., "sub f f 
i by" + assert lookups == [] + return ([], prefix, [None] * len(prefix), []) + else: + return (prefix, glyphs, lookups, suffix) + + def parse_ignore_(self): + assert self.is_cur_keyword_("ignore") + location = self.cur_token_location_ + self.advance_lexer_() + if self.cur_token_ in ["substitute", "sub"]: + prefix, glyphs, lookups, suffix = self.parse_glyph_pattern_() + self.expect_symbol_(";") + return ast.IgnoreSubstitutionRule(location, prefix, glyphs, suffix) + raise ParserError("Expected \"substitute\"", self.next_token_location_) + + def parse_language_(self): + assert self.is_cur_keyword_("language") + location, language = self.cur_token_location_, self.expect_tag_() + include_default, required = (True, False) + if self.next_token_ in {"exclude_dflt", "include_dflt"}: + include_default = (self.expect_name_() == "include_dflt") + if self.next_token_ == "required": + self.expect_keyword_("required") + required = True + self.expect_symbol_(";") + return ast.LanguageStatement(location, language.strip(), + include_default, required) + + def parse_lookup_(self, vertical): + assert self.is_cur_keyword_("lookup") + location, name = self.cur_token_location_, self.expect_name_() + + if self.next_token_ == ";": + lookup = self.lookups_.resolve(name) + if lookup is None: + raise ParserError("Unknown lookup \"%s\"" % name, + self.cur_token_location_) + self.expect_symbol_(";") + return ast.LookupReferenceStatement(location, lookup) + + use_extension = False + if self.next_token_ == "useExtension": + self.expect_keyword_("useExtension") + use_extension = True + + block = ast.LookupBlock(location, name, use_extension) + self.parse_block_(block, vertical) + self.lookups_.define(name, block) + return block + + def parse_script_(self): + assert self.is_cur_keyword_("script") + location, script = self.cur_token_location_, self.expect_tag_() + self.expect_symbol_(";") + return ast.ScriptStatement(location, script) + + def parse_substitute_(self): + assert self.cur_token_ in 
{"substitute", "sub"} + location = self.cur_token_location_ + old_prefix, old, lookups, old_suffix = self.parse_glyph_pattern_() + + new = [] + if self.next_token_ == "by": + keyword = self.expect_keyword_("by") + while self.next_token_ != ";": + new.append(self.parse_glyphclass_(accept_glyphname=True)) + elif self.next_token_ == "from": + keyword = self.expect_keyword_("from") + new = [self.parse_glyphclass_(accept_glyphname=False)] + else: + keyword = None + self.expect_symbol_(";") + if len(new) is 0 and not any(lookups): + raise ParserError( + 'Expected "by", "from" or explicit lookup references', + self.cur_token_location_) + + if keyword == "from": + if len(old) != 1 or len(old[0]) != 1: + raise ParserError('Expected a single glyph before "from"', + location) + if len(new) != 1: + raise ParserError('Expected a single glyphclass after "from"', + location) + return ast.AlternateSubstitution(location, list(old[0])[0], new[0]) + + rule = ast.SubstitutionRule(location, old, new) + rule.old_prefix, rule.old_suffix = old_prefix, old_suffix + rule.lookups = lookups + return rule + + def parse_subtable_(self): + assert self.is_cur_keyword_("subtable") + location = self.cur_token_location_ + self.expect_symbol_(";") + return ast.SubtableStatement(location) + + def parse_valuerecord_(self, vertical): + if self.next_token_type_ is Lexer.NUMBER: + number, location = self.expect_number_(), self.cur_token_location_ + if vertical: + val = ast.ValueRecord(location, 0, 0, 0, number) + else: + val = ast.ValueRecord(location, 0, 0, number, 0) + return val + self.expect_symbol_("<") + location = self.cur_token_location_ + if self.next_token_type_ is Lexer.NAME: + name = self.expect_name_() + vrd = self.valuerecords_.resolve(name) + if vrd is None: + raise ParserError("Unknown valueRecordDef \"%s\"" % name, + self.cur_token_location_) + value = vrd.value + xPlacement, yPlacement = (value.xPlacement, value.yPlacement) + xAdvance, yAdvance = (value.xAdvance, value.yAdvance) + else: 
+ xPlacement, yPlacement, xAdvance, yAdvance = ( + self.expect_number_(), self.expect_number_(), + self.expect_number_(), self.expect_number_()) + self.expect_symbol_(">") + return ast.ValueRecord( + location, xPlacement, yPlacement, xAdvance, yAdvance) + + def parse_valuerecord_definition_(self, vertical): + assert self.is_cur_keyword_("valueRecordDef") + location = self.cur_token_location_ + value = self.parse_valuerecord_(vertical) + name = self.expect_name_() + self.expect_symbol_(";") + vrd = ast.ValueRecordDefinition(location, name, value) + self.valuerecords_.define(name, vrd) + return vrd + + def parse_languagesystem_(self): + assert self.cur_token_ == "languagesystem" + location = self.cur_token_location_ + script, language = self.expect_tag_(), self.expect_tag_() + self.expect_symbol_(";") + return ast.LanguageSystemStatement(location, script, language) + + def parse_feature_block_(self): + assert self.cur_token_ == "feature" + location = self.cur_token_location_ + tag = self.expect_tag_() + vertical = (tag == "vkrn") + + use_extension = False + if self.next_token_ == "useExtension": + self.expect_keyword_("useExtension") + use_extension = True + + block = ast.FeatureBlock(location, tag, use_extension) + self.parse_block_(block, vertical) + return block + + def parse_block_(self, block, vertical): + self.expect_symbol_("{") + for symtab in self.symbol_tables_: + symtab.enter_scope() + + statements = block.statements + while self.next_token_ != "}": + self.advance_lexer_() + if self.cur_token_type_ is Lexer.GLYPHCLASS: + statements.append(self.parse_glyphclass_definition_()) + elif self.is_cur_keyword_("anchorDef"): + statements.append(self.parse_anchordef_()) + elif self.is_cur_keyword_("ignore"): + statements.append(self.parse_ignore_()) + elif self.is_cur_keyword_("language"): + statements.append(self.parse_language_()) + elif self.is_cur_keyword_("lookup"): + statements.append(self.parse_lookup_(vertical)) + elif self.is_cur_keyword_("script"): + 
statements.append(self.parse_script_()) + elif (self.is_cur_keyword_("substitute") or + self.is_cur_keyword_("sub")): + statements.append(self.parse_substitute_()) + elif self.is_cur_keyword_("subtable"): + statements.append(self.parse_subtable_()) + elif self.is_cur_keyword_("valueRecordDef"): + statements.append(self.parse_valuerecord_definition_(vertical)) + else: + raise ParserError( + "Expected glyph class definition or statement", + self.cur_token_location_) + + self.expect_symbol_("}") + for symtab in self.symbol_tables_: + symtab.exit_scope() + + name = self.expect_name_() + if name != block.name.strip(): + raise ParserError("Expected \"%s\"" % block.name.strip(), + self.cur_token_location_) + self.expect_symbol_(";") + + def is_cur_keyword_(self, k): + return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k) + + def expect_tag_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.NAME: + raise ParserError("Expected a tag", self.cur_token_location_) + if len(self.cur_token_) > 4: + raise ParserError("Tags can not be longer than 4 characters", + self.cur_token_location_) + return (self.cur_token_ + " ")[:4] + + def expect_symbol_(self, symbol): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol: + return symbol + raise ParserError("Expected '%s'" % symbol, self.cur_token_location_) + + def expect_keyword_(self, keyword): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword: + return self.cur_token_ + raise ParserError("Expected \"%s\"" % keyword, + self.cur_token_location_) + + def expect_name_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + return self.cur_token_ + raise ParserError("Expected a name", self.cur_token_location_) + + def expect_number_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NUMBER: + return self.cur_token_ + raise ParserError("Expected a number", self.cur_token_location_) + + 
def advance_lexer_(self): + self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( + self.next_token_type_, self.next_token_, self.next_token_location_) + try: + (self.next_token_type_, self.next_token_, + self.next_token_location_) = self.lexer_.next() + except StopIteration: + self.next_token_type_, self.next_token_ = (None, None) + + def make_glyph_range_(self, location, start, limit): + """("a.sc", "d.sc") --> {"a.sc", "b.sc", "c.sc", "d.sc"}""" + result = set() + if len(start) != len(limit): + raise ParserError( + "Bad range: \"%s\" and \"%s\" should have the same length" % + (start, limit), location) + rev = lambda s: ''.join(reversed(list(s))) # string reversal + prefix = os.path.commonprefix([start, limit]) + suffix = rev(os.path.commonprefix([rev(start), rev(limit)])) + if len(suffix) > 0: + start_range = start[len(prefix):-len(suffix)] + limit_range = limit[len(prefix):-len(suffix)] + else: + start_range = start[len(prefix):] + limit_range = limit[len(prefix):] + + if start_range >= limit_range: + raise ParserError("Start of range must be smaller than its end", + location) + + uppercase = re.compile(r'^[A-Z]$') + if uppercase.match(start_range) and uppercase.match(limit_range): + for c in range(ord(start_range), ord(limit_range) + 1): + result.add("%s%c%s" % (prefix, c, suffix)) + return result + + lowercase = re.compile(r'^[a-z]$') + if lowercase.match(start_range) and lowercase.match(limit_range): + for c in range(ord(start_range), ord(limit_range) + 1): + result.add("%s%c%s" % (prefix, c, suffix)) + return result + + digits = re.compile(r'^[0-9]{1,3}$') + if digits.match(start_range) and digits.match(limit_range): + for i in range(int(start_range, 10), int(limit_range, 10) + 1): + number = ("000" + str(i))[-len(start_range):] + result.add("%s%s%s" % (prefix, number, suffix)) + return result + + raise ParserError("Bad range: \"%s-%s\"" % (start, limit), location) + + +class SymbolTable(object): + def __init__(self): + self.scopes_ = [{}] + 
+ def enter_scope(self): + self.scopes_.append({}) + + def exit_scope(self): + self.scopes_.pop() + + def define(self, name, item): + self.scopes_[-1][name] = item + + def resolve(self, name): + for scope in reversed(self.scopes_): + item = scope.get(name) + if item: + return item + return None diff -Nru fonttools-2.4/Lib/fontTools/feaLib/parser_test.py fonttools-3.0/Lib/fontTools/feaLib/parser_test.py --- fonttools-2.4/Lib/fontTools/feaLib/parser_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/parser_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,448 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.lexer import LexerError +from fontTools.feaLib.parser import Parser, ParserError, SymbolTable +from fontTools.misc.py23 import * +import fontTools.feaLib.ast as ast +import codecs +import os +import shutil +import sys +import tempfile +import unittest + + +class ParserTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_anchordef(self): + [foo] = self.parse("anchorDef 123 456 foo;").statements + self.assertEqual(type(foo), ast.AnchorDefinition) + self.assertEqual(foo.name, "foo") + self.assertEqual(foo.x, 123) + self.assertEqual(foo.y, 456) + self.assertEqual(foo.contourpoint, None) + + def test_anchordef_contourpoint(self): + [foo] = self.parse("anchorDef 123 456 contourpoint 5 foo;").statements + self.assertEqual(type(foo), ast.AnchorDefinition) + self.assertEqual(foo.name, "foo") + self.assertEqual(foo.x, 123) + self.assertEqual(foo.y, 456) + self.assertEqual(foo.contourpoint, 5) + + def test_feature_block(self): + [liga] = self.parse("feature liga {} liga;").statements + self.assertEqual(liga.name, "liga") + self.assertFalse(liga.use_extension) + + def test_feature_block_useExtension(self): + [liga] = self.parse("feature liga useExtension {} liga;").statements + self.assertEqual(liga.name, "liga") + self.assertTrue(liga.use_extension) + + def test_glyphclass(self): + [gc] = self.parse("@dash = [endash emdash figuredash];").statements + self.assertEqual(gc.name, "dash") + self.assertEqual(gc.glyphs, {"endash", "emdash", "figuredash"}) + + def test_glyphclass_bad(self): + self.assertRaisesRegex( + ParserError, + "Expected glyph name, glyph range, or glyph class reference", + self.parse, "@bad = [a 123];") + + def test_glyphclass_duplicate(self): + self.assertRaisesRegex( + ParserError, "Glyph class @dup already defined", + self.parse, "@dup = [a b]; @dup = [x];") + + def test_glyphclass_empty(self): + [gc] = self.parse("@empty_set = [];").statements + self.assertEqual(gc.name, "empty_set") + self.assertEqual(gc.glyphs, set()) + + def test_glyphclass_equality(self): + [foo, bar] = self.parse("@foo = [a b]; @bar = @foo;").statements + self.assertEqual(foo.glyphs, {"a", "b"}) + self.assertEqual(bar.glyphs, {"a", "b"}) + + def test_glyphclass_range_uppercase(self): + [gc] = 
self.parse("@swashes = [X.swash-Z.swash];").statements + self.assertEqual(gc.name, "swashes") + self.assertEqual(gc.glyphs, {"X.swash", "Y.swash", "Z.swash"}) + + def test_glyphclass_range_lowercase(self): + [gc] = self.parse("@defg.sc = [d.sc-g.sc];").statements + self.assertEqual(gc.name, "defg.sc") + self.assertEqual(gc.glyphs, {"d.sc", "e.sc", "f.sc", "g.sc"}) + + def test_glyphclass_range_digit1(self): + [gc] = self.parse("@range = [foo.2-foo.5];").statements + self.assertEqual(gc.glyphs, {"foo.2", "foo.3", "foo.4", "foo.5"}) + + def test_glyphclass_range_digit2(self): + [gc] = self.parse("@range = [foo.09-foo.11];").statements + self.assertEqual(gc.glyphs, {"foo.09", "foo.10", "foo.11"}) + + def test_glyphclass_range_digit3(self): + [gc] = self.parse("@range = [foo.123-foo.125];").statements + self.assertEqual(gc.glyphs, {"foo.123", "foo.124", "foo.125"}) + + def test_glyphclass_range_bad(self): + self.assertRaisesRegex( + ParserError, + "Bad range: \"a\" and \"foobar\" should have the same length", + self.parse, "@bad = [a-foobar];") + self.assertRaisesRegex( + ParserError, "Bad range: \"A.swash-z.swash\"", + self.parse, "@bad = [A.swash-z.swash];") + self.assertRaisesRegex( + ParserError, "Start of range must be smaller than its end", + self.parse, "@bad = [B.swash-A.swash];") + self.assertRaisesRegex( + ParserError, "Bad range: \"foo.1234-foo.9876\"", + self.parse, "@bad = [foo.1234-foo.9876];") + + def test_glyphclass_range_mixed(self): + [gc] = self.parse("@range = [a foo.09-foo.11 X.sc-Z.sc];").statements + self.assertEqual(gc.glyphs, { + "a", "foo.09", "foo.10", "foo.11", "X.sc", "Y.sc", "Z.sc" + }) + + def test_glyphclass_reference(self): + [vowels_lc, vowels_uc, vowels] = self.parse( + "@Vowels.lc = [a e i o u]; @Vowels.uc = [A E I O U];" + "@Vowels = [@Vowels.lc @Vowels.uc y Y];").statements + self.assertEqual(vowels_lc.glyphs, set(list("aeiou"))) + self.assertEqual(vowels_uc.glyphs, set(list("AEIOU"))) + self.assertEqual(vowels.glyphs, 
set(list("aeiouyAEIOUY"))) + self.assertRaisesRegex( + ParserError, "Unknown glyph class @unknown", + self.parse, "@bad = [@unknown];") + + def test_glyphclass_scoping(self): + [foo, liga, smcp] = self.parse( + "@foo = [a b];" + "feature liga { @bar = [@foo l]; } liga;" + "feature smcp { @bar = [@foo s]; } smcp;" + ).statements + self.assertEqual(foo.glyphs, {"a", "b"}) + self.assertEqual(liga.statements[0].glyphs, {"a", "b", "l"}) + self.assertEqual(smcp.statements[0].glyphs, {"a", "b", "s"}) + + def test_ignore_sub(self): + doc = self.parse("feature test {ignore sub e t' c;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.IgnoreSubstitutionRule) + self.assertEqual(s.prefix, [{"e"}]) + self.assertEqual(s.glyphs, [{"t"}]) + self.assertEqual(s.suffix, [{"c"}]) + + def test_ignore_substitute(self): + doc = self.parse( + "feature test {" + " ignore substitute f [a e] d' [a u]' [e y];" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.IgnoreSubstitutionRule) + self.assertEqual(s.prefix, [{"f"}, {"a", "e"}]) + self.assertEqual(s.glyphs, [{"d"}, {"a", "u"}]) + self.assertEqual(s.suffix, [{"e", "y"}]) + + def test_language(self): + doc = self.parse("feature test {language DEU;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertTrue(s.include_default) + self.assertFalse(s.required) + + def test_language_exclude_dflt(self): + doc = self.parse("feature test {language DEU exclude_dflt;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertFalse(s.include_default) + self.assertFalse(s.required) + + def test_language_exclude_dflt_required(self): + doc = self.parse("feature test {" + " language DEU exclude_dflt required;" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + 
self.assertEqual(s.language, "DEU") + self.assertFalse(s.include_default) + self.assertTrue(s.required) + + def test_language_include_dflt(self): + doc = self.parse("feature test {language DEU include_dflt;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertTrue(s.include_default) + self.assertFalse(s.required) + + def test_language_include_dflt_required(self): + doc = self.parse("feature test {" + " language DEU include_dflt required;" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertTrue(s.include_default) + self.assertTrue(s.required) + + def test_lookup_block(self): + [lookup] = self.parse("lookup Ligatures {} Ligatures;").statements + self.assertEqual(lookup.name, "Ligatures") + self.assertFalse(lookup.use_extension) + + def test_lookup_block_useExtension(self): + [lookup] = self.parse("lookup Foo useExtension {} Foo;").statements + self.assertEqual(lookup.name, "Foo") + self.assertTrue(lookup.use_extension) + + def test_lookup_block_name_mismatch(self): + self.assertRaisesRegex( + ParserError, 'Expected "Foo"', + self.parse, "lookup Foo {} Bar;") + + def test_lookup_block_with_horizontal_valueRecordDef(self): + doc = self.parse("feature liga {" + " lookup look {" + " valueRecordDef 123 foo;" + " } look;" + "} liga;") + [liga] = doc.statements + [look] = liga.statements + [foo] = look.statements + self.assertEqual(foo.value.xAdvance, 123) + self.assertEqual(foo.value.yAdvance, 0) + + def test_lookup_block_with_vertical_valueRecordDef(self): + doc = self.parse("feature vkrn {" + " lookup look {" + " valueRecordDef 123 foo;" + " } look;" + "} vkrn;") + [vkrn] = doc.statements + [look] = vkrn.statements + [foo] = look.statements + self.assertEqual(foo.value.xAdvance, 0) + self.assertEqual(foo.value.yAdvance, 123) + + def test_lookup_reference(self): + [foo, bar] = 
self.parse("lookup Foo {} Foo;" + "feature Bar {lookup Foo;} Bar;").statements + [ref] = bar.statements + self.assertEqual(type(ref), ast.LookupReferenceStatement) + self.assertEqual(ref.lookup, foo) + + def test_lookup_reference_unknown(self): + self.assertRaisesRegex( + ParserError, 'Unknown lookup "Huh"', + self.parse, "feature liga {lookup Huh;} liga;") + + def test_script(self): + doc = self.parse("feature test {script cyrl;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.ScriptStatement) + self.assertEqual(s.script, "cyrl") + + def test_substitute_single_format_a(self): # GSUB LookupType 1 + doc = self.parse("feature smcp {substitute a by a.sc;} smcp;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"a"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"a.sc"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_single_format_b(self): # GSUB LookupType 1 + doc = self.parse( + "feature smcp {" + " substitute [one.fitted one.oldstyle] by one;" + "} smcp;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"one.fitted", "one.oldstyle"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"one"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_single_format_c(self): # GSUB LookupType 1 + doc = self.parse( + "feature smcp {" + " substitute [a-d] by [A.sc-D.sc];" + "} smcp;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"a", "b", "c", "d"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"A.sc", "B.sc", "C.sc", "D.sc"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_multiple(self): # GSUB LookupType 2 + doc = self.parse("lookup Look {substitute f_f_i by f f i;} Look;") + sub = doc.statements[0].statements[0] + self.assertEqual(type(sub), 
ast.SubstitutionRule) + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"f_f_i"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"f"}, {"f"}, {"i"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_from(self): # GSUB LookupType 3 + doc = self.parse("feature test {" + " substitute a from [a.1 a.2 a.3];" + "} test;") + sub = doc.statements[0].statements[0] + self.assertEqual(type(sub), ast.AlternateSubstitution) + self.assertEqual(sub.glyph, "a") + self.assertEqual(sub.from_class, {"a.1", "a.2", "a.3"}) + + def test_substitute_from_glyphclass(self): # GSUB LookupType 3 + doc = self.parse("feature test {" + " @Ampersands = [ampersand.1 ampersand.2];" + " substitute ampersand from @Ampersands;" + "} test;") + [glyphclass, sub] = doc.statements[0].statements + self.assertEqual(type(sub), ast.AlternateSubstitution) + self.assertEqual(sub.glyph, "ampersand") + self.assertEqual(sub.from_class, {"ampersand.1", "ampersand.2"}) + + def test_substitute_ligature(self): # GSUB LookupType 4 + doc = self.parse("feature liga {substitute f f i by f_f_i;} liga;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"f"}, {"f"}, {"i"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"f_f_i"}]) + self.assertEqual(sub.lookups, [None, None, None]) + + def test_substitute_lookups(self): + doc = Parser(self.getpath("spec5fi.fea")).parse() + [ligs, sub, feature] = doc.statements + self.assertEqual(feature.statements[0].lookups, [ligs, None, sub]) + self.assertEqual(feature.statements[1].lookups, [ligs, None, sub]) + + def test_substitute_missing_by(self): + self.assertRaisesRegex( + ParserError, 'Expected "by", "from" or explicit lookup references', + self.parse, "feature liga {substitute f f i;} liga;") + + def test_subtable(self): + doc = self.parse("feature test {subtable;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), 
ast.SubtableStatement) + + def test_valuerecord_format_a_horizontal(self): + doc = self.parse("feature liga {valueRecordDef 123 foo;} liga;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 0) + self.assertEqual(value.yPlacement, 0) + self.assertEqual(value.xAdvance, 123) + self.assertEqual(value.yAdvance, 0) + + def test_valuerecord_format_a_vertical(self): + doc = self.parse("feature vkrn {valueRecordDef 123 foo;} vkrn;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 0) + self.assertEqual(value.yPlacement, 0) + self.assertEqual(value.xAdvance, 0) + self.assertEqual(value.yAdvance, 123) + + def test_valuerecord_format_b(self): + doc = self.parse("feature liga {valueRecordDef <1 2 3 4> foo;} liga;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 1) + self.assertEqual(value.yPlacement, 2) + self.assertEqual(value.xAdvance, 3) + self.assertEqual(value.yAdvance, 4) + + def test_valuerecord_named(self): + doc = self.parse("valueRecordDef <1 2 3 4> foo;" + "feature liga {valueRecordDef bar;} liga;") + value = doc.statements[1].statements[0].value + self.assertEqual(value.xPlacement, 1) + self.assertEqual(value.yPlacement, 2) + self.assertEqual(value.xAdvance, 3) + self.assertEqual(value.yAdvance, 4) + + def test_valuerecord_named_unknown(self): + self.assertRaisesRegex( + ParserError, "Unknown valueRecordDef \"unknown\"", + self.parse, "valueRecordDef foo;") + + def test_valuerecord_scoping(self): + [foo, liga, smcp] = self.parse( + "valueRecordDef 789 foo;" + "feature liga {valueRecordDef bar;} liga;" + "feature smcp {valueRecordDef bar;} smcp;" + ).statements + self.assertEqual(foo.value.xAdvance, 789) + self.assertEqual(liga.statements[0].value.xAdvance, 789) + self.assertEqual(smcp.statements[0].value.xAdvance, 789) + + def test_languagesystem(self): + [langsys] = self.parse("languagesystem latn DEU;").statements + self.assertEqual(langsys.script, 
"latn") + self.assertEqual(langsys.language, "DEU ") + self.assertRaisesRegex( + ParserError, "Expected ';'", + self.parse, "languagesystem latn DEU") + self.assertRaisesRegex( + ParserError, "longer than 4 characters", + self.parse, "languagesystem foobar DEU") + self.assertRaisesRegex( + ParserError, "longer than 4 characters", + self.parse, "languagesystem latn FOOBAR") + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + def parse(self, text): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + self.num_tempfiles += 1 + path = os.path.join(self.tempdir, "tmp%d.fea" % self.num_tempfiles) + with codecs.open(path, "wb", "utf-8") as outfile: + outfile.write(text) + return Parser(path).parse() + + @staticmethod + def getpath(testfile): + path, _ = os.path.split(__file__) + return os.path.join(path, "testdata", testfile) + + +class SymbolTableTest(unittest.TestCase): + def test_scopes(self): + symtab = SymbolTable() + symtab.define("foo", 23) + self.assertEqual(symtab.resolve("foo"), 23) + symtab.enter_scope() + self.assertEqual(symtab.resolve("foo"), 23) + symtab.define("foo", 42) + self.assertEqual(symtab.resolve("foo"), 42) + symtab.exit_scope() + self.assertEqual(symtab.resolve("foo"), 23) + + def test_resolve_undefined(self): + self.assertEqual(SymbolTable().resolve("abc"), None) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/include0.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/include0.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/include0.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/include0.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1 @@ +I0 diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/include1.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/include1.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/include1.fea 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/include1.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I1a +include(include0.fea); +I1b diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/include2.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/include2.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/include2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/include2.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I2a +include(include1.fea); +I2b diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/include3.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/include3.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/include3.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/include3.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +I3a +include(include2.fea); +I3b + diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/include4.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/include4.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/include4.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/include4.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +I4a +include(include3.fea); +I4b + diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/include5.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/include5.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/include5.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/include5.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I5a +include(include4.fea); +I5b diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/include6.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/include6.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/include6.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/include6.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I6a 
+include(include5.fea); +I6b diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/includemissingfile.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/includemissingfile.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/includemissingfile.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/includemissingfile.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1 @@ +include(missingfile.fea); diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/includeself.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/includeself.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/includeself.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/includeself.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1 @@ +include(includeself.fea); diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/mini.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/mini.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/mini.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/mini.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,19 @@ +# Example file from OpenType Feature File specification, section 1. 
+# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +# Script and language coverage +languagesystem DFLT dflt; +languagesystem latn dflt; + +# Ligature formation +feature liga { + substitute f i by f_i; + substitute f l by f_l; +} liga; + +# Kerning +feature kern { + position A Y -100; + position a y -80; + position s f' <0 0 10 0> t; +} kern; diff -Nru fonttools-2.4/Lib/fontTools/feaLib/testdata/spec5fi.fea fonttools-3.0/Lib/fontTools/feaLib/testdata/spec5fi.fea --- fonttools-2.4/Lib/fontTools/feaLib/testdata/spec5fi.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/feaLib/testdata/spec5fi.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,18 @@ +# OpenType Feature File specification, section 5.f.i, example 1 +# "Specifying a Chain Sub rule and marking sub-runs" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +lookup CNTXT_LIGS { + substitute f i by f_i; + substitute c t by c_t; + } CNTXT_LIGS; + +lookup CNTXT_SUB { + substitute n by n.end; + substitute s by s.end; + } CNTXT_SUB; + +feature test { + substitute [a e i o u] f' lookup CNTXT_LIGS i' n' lookup CNTXT_SUB; + substitute [a e i o u] c' lookup CNTXT_LIGS t' s' lookup CNTXT_SUB; +} test; diff -Nru fonttools-2.4/Lib/fontTools/fondLib.py fonttools-3.0/Lib/fontTools/fondLib.py --- fonttools-2.4/Lib/fontTools/fondLib.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/fondLib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,553 +0,0 @@ -import os -import struct, sstruct -import string -try: - from Carbon import Res -except ImportError: - import Res - - -error = "fondLib.error" - -DEBUG = 0 - -headerformat = """ - > - ffFlags: h - ffFamID: h - ffFirstChar: h - ffLastChar: h - ffAscent: h - ffDescent: h - ffLeading: h - ffWidMax: h - ffWTabOff: l - ffKernOff: l - ffStylOff: l -""" - -FONDheadersize = 52 - -class FontFamily: - - def __init__(self, theRes, mode = 'r'): - self.ID, type, self.name = theRes.GetResInfo() - if 
type <> 'FOND': - raise ValueError, "FOND resource required" - self.FOND = theRes - self.mode = mode - self.changed = 0 - - if DEBUG: - self.parsedthings = [] - - def parse(self): - self._getheader() - self._getfontassociationtable() - self._getoffsettable() - self._getboundingboxtable() - self._getglyphwidthtable() - self._getstylemappingtable() - self._getglyphencodingsubtable() - self._getkerningtables() - - def minimalparse(self): - self._getheader() - self._getglyphwidthtable() - self._getstylemappingtable() - - def __repr__(self): - return "" % self.name - - def getflags(self): - return self.fondClass - - def setflags(self, flags): - self.changed = 1 - self.fondClass = flags - - def save(self, destresfile = None): - if self.mode <> 'w': - raise error, "can't save font: no write permission" - self._buildfontassociationtable() - self._buildoffsettable() - self._buildboundingboxtable() - self._buildglyphwidthtable() - self._buildkerningtables() - self._buildstylemappingtable() - self._buildglyphencodingsubtable() - rawnames = [ "_rawheader", - "_rawfontassociationtable", - "_rawoffsettable", - "_rawglyphwidthtable", - "_rawstylemappingtable", - "_rawglyphencodingsubtable", - "_rawkerningtables" - ] - for name in rawnames[1:]: # skip header - data = getattr(self, name) - if len(data) & 1: - setattr(self, name, data + '\0') - - self.ffWTabOff = FONDheadersize + len(self._rawfontassociationtable) + len(self._rawoffsettable) - self.ffStylOff = self.ffWTabOff + len(self._rawglyphwidthtable) - self.ffKernOff = self.ffStylOff + len(self._rawstylemappingtable) + len(self._rawglyphencodingsubtable) - self.glyphTableOffset = len(self._rawstylemappingtable) - - if not self._rawglyphwidthtable: - self.ffWTabOff = 0 - if not self._rawstylemappingtable: - self.ffStylOff = 0 - if not self._rawglyphencodingsubtable: - self.glyphTableOffset = 0 - if not self._rawkerningtables: - self.ffKernOff = 0 - - self._buildheader() - - # glyphTableOffset has only just been calculated - 
self._updatestylemappingtable() - - newdata = "" - for name in rawnames: - newdata = newdata + getattr(self, name) - if destresfile is None: - self.FOND.data = newdata - self.FOND.ChangedResource() - self.FOND.WriteResource() - else: - ID, type, name = self.FOND.GetResInfo() - self.FOND.DetachResource() - self.FOND.data = newdata - saveref = Res.CurResFile() - Res.UseResFile(destresfile) - self.FOND.AddResource(type, ID, name) - Res.UseResFile(saveref) - self.changed = 0 - - def _getheader(self): - data = self.FOND.data - sstruct.unpack(headerformat, data[:28], self) - self.ffProperty = struct.unpack(">9h", data[28:46]) - self.ffIntl = struct.unpack(">hh", data[46:50]) - self.ffVersion, = struct.unpack(">h", data[50:FONDheadersize]) - - if DEBUG: - self._rawheader = data[:FONDheadersize] - self.parsedthings.append((0, FONDheadersize, 'header')) - - def _buildheader(self): - header = sstruct.pack(headerformat, self) - header = header + apply(struct.pack, (">9h",) + self.ffProperty) - header = header + apply(struct.pack, (">hh",) + self.ffIntl) - header = header + struct.pack(">h", self.ffVersion) - if DEBUG: - print "header is the same?", self._rawheader == header and 'yes.' or 'no.' 
- if self._rawheader <> header: - print len(self._rawheader), len(header) - self._rawheader = header - - def _getfontassociationtable(self): - data = self.FOND.data - offset = FONDheadersize - numberofentries, = struct.unpack(">h", data[offset:offset+2]) - numberofentries = numberofentries + 1 - size = numberofentries * 6 - self.fontAssoc = [] - for i in range(offset + 2, offset + size, 6): - self.fontAssoc.append(struct.unpack(">3h", data[i:i+6])) - - self._endoffontassociationtable = offset + size + 2 - if DEBUG: - self._rawfontassociationtable = data[offset:self._endoffontassociationtable] - self.parsedthings.append((offset, self._endoffontassociationtable, 'fontassociationtable')) - - def _buildfontassociationtable(self): - data = struct.pack(">h", len(self.fontAssoc) - 1) - for size, stype, ID in self.fontAssoc: - data = data + struct.pack(">3h", size, stype, ID) - - if DEBUG: - print "font association table is the same?", self._rawfontassociationtable == data and 'yes.' or 'no.' - if self._rawfontassociationtable <> data: - print len(self._rawfontassociationtable), len(data) - self._rawfontassociationtable = data - - def _getoffsettable(self): - if self.ffWTabOff == 0: - self._rawoffsettable = "" - return - data = self.FOND.data - # Quick'n'Dirty. What's the spec anyway? Can't find it... - offset = self._endoffontassociationtable - count = self.ffWTabOff - self._rawoffsettable = data[offset:count] - if DEBUG: - self.parsedthings.append((offset, count, 'offsettable&bbtable')) - - def _buildoffsettable(self): - if not hasattr(self, "_rawoffsettable"): - self._rawoffsettable = "" - - def _getboundingboxtable(self): - self.boundingBoxes = None - if self._rawoffsettable[:6] <> '\0\0\0\0\0\6': # XXX ???? 
- return - boxes = {} - data = self._rawoffsettable[6:] - numstyles = struct.unpack(">h", data[:2])[0] + 1 - data = data[2:] - for i in range(numstyles): - style, l, b, r, t = struct.unpack(">hhhhh", data[:10]) - boxes[style] = (l, b, r, t) - data = data[10:] - self.boundingBoxes = boxes - - def _buildboundingboxtable(self): - if self.boundingBoxes and self._rawoffsettable[:6] == '\0\0\0\0\0\6': - boxes = self.boundingBoxes.items() - boxes.sort() - data = '\0\0\0\0\0\6' + struct.pack(">h", len(boxes) - 1) - for style, (l, b, r, t) in boxes: - data = data + struct.pack(">hhhhh", style, l, b, r, t) - self._rawoffsettable = data - - def _getglyphwidthtable(self): - self.widthTables = {} - if self.ffWTabOff == 0: - return - data = self.FOND.data - offset = self.ffWTabOff - numberofentries, = struct.unpack(">h", data[offset:offset+2]) - numberofentries = numberofentries + 1 - count = offset + 2 - for i in range(numberofentries): - stylecode, = struct.unpack(">h", data[count:count+2]) - widthtable = self.widthTables[stylecode] = [] - count = count + 2 - for j in range(3 + self.ffLastChar - self.ffFirstChar): - width, = struct.unpack(">h", data[count:count+2]) - widthtable.append(width) - count = count + 2 - - if DEBUG: - self._rawglyphwidthtable = data[offset:count] - self.parsedthings.append((offset, count, 'glyphwidthtable')) - - def _buildglyphwidthtable(self): - if not self.widthTables: - self._rawglyphwidthtable = "" - return - numberofentries = len(self.widthTables) - data = struct.pack('>h', numberofentries - 1) - tables = self.widthTables.items() - tables.sort() - for stylecode, table in tables: - data = data + struct.pack('>h', stylecode) - if len(table) <> (3 + self.ffLastChar - self.ffFirstChar): - raise error, "width table has wrong length" - for width in table: - data = data + struct.pack('>h', width) - if DEBUG: - print "glyph width table is the same?", self._rawglyphwidthtable == data and 'yes.' or 'no.' 
- self._rawglyphwidthtable = data - - def _getkerningtables(self): - self.kernTables = {} - if self.ffKernOff == 0: - return - data = self.FOND.data - offset = self.ffKernOff - numberofentries, = struct.unpack(">h", data[offset:offset+2]) - numberofentries = numberofentries + 1 - count = offset + 2 - for i in range(numberofentries): - stylecode, = struct.unpack(">h", data[count:count+2]) - count = count + 2 - numberofpairs, = struct.unpack(">h", data[count:count+2]) - count = count + 2 - kerntable = self.kernTables[stylecode] = [] - for j in range(numberofpairs): - firstchar, secondchar, kerndistance = struct.unpack(">cch", data[count:count+4]) - kerntable.append((ord(firstchar), ord(secondchar), kerndistance)) - count = count + 4 - - if DEBUG: - self._rawkerningtables = data[offset:count] - self.parsedthings.append((offset, count, 'kerningtables')) - - def _buildkerningtables(self): - if self.kernTables == {}: - self._rawkerningtables = "" - self.ffKernOff = 0 - return - numberofentries = len(self.kernTables) - data = [struct.pack('>h', numberofentries - 1)] - tables = self.kernTables.items() - tables.sort() - for stylecode, table in tables: - data.append(struct.pack('>h', stylecode)) - data.append(struct.pack('>h', len(table))) # numberofpairs - for firstchar, secondchar, kerndistance in table: - data.append(struct.pack(">cch", chr(firstchar), chr(secondchar), kerndistance)) - - data = string.join(data, '') - - if DEBUG: - print "kerning table is the same?", self._rawkerningtables == data and 'yes.' or 'no.' 
- if self._rawkerningtables <> data: - print len(self._rawkerningtables), len(data) - self._rawkerningtables = data - - def _getstylemappingtable(self): - offset = self.ffStylOff - self.styleStrings = [] - self.styleIndices = () - self.glyphTableOffset = 0 - self.fondClass = 0 - if offset == 0: - return - data = self.FOND.data - self.fondClass, self.glyphTableOffset, self.styleMappingReserved, = \ - struct.unpack(">hll", data[offset:offset+10]) - self.styleIndices = struct.unpack('>48b', data[offset + 10:offset + 58]) - stringcount, = struct.unpack('>h', data[offset+58:offset+60]) - - count = offset + 60 - for i in range(stringcount): - str_len = ord(data[count]) - self.styleStrings.append(data[count + 1:count + 1 + str_len]) - count = count + 1 + str_len - - self._unpackstylestrings() - - data = data[offset:count] - if len(data) % 2: - data = data + '\0' - if DEBUG: - self._rawstylemappingtable = data - self.parsedthings.append((offset, count, 'stylemappingtable')) - - def _buildstylemappingtable(self): - if not self.styleIndices: - self._rawstylemappingtable = "" - return - data = struct.pack(">hll", self.fondClass, self.glyphTableOffset, - self.styleMappingReserved) - - self._packstylestrings() - data = data + apply(struct.pack, (">48b",) + self.styleIndices) - - stringcount = len(self.styleStrings) - data = data + struct.pack(">h", stringcount) - for string in self.styleStrings: - data = data + chr(len(string)) + string - - if len(data) % 2: - data = data + '\0' - - if DEBUG: - print "style mapping table is the same?", self._rawstylemappingtable == data and 'yes.' or 'no.' 
- self._rawstylemappingtable = data - - def _unpackstylestrings(self): - psNames = {} - self.ffFamilyName = self.styleStrings[0] - for i in self.widthTables.keys(): - index = self.styleIndices[i] - if index == 1: - psNames[i] = self.styleStrings[0] - else: - style = self.styleStrings[0] - codes = map(ord, self.styleStrings[index - 1]) - for code in codes: - style = style + self.styleStrings[code - 1] - psNames[i] = style - self.psNames = psNames - - def _packstylestrings(self): - nameparts = {} - splitnames = {} - for style, name in self.psNames.items(): - split = splitname(name, self.ffFamilyName) - splitnames[style] = split - for part in split: - nameparts[part] = None - del nameparts[self.ffFamilyName] - nameparts = nameparts.keys() - nameparts.sort() - items = splitnames.items() - items.sort() - numindices = 0 - for style, split in items: - if len(split) > 1: - numindices = numindices + 1 - numindices = max(numindices, max(self.styleIndices) - 1) - styleStrings = [self.ffFamilyName] + numindices * [""] + nameparts - # XXX the next bit goes wrong for MM fonts. - for style, split in items: - if len(split) == 1: - continue - indices = "" - for part in split[1:]: - indices = indices + chr(nameparts.index(part) + numindices + 2) - styleStrings[self.styleIndices[style] - 1] = indices - self.styleStrings = styleStrings - - def _updatestylemappingtable(self): - # Update the glyphTableOffset field. - # This is necessary since we have to build this table to - # know what the glyphTableOffset will be. - # And we don't want to build it twice, do we? 
- data = self._rawstylemappingtable - if not data: - return - data = data[:2] + struct.pack(">l", self.glyphTableOffset) + data[6:] - self._rawstylemappingtable = data - - def _getglyphencodingsubtable(self): - glyphEncoding = self.glyphEncoding = {} - if not self.glyphTableOffset: - return - offset = self.ffStylOff + self.glyphTableOffset - data = self.FOND.data - numberofentries, = struct.unpack(">h", data[offset:offset+2]) - count = offset + 2 - for i in range(numberofentries): - glyphcode = ord(data[count]) - count = count + 1 - strlen = ord(data[count]) - count = count + 1 - glyphname = data[count:count+strlen] - glyphEncoding[glyphcode] = glyphname - count = count + strlen - - if DEBUG: - self._rawglyphencodingsubtable = data[offset:count] - self.parsedthings.append((offset, count, 'glyphencodingsubtable')) - - def _buildglyphencodingsubtable(self): - if not self.glyphEncoding: - self._rawglyphencodingsubtable = "" - return - numberofentries = len(self.glyphEncoding) - data = struct.pack(">h", numberofentries) - items = self.glyphEncoding.items() - items.sort() - for glyphcode, glyphname in items: - data = data + chr(glyphcode) + chr(len(glyphname)) + glyphname - self._rawglyphencodingsubtable = data - - -uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' - -def splitname(name, famname = None): - # XXX this goofs up MM font names: but how should it be done?? 
- if famname: - if name[:len(famname)] <> famname: - raise error, "first part of name should be same as family name" - name = name[len(famname):] - split = [famname] - else: - split = [] - current = "" - for c in name: - if c == '-' or c in uppercase: - if current: - split.append(current) - current = "" - current = current + c - if current: - split.append(current) - return split - -def makeLWFNfilename(name): - split = splitname(name) - lwfnname = split[0][:5] - for part in split[1:]: - if part <> '-': - lwfnname = lwfnname + part[:3] - return lwfnname - -class BitmapFontFile: - - def __init__(self, path, mode='r'): - if mode == 'r': - permission = 1 # read only - elif mode == 'w': - permission = 3 # exclusive r/w - else: - raise error, 'mode should be either "r" or "w"' - self.mode = mode - self.resref = Res.FSOpenResFile(path, permission) - Res.UseResFile(self.resref) - self.path = path - self.fonds = [] - self.getFONDs() - - def getFONDs(self): - FONDcount = Res.Count1Resources('FOND') - for i in range(FONDcount): - fond = FontFamily(Res.Get1IndResource('FOND', i + 1), self.mode) - self.fonds.append(fond) - - def parse(self): - self.fondsbyname = {} - for fond in self.fonds: - fond.parse() - if hasattr(fond, "psNames") and fond.psNames: - psNames = fond.psNames.values() - psNames.sort() - self.fondsbyname[psNames[0]] = fond - - def minimalparse(self): - for fond in self.fonds: - fond.minimalparse() - - def close(self): - if self.resref <> None: - try: - Res.CloseResFile(self.resref) - except Res.Error: - pass - self.resref = None - - -class FondSelector: - - def __init__(self, fondlist): - import W - if not fondlist: - raise ValueError, "expected at least one FOND entry" - if len(fondlist) == 1: - self.choice = 0 - return - fonds = [] - for fond in fondlist: - fonds.append(fond.name) - self.w = W.ModalDialog((200, 200), "aaa") - self.w.donebutton = W.Button((-70, -26, 60, 16), "Done", self.close) - self.w.l = W.List((10, 10, -10, -36), fonds, self.listhit) - 
self.w.setdefaultbutton(self.w.donebutton) - self.w.l.setselection([0]) - self.w.open() - - def close(self): - self.checksel() - sel = self.w.l.getselection() - self.choice = sel[0] - self.w.close() - - def listhit(self, isDbl): - if isDbl: - self.w.donebutton.push() - else: - self.checksel() - - def checksel(self): - sel = self.w.l.getselection() - if not sel: - self.w.l.setselection([0]) - elif len(sel) <> 1: - self.w.l.setselection([sel[0]]) - diff -Nru fonttools-2.4/Lib/fontTools/__init__.py fonttools-3.0/Lib/fontTools/__init__.py --- fonttools-2.4/Lib/fontTools/__init__.py 2013-06-22 14:25:29.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -1 +1,4 @@ -version = "2.4" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +version = "3.0" diff -Nru fonttools-2.4/Lib/fontTools/inspect.py fonttools-3.0/Lib/fontTools/inspect.py --- fonttools-2.4/Lib/fontTools/inspect.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/inspect.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,265 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +"""GUI font inspector. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import misc, ttLib, cffLib +import pygtk +pygtk.require('2.0') +import gtk +import sys + + +class Row(object): + def __init__(self, parent, index, key, value, font): + self._parent = parent + self._index = index + self._key = key + self._value = value + self._font = font + + if isinstance(value, ttLib.TTFont): + self._add_font(value) + return + + if not isinstance(value, basestring): + # Try sequences + is_sequence = True + try: + len(value) + iter(value) + # It's hard to differentiate list-type sequences + # from dict-type ones. Try fetching item 0. 
+ value[0] + except (TypeError, AttributeError, KeyError, IndexError): + is_sequence = False + if is_sequence: + self._add_list(key, value) + return + if hasattr(value, '__dict__'): + self._add_object(key, value) + return + if hasattr(value, 'items'): + self._add_dict(key, value) + return + + if isinstance(value, basestring): + self._value_str = '"'+value+'"' + self._children = [] + return + + # Everything else + self._children = [] + + def _filter_items(self): + items = [] + for k,v in self._items: + if isinstance(v, ttLib.TTFont): + continue + if k in ['reader', 'file', 'tableTag', 'compileStatus', 'recurse']: + continue + if isinstance(k, basestring) and k[0] == '_': + continue + items.append((k,v)) + self._items = items + + def _add_font(self, font): + self._items = [(tag,font[tag]) for tag in font.keys()] + + def _add_object(self, key, value): + # Make sure item is decompiled + try: + value["asdf"] + except (AttributeError, KeyError, TypeError, ttLib.TTLibError): + pass + if isinstance(value, ttLib.getTableModule('glyf').Glyph): + # Glyph type needs explicit expanding to be useful + value.expand(self._font['glyf']) + if isinstance(value, misc.psCharStrings.T2CharString): + try: + value.decompile() + except TypeError: # Subroutines can't be decompiled + pass + if isinstance(value, cffLib.BaseDict): + for k in value.rawDict.keys(): + getattr(value, k) + if isinstance(value, cffLib.Index): + # Load all items + for i in range(len(value)): + value[i] + # Discard offsets as should not be needed anymore + if hasattr(value, 'offsets'): + del value.offsets + + self._value_str = value.__class__.__name__ + if isinstance(value, ttLib.tables.DefaultTable.DefaultTable): + self._value_str += ' (%d Bytes)' % self._font.reader.tables[key].length + self._items = sorted(value.__dict__.items()) + self._filter_items() + + def _add_dict(self, key, value): + self._value_str = '%s of %d items' % (value.__class__.__name__, len(value)) + self._items = sorted(value.items()) + + def 
_add_list(self, key, value): + if len(value) and len(value) <= 32: + self._value_str = str(value) + else: + self._value_str = '%s of %d items' % (value.__class__.__name__, len(value)) + self._items = list(enumerate(value)) + + def __len__(self): + if hasattr(self, '_children'): + return len(self._children) + if hasattr(self, '_items'): + return len(self._items) + assert False + + def _ensure_children(self): + if hasattr(self, '_children'): + return + children = [] + for i,(k,v) in enumerate(self._items): + children.append(Row(self, i, k, v, self._font)) + self._children = children + del self._items + + def __getitem__(self, n): + if n >= len(self): + return None + if not hasattr(self, '_children'): + self._children = [None] * len(self) + c = self._children[n] + if c is None: + k,v = self._items[n] + c = self._children[n] = Row(self, n, k, v, self._font) + self._items[n] = None + return c + + def get_parent(self): + return self._parent + + def get_index(self): + return self._index + + def get_key(self): + return self._key + + def get_value(self): + return self._value + + def get_value_str(self): + if hasattr(self,'_value_str'): + return self._value_str + return str(self._value) + +class FontTreeModel(gtk.GenericTreeModel): + + __gtype_name__ = 'FontTreeModel' + + def __init__(self, font): + super(FontTreeModel, self).__init__() + self._columns = (str, str) + self.font = font + self._root = Row(None, 0, "font", font, font) + + def on_get_flags(self): + return 0 + + def on_get_n_columns(self): + return len(self._columns) + + def on_get_column_type(self, index): + return self._columns[index] + + def on_get_iter(self, path): + rowref = self._root + while path: + rowref = rowref[path[0]] + path = path[1:] + return rowref + + def on_get_path(self, rowref): + path = [] + while rowref != self._root: + path.append(rowref.get_index()) + rowref = rowref.get_parent() + path.reverse() + return tuple(path) + + def on_get_value(self, rowref, column): + if column == 0: + return 
rowref.get_key() + else: + return rowref.get_value_str() + + def on_iter_next(self, rowref): + return rowref.get_parent()[rowref.get_index() + 1] + + def on_iter_children(self, rowref): + return rowref[0] + + def on_iter_has_child(self, rowref): + return bool(len(rowref)) + + def on_iter_n_children(self, rowref): + return len(rowref) + + def on_iter_nth_child(self, rowref, n): + if not rowref: rowref = self._root + return rowref[n] + + def on_iter_parent(self, rowref): + return rowref.get_parent() + +class Inspect(object): + + def _delete_event(self, widget, event, data=None): + gtk.main_quit() + return False + + def __init__(self, fontfile): + + self.window = gtk.Window(gtk.WINDOW_TOPLEVEL) + self.window.set_title("%s - pyftinspect" % fontfile) + self.window.connect("delete_event", self._delete_event) + self.window.set_size_request(400, 600) + + self.scrolled_window = gtk.ScrolledWindow() + self.window.add(self.scrolled_window) + + self.font = ttLib.TTFont(fontfile, lazy=True) + self.treemodel = FontTreeModel(self.font) + self.treeview = gtk.TreeView(self.treemodel) + #self.treeview.set_reorderable(True) + + for i in range(2): + col_name = ('Key', 'Value')[i] + col = gtk.TreeViewColumn(col_name) + col.set_sort_column_id(-1) + self.treeview.append_column(col) + + cell = gtk.CellRendererText() + col.pack_start(cell, True) + col.add_attribute(cell, 'text', i) + + self.treeview.set_search_column(1) + self.scrolled_window.add(self.treeview) + self.window.show_all() + +def main(args=None): + if args is None: + args = sys.argv[1:] + if len(args) < 1: + print("usage: pyftinspect font...", file=sys.stderr) + sys.exit(1) + for arg in args: + Inspect(arg) + gtk.main() + +if __name__ == "__main__": + main() diff -Nru fonttools-2.4/Lib/fontTools/merge.py fonttools-3.0/Lib/fontTools/merge.py --- fonttools-2.4/Lib/fontTools/merge.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/merge.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,949 @@ +# Copyright 2013 
Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod, Roozbeh Pournader + +"""Font merger. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.timeTools import timestampNow +from fontTools import ttLib, cffLib +from fontTools.ttLib.tables import otTables, _h_e_a_d +from fontTools.ttLib.tables.DefaultTable import DefaultTable +from functools import reduce +import sys +import time +import operator + + +def _add_method(*clazzes, **kwargs): + """Returns a decorator function that adds a new method to one or + more classes.""" + allowDefault = kwargs.get('allowDefaultTable', False) + def wrapper(method): + for clazz in clazzes: + assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.' + assert method.__name__ not in clazz.__dict__, \ + "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__) + setattr(clazz, method.__name__, method) + return None + return wrapper + +# General utility functions for merging values from different fonts + +def equal(lst): + lst = list(lst) + t = iter(lst) + first = next(t) + assert all(item == first for item in t), "Expected all items to be equal: %s" % lst + return first + +def first(lst): + return next(iter(lst)) + +def recalculate(lst): + return NotImplemented + +def current_time(lst): + return timestampNow() + +def bitwise_and(lst): + return reduce(operator.and_, lst) + +def bitwise_or(lst): + return reduce(operator.or_, lst) + +def avg_int(lst): + lst = list(lst) + return sum(lst) // len(lst) + +def onlyExisting(func): + """Returns a filter func that when called with a list, + only calls func on the non-NotImplemented items of the list, + and only so if there's at least one item remaining. 
+ Otherwise returns NotImplemented.""" + + def wrapper(lst): + items = [item for item in lst if item is not NotImplemented] + return func(items) if items else NotImplemented + + return wrapper + +def sumLists(lst): + l = [] + for item in lst: + l.extend(item) + return l + +def sumDicts(lst): + d = {} + for item in lst: + d.update(item) + return d + +def mergeObjects(lst): + lst = [item for item in lst if item is not NotImplemented] + if not lst: + return NotImplemented + lst = [item for item in lst if item is not None] + if not lst: + return None + + clazz = lst[0].__class__ + assert all(type(item) == clazz for item in lst), lst + + logic = clazz.mergeMap + returnTable = clazz() + returnDict = {} + + allKeys = set.union(set(), *(vars(table).keys() for table in lst)) + for key in allKeys: + try: + mergeLogic = logic[key] + except KeyError: + try: + mergeLogic = logic['*'] + except KeyError: + raise Exception("Don't know how to merge key %s of class %s" % + (key, clazz.__name__)) + if mergeLogic is NotImplemented: + continue + value = mergeLogic(getattr(table, key, NotImplemented) for table in lst) + if value is not NotImplemented: + returnDict[key] = value + + returnTable.__dict__ = returnDict + + return returnTable + +def mergeBits(bitmap): + + def wrapper(lst): + lst = list(lst) + returnValue = 0 + for bitNumber in range(bitmap['size']): + try: + mergeLogic = bitmap[bitNumber] + except KeyError: + try: + mergeLogic = bitmap['*'] + except KeyError: + raise Exception("Don't know how to merge bit %s" % bitNumber) + shiftedBit = 1 << bitNumber + mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst) + returnValue |= mergedValue << bitNumber + return returnValue + + return wrapper + + +@_add_method(DefaultTable, allowDefaultTable=True) +def merge(self, m, tables): + if not hasattr(self, 'mergeMap'): + m.log("Don't know how to merge '%s'." 
% self.tableTag) + return NotImplemented + + logic = self.mergeMap + + if isinstance(logic, dict): + return m.mergeObjects(self, self.mergeMap, tables) + else: + return logic(tables) + + +ttLib.getTableClass('maxp').mergeMap = { + '*': max, + 'tableTag': equal, + 'tableVersion': equal, + 'numGlyphs': sum, + 'maxStorage': first, + 'maxFunctionDefs': first, + 'maxInstructionDefs': first, + # TODO When we correctly merge hinting data, update these values: + # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions +} + +headFlagsMergeBitMap = { + 'size': 16, + '*': bitwise_or, + 1: bitwise_and, # Baseline at y = 0 + 2: bitwise_and, # lsb at x = 0 + 3: bitwise_and, # Force ppem to integer values. FIXME? + 5: bitwise_and, # Font is vertical + 6: lambda bit: 0, # Always set to zero + 11: bitwise_and, # Font data is 'lossless' + 13: bitwise_and, # Optimized for ClearType + 14: bitwise_and, # Last resort font. FIXME? equal or first may be better + 15: lambda bit: 0, # Always set to zero +} + +ttLib.getTableClass('head').mergeMap = { + 'tableTag': equal, + 'tableVersion': max, + 'fontRevision': max, + 'checkSumAdjustment': lambda lst: 0, # We need *something* here + 'magicNumber': equal, + 'flags': mergeBits(headFlagsMergeBitMap), + 'unitsPerEm': equal, + 'created': current_time, + 'modified': current_time, + 'xMin': min, + 'yMin': min, + 'xMax': max, + 'yMax': max, + 'macStyle': first, + 'lowestRecPPEM': max, + 'fontDirectionHint': lambda lst: 2, + 'indexToLocFormat': recalculate, + 'glyphDataFormat': equal, +} + +ttLib.getTableClass('hhea').mergeMap = { + '*': equal, + 'tableTag': equal, + 'tableVersion': max, + 'ascent': max, + 'descent': min, + 'lineGap': max, + 'advanceWidthMax': max, + 'minLeftSideBearing': min, + 'minRightSideBearing': min, + 'xMaxExtent': max, + 'caretSlopeRise': first, + 'caretSlopeRun': first, + 'caretOffset': first, + 'numberOfHMetrics': recalculate, +} + +os2FsTypeMergeBitMap = { + 'size': 16, + '*': lambda bit: 0, + 1: bitwise_or, # no 
embedding permitted + 2: bitwise_and, # allow previewing and printing documents + 3: bitwise_and, # allow editing documents + 8: bitwise_or, # no subsetting permitted + 9: bitwise_or, # no embedding of outlines permitted +} + +def mergeOs2FsType(lst): + lst = list(lst) + if all(item == 0 for item in lst): + return 0 + + # Compute least restrictive logic for each fsType value + for i in range(len(lst)): + # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set + if lst[i] & 0x000C: + lst[i] &= ~0x0002 + # set bit 2 (allow previewing) if bit 3 is set (allow editing) + elif lst[i] & 0x0008: + lst[i] |= 0x0004 + # set bits 2 and 3 if everything is allowed + elif lst[i] == 0: + lst[i] = 0x000C + + fsType = mergeBits(os2FsTypeMergeBitMap)(lst) + # unset bits 2 and 3 if bit 1 is set (some font is "no embedding") + if fsType & 0x0002: + fsType &= ~0x000C + return fsType + + +ttLib.getTableClass('OS/2').mergeMap = { + '*': first, + 'tableTag': equal, + 'version': max, + 'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this + 'fsType': mergeOs2FsType, # Will be overwritten + 'panose': first, # FIXME: should really be the first Latin font + 'ulUnicodeRange1': bitwise_or, + 'ulUnicodeRange2': bitwise_or, + 'ulUnicodeRange3': bitwise_or, + 'ulUnicodeRange4': bitwise_or, + 'fsFirstCharIndex': min, + 'fsLastCharIndex': max, + 'sTypoAscender': max, + 'sTypoDescender': min, + 'sTypoLineGap': max, + 'usWinAscent': max, + 'usWinDescent': max, + # Version 2,3,4 + 'ulCodePageRange1': onlyExisting(bitwise_or), + 'ulCodePageRange2': onlyExisting(bitwise_or), + 'usMaxContex': onlyExisting(max), + # TODO version 5 +} + +@_add_method(ttLib.getTableClass('OS/2')) +def merge(self, m, tables): + DefaultTable.merge(self, m, tables) + if self.version < 2: + # bits 8 and 9 are reserved and should be set to zero + self.fsType &= ~0x0300 + if self.version >= 3: + # Only one of bits 1, 2, and 3 may be set. We already take + # care of bit 1 implications in mergeOs2FsType. 
So unset + # bit 2 if bit 3 is already set. + if self.fsType & 0x0008: + self.fsType &= ~0x0004 + return self + +ttLib.getTableClass('post').mergeMap = { + '*': first, + 'tableTag': equal, + 'formatType': max, + 'isFixedPitch': min, + 'minMemType42': max, + 'maxMemType42': lambda lst: 0, + 'minMemType1': max, + 'maxMemType1': lambda lst: 0, + 'mapping': onlyExisting(sumDicts), + 'extraNames': lambda lst: [], +} + +ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = { + 'tableTag': equal, + 'metrics': sumDicts, +} + +ttLib.getTableClass('gasp').mergeMap = { + 'tableTag': equal, + 'version': max, + 'gaspRange': first, # FIXME? Appears irreconcilable +} + +ttLib.getTableClass('name').mergeMap = { + 'tableTag': equal, + 'names': first, # FIXME? Does mixing name records make sense? +} + +ttLib.getTableClass('loca').mergeMap = { + '*': recalculate, + 'tableTag': equal, +} + +ttLib.getTableClass('glyf').mergeMap = { + 'tableTag': equal, + 'glyphs': sumDicts, + 'glyphOrder': sumLists, +} + +@_add_method(ttLib.getTableClass('glyf')) +def merge(self, m, tables): + for i,table in enumerate(tables): + for g in table.glyphs.values(): + if i: + # Drop hints for all but first font, since + # we don't map functions / CVT values. + g.removeHinting() + # Expand composite glyphs to load their + # composite glyph names. + if g.isComposite(): + g.expand(table) + return DefaultTable.merge(self, m, tables) + +ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst) + +@_add_method(ttLib.getTableClass('cmap')) +def merge(self, m, tables): + # TODO Handle format=14. + cmapTables = [(t,fontIdx) for fontIdx,table in enumerate(tables) for t in table.tables if t.isUnicode()] + # TODO Better handle format-4 and format-12 coexisting in same font. + # TODO Insert both a format-4 and format-12 if needed. 
+ module = ttLib.getTableModule('cmap') + assert all(t.format in [4, 12] for t,_ in cmapTables) + format = max(t.format for t,_ in cmapTables) + cmapTable = module.cmap_classes[format](format) + cmapTable.cmap = {} + cmapTable.platformID = 3 + cmapTable.platEncID = max(t.platEncID for t,_ in cmapTables) + cmapTable.language = 0 + cmap = cmapTable.cmap + for table,fontIdx in cmapTables: + # TODO handle duplicates. + for uni,gid in table.cmap.items(): + oldgid = cmap.get(uni, None) + if oldgid is None: + cmap[uni] = gid + elif oldgid != gid: + # Char previously mapped to oldgid, now to gid. + # Record, to fix up in GSUB 'locl' later. + assert m.duplicateGlyphsPerFont[fontIdx].get(oldgid, gid) == gid + m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid + self.tableVersion = 0 + self.tables = [cmapTable] + self.numSubTables = len(self.tables) + return self + + +otTables.ScriptList.mergeMap = { + 'ScriptCount': sum, + 'ScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.ScriptTag), +} +otTables.BaseScriptList.mergeMap = { + 'BaseScriptCount': sum, + 'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag), +} + +otTables.FeatureList.mergeMap = { + 'FeatureCount': sum, + 'FeatureRecord': sumLists, +} + +otTables.LookupList.mergeMap = { + 'LookupCount': sum, + 'Lookup': sumLists, +} + +otTables.Coverage.mergeMap = { + 'glyphs': sumLists, +} + +otTables.ClassDef.mergeMap = { + 'classDefs': sumDicts, +} + +otTables.LigCaretList.mergeMap = { + 'Coverage': mergeObjects, + 'LigGlyphCount': sum, + 'LigGlyph': sumLists, +} + +otTables.AttachList.mergeMap = { + 'Coverage': mergeObjects, + 'GlyphCount': sum, + 'AttachPoint': sumLists, +} + +# XXX Renumber MarkFilterSets of lookups +otTables.MarkGlyphSetsDef.mergeMap = { + 'MarkSetTableFormat': equal, + 'MarkSetCount': sum, + 'Coverage': sumLists, +} + +otTables.Axis.mergeMap = { + '*': mergeObjects, +} + +# XXX Fix BASE table merging +otTables.BaseTagList.mergeMap = { + 'BaseTagCount': sum, + 
'BaselineTag': sumLists, +} + +otTables.GDEF.mergeMap = \ +otTables.GSUB.mergeMap = \ +otTables.GPOS.mergeMap = \ +otTables.BASE.mergeMap = \ +otTables.JSTF.mergeMap = \ +otTables.MATH.mergeMap = \ +{ + '*': mergeObjects, + 'Version': max, +} + +ttLib.getTableClass('GDEF').mergeMap = \ +ttLib.getTableClass('GSUB').mergeMap = \ +ttLib.getTableClass('GPOS').mergeMap = \ +ttLib.getTableClass('BASE').mergeMap = \ +ttLib.getTableClass('JSTF').mergeMap = \ +ttLib.getTableClass('MATH').mergeMap = \ +{ + 'tableTag': onlyExisting(equal), # XXX clean me up + 'table': mergeObjects, +} + +@_add_method(ttLib.getTableClass('GSUB')) +def merge(self, m, tables): + + assert len(tables) == len(m.duplicateGlyphsPerFont) + for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)): + if not dups: continue + assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB" % (i + 1) + lookupMap = {id(v):v for v in table.table.LookupList.Lookup} + featureMap = {id(v):v for v in table.table.FeatureList.FeatureRecord} + synthFeature = None + synthLookup = None + for script in table.table.ScriptList.ScriptRecord: + if script.ScriptTag == 'DFLT': continue # XXX + for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]: + feature = [featureMap[v] for v in langsys.FeatureIndex if featureMap[v].FeatureTag == 'locl'] + assert len(feature) <= 1 + if feature: + feature = feature[0] + else: + if not synthFeature: + synthFeature = otTables.FeatureRecord() + synthFeature.FeatureTag = 'locl' + f = synthFeature.Feature = otTables.Feature() + f.FeatureParams = None + f.LookupCount = 0 + f.LookupListIndex = [] + langsys.FeatureIndex.append(id(synthFeature)) + featureMap[id(synthFeature)] = synthFeature + langsys.FeatureIndex.sort(key=lambda v: featureMap[v].FeatureTag) + table.table.FeatureList.FeatureRecord.append(synthFeature) + table.table.FeatureList.FeatureCount += 1 + feature = synthFeature + + if 
not synthLookup: + subtable = otTables.SingleSubst() + subtable.mapping = dups + synthLookup = otTables.Lookup() + synthLookup.LookupFlag = 0 + synthLookup.LookupType = 1 + synthLookup.SubTableCount = 1 + synthLookup.SubTable = [subtable] + table.table.LookupList.Lookup.append(synthLookup) + table.table.LookupList.LookupCount += 1 + + feature.Feature.LookupListIndex[:0] = [id(synthLookup)] + feature.Feature.LookupCount += 1 + + DefaultTable.merge(self, m, tables) + return self + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def mapLookups(self, lookupMap): + pass + +# Copied and trimmed down from subset.py +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def __merge_classify_context(self): + + class ContextHelper(object): + def __init__(self, klass, Format): + if klass.__name__.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klass.__name__.startswith('Chain'): + Chain = 'Chain' + else: + Chain = '' + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleSet = ChainTyp+'RuleSet' + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleSet = ChainTyp+'ClassSet' + + if self.Format not in [1, 2, 3]: + return None # Don't shoot the messenger; let it go + if not hasattr(self.__class__, "__ContextHelpers"): + self.__class__.__ContextHelpers = {} + if self.Format not in self.__class__.__ContextHelpers: + helper = ContextHelper(self.__class__, self.Format) + self.__class__.__ContextHelpers[self.Format] = helper + return 
self.__class__.__ContextHelpers[self.Format] + + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def mapLookups(self, lookupMap): + c = self.__merge_classify_context() + + if self.Format in [1, 2]: + for rs in getattr(self, c.RuleSet): + if not rs: continue + for r in getattr(rs, c.Rule): + if not r: continue + for ll in getattr(r, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookupMap[ll.LookupListIndex] + elif self.Format == 3: + for ll in getattr(self, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookupMap[ll.LookupListIndex] + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def mapLookups(self, lookupMap): + if self.Format == 1: + self.ExtSubTable.mapLookups(lookupMap) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Lookup) +def mapLookups(self, lookupMap): + for st in self.SubTable: + if not st: continue + st.mapLookups(lookupMap) + +@_add_method(otTables.LookupList) +def mapLookups(self, lookupMap): + for l in self.Lookup: + if not l: continue + l.mapLookups(lookupMap) + +@_add_method(otTables.Feature) +def mapLookups(self, lookupMap): + self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex] + +@_add_method(otTables.FeatureList) +def mapLookups(self, lookupMap): + for f in self.FeatureRecord: + if not f or not f.Feature: continue + f.Feature.mapLookups(lookupMap) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def mapFeatures(self, featureMap): + self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex] + if self.ReqFeatureIndex != 65535: + self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex] + +@_add_method(otTables.Script) +def mapFeatures(self, featureMap): + if self.DefaultLangSys: + self.DefaultLangSys.mapFeatures(featureMap) + for l in self.LangSysRecord: + if not l or not l.LangSys: continue + 
l.LangSys.mapFeatures(featureMap) + +@_add_method(otTables.ScriptList) +def mapFeatures(self, featureMap): + for s in self.ScriptRecord: + if not s or not s.Script: continue + s.Script.mapFeatures(featureMap) + + +class Options(object): + + class UnknownOptionError(Exception): + pass + + def __init__(self, **kwargs): + + self.set(**kwargs) + + def set(self, **kwargs): + for k,v in kwargs.items(): + if not hasattr(self, k): + raise self.UnknownOptionError("Unknown option '%s'" % k) + setattr(self, k, v) + + def parse_opts(self, argv, ignore_unknown=False): + ret = [] + opts = {} + for a in argv: + orig_a = a + if not a.startswith('--'): + ret.append(a) + continue + a = a[2:] + i = a.find('=') + op = '=' + if i == -1: + if a.startswith("no-"): + k = a[3:] + v = False + else: + k = a + v = True + else: + k = a[:i] + if k[-1] in "-+": + op = k[-1]+'=' # Ops is '-=' or '+=' now. + k = k[:-1] + v = a[i+1:] + k = k.replace('-', '_') + if not hasattr(self, k): + if ignore_unknown is True or k in ignore_unknown: + ret.append(orig_a) + continue + else: + raise self.UnknownOptionError("Unknown option '%s'" % a) + + ov = getattr(self, k) + if isinstance(ov, bool): + v = bool(v) + elif isinstance(ov, int): + v = int(v) + elif isinstance(ov, list): + vv = v.split(',') + if vv == ['']: + vv = [] + vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] + if op == '=': + v = vv + elif op == '+=': + v = ov + v.extend(vv) + elif op == '-=': + v = ov + for x in vv: + if x in v: + v.remove(x) + else: + assert 0 + + opts[k] = v + self.set(**opts) + + return ret + + +class Merger(object): + + def __init__(self, options=None, log=None): + + if not log: + log = Logger() + if not options: + options = Options() + + self.options = options + self.log = log + + def merge(self, fontfiles): + + mega = ttLib.TTFont() + + # + # Settle on a mega glyph order. 
+ # + fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] + glyphOrders = [font.getGlyphOrder() for font in fonts] + megaGlyphOrder = self._mergeGlyphOrders(glyphOrders) + # Reload fonts and set new glyph names on them. + # TODO Is it necessary to reload font? I think it is. At least + # it's safer, in case tables were loaded to provide glyph names. + fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] + for font,glyphOrder in zip(fonts, glyphOrders): + font.setGlyphOrder(glyphOrder) + mega.setGlyphOrder(megaGlyphOrder) + + for font in fonts: + self._preMerge(font) + + self.duplicateGlyphsPerFont = [{} for f in fonts] + + allTags = reduce(set.union, (list(font.keys()) for font in fonts), set()) + allTags.remove('GlyphOrder') + + # Make sure we process cmap before GSUB as we have a dependency there. + if 'GSUB' in allTags: + allTags.remove('GSUB') + allTags = ['GSUB'] + list(allTags) + if 'cmap' in allTags: + allTags.remove('cmap') + allTags = ['cmap'] + list(allTags) + + for tag in allTags: + + tables = [font.get(tag, NotImplemented) for font in fonts] + + clazz = ttLib.getTableClass(tag) + table = clazz(tag).merge(self, tables) + # XXX Clean this up and use: table = mergeObjects(tables) + + if table is not NotImplemented and table is not False: + mega[tag] = table + self.log("Merged '%s'." % tag) + else: + self.log("Dropped '%s'." % tag) + self.log.lapse("merge '%s'" % tag) + + del self.duplicateGlyphsPerFont + + self._postMerge(mega) + + return mega + + def _mergeGlyphOrders(self, glyphOrders): + """Modifies passed-in glyphOrders to reflect new glyph names. + Returns glyphOrder for the merged font.""" + # Simply append font index to the glyph name for now. + # TODO Even this simplistic numbering can result in conflicts. + # But then again, we have to improve this soon anyway. 
+ mega = [] + for n,glyphOrder in enumerate(glyphOrders): + for i,glyphName in enumerate(glyphOrder): + glyphName += "#" + repr(n) + glyphOrder[i] = glyphName + mega.append(glyphName) + return mega + + def mergeObjects(self, returnTable, logic, tables): + # Right now we don't use self at all. Will use in the future + # for options and logging. + + allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented)) + for key in allKeys: + try: + mergeLogic = logic[key] + except KeyError: + try: + mergeLogic = logic['*'] + except KeyError: + raise Exception("Don't know how to merge key %s of class %s" % + (key, returnTable.__class__.__name__)) + if mergeLogic is NotImplemented: + continue + value = mergeLogic(getattr(table, key, NotImplemented) for table in tables) + if value is not NotImplemented: + setattr(returnTable, key, value) + + return returnTable + + def _preMerge(self, font): + + # Map indices to references + + GDEF = font.get('GDEF') + GSUB = font.get('GSUB') + GPOS = font.get('GPOS') + + for t in [GSUB, GPOS]: + if not t: continue + + if t.table.LookupList: + lookupMap = {i:id(v) for i,v in enumerate(t.table.LookupList.Lookup)} + t.table.LookupList.mapLookups(lookupMap) + if t.table.FeatureList: + # XXX Handle present FeatureList but absent LookupList + t.table.FeatureList.mapLookups(lookupMap) + + if t.table.FeatureList and t.table.ScriptList: + featureMap = {i:id(v) for i,v in enumerate(t.table.FeatureList.FeatureRecord)} + t.table.ScriptList.mapFeatures(featureMap) + + # TODO GDEF/Lookup MarkFilteringSets + # TODO FeatureParams nameIDs + + def _postMerge(self, font): + + # Map references back to indices + + GDEF = font.get('GDEF') + GSUB = font.get('GSUB') + GPOS = font.get('GPOS') + + for t in [GSUB, GPOS]: + if not t: continue + + if t.table.LookupList: + lookupMap = {id(v):i for i,v in enumerate(t.table.LookupList.Lookup)} + t.table.LookupList.mapLookups(lookupMap) + if t.table.FeatureList: + # XXX Handle present 
FeatureList but absent LookupList + t.table.FeatureList.mapLookups(lookupMap) + + if t.table.FeatureList and t.table.ScriptList: + # XXX Handle present ScriptList but absent FeatureList + featureMap = {id(v):i for i,v in enumerate(t.table.FeatureList.FeatureRecord)} + t.table.ScriptList.mapFeatures(featureMap) + + # TODO GDEF/Lookup MarkFilteringSets + # TODO FeatureParams nameIDs + + +class Logger(object): + + def __init__(self, verbose=False, xml=False, timing=False): + self.verbose = verbose + self.xml = xml + self.timing = timing + self.last_time = self.start_time = time.time() + + def parse_opts(self, argv): + argv = argv[:] + for v in ['verbose', 'xml', 'timing']: + if "--"+v in argv: + setattr(self, v, True) + argv.remove("--"+v) + return argv + + def __call__(self, *things): + if not self.verbose: + return + print(' '.join(str(x) for x in things)) + + def lapse(self, *things): + if not self.timing: + return + new_time = time.time() + print("Took %0.3fs to %s" %(new_time - self.last_time, + ' '.join(str(x) for x in things))) + self.last_time = new_time + + def font(self, font, file=sys.stdout): + if not self.xml: + return + from fontTools.misc import xmlWriter + writer = xmlWriter.XMLWriter(file) + font.disassembleInstructions = False # Work around ttLib bug + for tag in font.keys(): + writer.begintag(tag) + writer.newline() + font[tag].toXML(writer, font) + writer.endtag(tag) + writer.newline() + + +__all__ = [ + 'Options', + 'Merger', + 'Logger', + 'main' +] + +def main(args=None): + + if args is None: + args = sys.argv[1:] + + log = Logger() + args = log.parse_opts(args) + + options = Options() + args = options.parse_opts(args) + + if len(args) < 1: + print("usage: pyftmerge font...", file=sys.stderr) + sys.exit(1) + + merger = Merger(options=options, log=log) + font = merger.merge(args) + outfile = 'merged.ttf' + font.save(outfile) + log.lapse("compile and save font") + + log.last_time = log.start_time + log.lapse("make one with everything(TOTAL TIME)") 
+ +if __name__ == "__main__": + main() diff -Nru fonttools-2.4/Lib/fontTools/misc/arrayTools.py fonttools-3.0/Lib/fontTools/misc/arrayTools.py --- fonttools-2.4/Lib/fontTools/misc/arrayTools.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/arrayTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -4,6 +4,8 @@ # +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * import math def calcBounds(array): @@ -16,18 +18,32 @@ ys = [y for x, y in array] return min(xs), min(ys), max(xs), max(ys) -def updateBounds(bounds, (x, y), min=min, max=max): +def calcIntBounds(array): + """Return the integer bounding rectangle of a 2D points array as a + tuple: (xMin, yMin, xMax, yMax) + """ + xMin, yMin, xMax, yMax = calcBounds(array) + xMin = int(math.floor(xMin)) + xMax = int(math.ceil(xMax)) + yMin = int(math.floor(yMin)) + yMax = int(math.ceil(yMax)) + return xMin, yMin, xMax, yMax + + +def updateBounds(bounds, p, min=min, max=max): """Return the bounding recangle of rectangle bounds and point (x, y).""" + (x, y) = p xMin, yMin, xMax, yMax = bounds return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y) -def pointInRect((x, y), rect): +def pointInRect(p, rect): """Return True when point (x, y) is inside rect.""" + (x, y) = p xMin, yMin, xMax, yMax = rect return (xMin <= x <= xMax) and (yMin <= y <= yMax) def pointsInRect(array, rect): - """Find out which points or array are inside rect. + """Find out which points or array are inside rect. Returns an array with a boolean for each point. 
""" if len(array) < 1: @@ -43,55 +59,64 @@ def asInt16(array): """Round and cast to 16 bit integer.""" return [int(math.floor(i+0.5)) for i in array] - -def normRect((xMin, yMin, xMax, yMax)): + +def normRect(rect): """Normalize the rectangle so that the following holds: xMin <= xMax and yMin <= yMax """ + (xMin, yMin, xMax, yMax) = rect return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax) -def scaleRect((xMin, yMin, xMax, yMax), x, y): +def scaleRect(rect, x, y): """Scale the rectangle by x, y.""" + (xMin, yMin, xMax, yMax) = rect return xMin * x, yMin * y, xMax * x, yMax * y -def offsetRect((xMin, yMin, xMax, yMax), dx, dy): +def offsetRect(rect, dx, dy): """Offset the rectangle by dx, dy.""" + (xMin, yMin, xMax, yMax) = rect return xMin+dx, yMin+dy, xMax+dx, yMax+dy -def insetRect((xMin, yMin, xMax, yMax), dx, dy): +def insetRect(rect, dx, dy): """Inset the rectangle by dx, dy on all sides.""" + (xMin, yMin, xMax, yMax) = rect return xMin+dx, yMin+dy, xMax-dx, yMax-dy -def sectRect((xMin1, yMin1, xMax1, yMax1), (xMin2, yMin2, xMax2, yMax2)): +def sectRect(rect1, rect2): """Return a boolean and a rectangle. If the input rectangles intersect, return True and the intersecting rectangle. Return False and (0, 0, 0, 0) if the input rectangles don't intersect. """ + (xMin1, yMin1, xMax1, yMax1) = rect1 + (xMin2, yMin2, xMax2, yMax2) = rect2 xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2), min(xMax1, xMax2), min(yMax1, yMax2)) if xMin >= xMax or yMin >= yMax: - return 0, (0, 0, 0, 0) - return 1, (xMin, yMin, xMax, yMax) + return False, (0, 0, 0, 0) + return True, (xMin, yMin, xMax, yMax) -def unionRect((xMin1, yMin1, xMax1, yMax1), (xMin2, yMin2, xMax2, yMax2)): +def unionRect(rect1, rect2): """Return the smallest rectangle in which both input rectangles are fully enclosed. In other words, return the total bounding rectangle of both input rectangles. 
""" + (xMin1, yMin1, xMax1, yMax1) = rect1 + (xMin2, yMin2, xMax2, yMax2) = rect2 xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2), max(xMax1, xMax2), max(yMax1, yMax2)) return (xMin, yMin, xMax, yMax) -def rectCenter((xMin, yMin, xMax, yMax)): +def rectCenter(rect0): """Return the center of the rectangle as an (x, y) coordinate.""" + (xMin, yMin, xMax, yMax) = rect0 return (xMin+xMax)/2, (yMin+yMax)/2 -def intRect((xMin, yMin, xMax, yMax)): +def intRect(rect1): """Return the rectangle, rounded off to integer values, but guaranteeing that the resulting rectangle is NOT smaller than the original. """ - import math + (xMin, yMin, xMax, yMax) = rect1 xMin = int(math.floor(xMin)) yMin = int(math.floor(yMin)) xMax = int(math.ceil(xMax)) @@ -147,13 +172,14 @@ >>> unionRect((0, 10, 20, 30), (0, 40, 20, 50)) (0, 10, 20, 50) >>> rectCenter((0, 0, 100, 200)) - (50, 100) + (50.0, 100.0) >>> rectCenter((0, 0, 100, 199.0)) - (50, 99.5) + (50.0, 99.5) >>> intRect((0.9, 2.9, 3.1, 4.1)) (0, 2, 4, 5) """ if __name__ == "__main__": + import sys import doctest - doctest.testmod() + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Lib/fontTools/misc/bezierTools.py fonttools-3.0/Lib/fontTools/misc/bezierTools.py --- fonttools-2.4/Lib/fontTools/misc/bezierTools.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/bezierTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,6 +1,8 @@ """fontTools.misc.bezierTools.py -- tools for working with bezier path segments. 
""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * __all__ = [ "calcQuadraticBounds", @@ -48,7 +50,7 @@ (0, 0, 100, 75.0) >>> calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100)) (0.0, 0.0, 100, 100) - >>> print "%f %f %f %f" % calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0)) + >>> print("%f %f %f %f" % calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0))) 35.566243 0.000000 64.433757 75.000000 """ (ax, ay), (bx, by), (cx, cy), (dx, dy) = calcCubicParameters(pt1, pt2, pt3, pt4) @@ -60,7 +62,7 @@ xRoots = [t for t in solveQuadratic(ax3, bx2, cx) if 0 <= t < 1] yRoots = [t for t in solveQuadratic(ay3, by2, cy) if 0 <= t < 1] roots = xRoots + yRoots - + points = [(ax*t*t*t + bx*t*t + cx * t + dx, ay*t*t*t + by*t*t + cy * t + dy) for t in roots] + [pt1, pt4] return calcBounds(points) @@ -73,32 +75,37 @@ line. >>> printSegments(splitLine((0, 0), (100, 100), 50, True)) - ((0, 0), (50.0, 50.0)) - ((50.0, 50.0), (100, 100)) + ((0, 0), (50, 50)) + ((50, 50), (100, 100)) >>> printSegments(splitLine((0, 0), (100, 100), 100, True)) ((0, 0), (100, 100)) >>> printSegments(splitLine((0, 0), (100, 100), 0, True)) - ((0, 0), (0.0, 0.0)) - ((0.0, 0.0), (100, 100)) + ((0, 0), (0, 0)) + ((0, 0), (100, 100)) >>> printSegments(splitLine((0, 0), (100, 100), 0, False)) - ((0, 0), (0.0, 0.0)) - ((0.0, 0.0), (100, 100)) + ((0, 0), (0, 0)) + ((0, 0), (100, 100)) + >>> printSegments(splitLine((100, 0), (0, 0), 50, False)) + ((100, 0), (50, 0)) + ((50, 0), (0, 0)) + >>> printSegments(splitLine((0, 100), (0, 0), 50, True)) + ((0, 100), (0, 50)) + ((0, 50), (0, 0)) """ pt1x, pt1y = pt1 pt2x, pt2y = pt2 - + ax = (pt2x - pt1x) ay = (pt2y - pt1y) - + bx = pt1x by = pt1y - - ax1 = (ax, ay)[isHorizontal] - - if ax == 0: + + a = (ax, ay)[isHorizontal] + + if a == 0: return [(pt1, pt2)] - - t = float(where - (bx, by)[isHorizontal]) / ax + t = (where - (bx, by)[isHorizontal]) / a if 0 <= t < 1: midPt = ax * t + bx, ay * t + by return 
[(pt1, midPt), (midPt, pt2)] @@ -114,26 +121,25 @@ >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 150, False)) ((0, 0), (50, 100), (100, 0)) >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, False)) - ((0.0, 0.0), (25.0, 50.0), (50.0, 50.0)) - ((50.0, 50.0), (75.0, 50.0), (100.0, 0.0)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, False)) - ((0.0, 0.0), (12.5, 25.0), (25.0, 37.5)) - ((25.0, 37.5), (62.5, 75.0), (100.0, 0.0)) + ((0, 0), (12.5, 25), (25, 37.5)) + ((25, 37.5), (62.5, 75), (100, 0)) >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, True)) - ((0.0, 0.0), (7.32233047034, 14.6446609407), (14.6446609407, 25.0)) - ((14.6446609407, 25.0), (50.0, 75.0), (85.3553390593, 25.0)) - ((85.3553390593, 25.0), (92.6776695297, 14.6446609407), (100.0, -7.1054273576e-15)) + ((0, 0), (7.32233, 14.6447), (14.6447, 25)) + ((14.6447, 25), (50, 75), (85.3553, 25)) + ((85.3553, 25), (92.6777, 14.6447), (100, -7.10543e-15)) >>> # XXX I'm not at all sure if the following behavior is desirable: >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, True)) - ((0.0, 0.0), (25.0, 50.0), (50.0, 50.0)) - ((50.0, 50.0), (50.0, 50.0), (50.0, 50.0)) - ((50.0, 50.0), (75.0, 50.0), (100.0, 0.0)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (50, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) """ a, b, c = calcQuadraticParameters(pt1, pt2, pt3) solutions = solveQuadratic(a[isHorizontal], b[isHorizontal], c[isHorizontal] - where) - solutions = [t for t in solutions if 0 <= t < 1] - solutions.sort() + solutions = sorted([t for t in solutions if 0 <= t < 1]) if not solutions: return [(pt1, pt2, pt3)] return _splitQuadraticAtT(a, b, c, *solutions) @@ -147,18 +153,17 @@ >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 150, False)) ((0, 0), (25, 100), (75, 100), (100, 0)) >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 
0), 50, False)) - ((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0)) - ((50.0, 75.0), (68.75, 75.0), (87.5, 50.0), (100.0, 0.0)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 25, True)) - ((0.0, 0.0), (2.2937927384, 9.17517095361), (4.79804488188, 17.5085042869), (7.47413641001, 25.0)) - ((7.47413641001, 25.0), (31.2886200204, 91.6666666667), (68.7113799796, 91.6666666667), (92.52586359, 25.0)) - ((92.52586359, 25.0), (95.2019551181, 17.5085042869), (97.7062072616, 9.17517095361), (100.0, 1.7763568394e-15)) + ((0, 0), (2.29379, 9.17517), (4.79804, 17.5085), (7.47414, 25)) + ((7.47414, 25), (31.2886, 91.6667), (68.7114, 91.6667), (92.5259, 25)) + ((92.5259, 25), (95.202, 17.5085), (97.7062, 9.17517), (100, 1.77636e-15)) """ a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) solutions = solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal], d[isHorizontal] - where) - solutions = [t for t in solutions if 0 <= t < 1] - solutions.sort() + solutions = sorted([t for t in solutions if 0 <= t < 1]) if not solutions: return [(pt1, pt2, pt3, pt4)] return _splitCubicAtT(a, b, c, d, *solutions) @@ -169,12 +174,12 @@ values of t. Return a list of curve segments. >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5)) - ((0.0, 0.0), (25.0, 50.0), (50.0, 50.0)) - ((50.0, 50.0), (75.0, 50.0), (100.0, 0.0)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75)) - ((0.0, 0.0), (25.0, 50.0), (50.0, 50.0)) - ((50.0, 50.0), (62.5, 50.0), (75.0, 37.5)) - ((75.0, 37.5), (87.5, 25.0), (100.0, 0.0)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (62.5, 50), (75, 37.5)) + ((75, 37.5), (87.5, 25), (100, 0)) """ a, b, c = calcQuadraticParameters(pt1, pt2, pt3) return _splitQuadraticAtT(a, b, c, *ts) @@ -185,12 +190,12 @@ values of t. Return a list of curve segments. 
>>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5)) - ((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0)) - ((50.0, 75.0), (68.75, 75.0), (87.5, 50.0), (100.0, 0.0)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75)) - ((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0)) - ((50.0, 75.0), (59.375, 75.0), (68.75, 68.75), (77.34375, 56.25)) - ((77.34375, 56.25), (85.9375, 43.75), (93.75, 25.0), (100.0, 0.0)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (59.375, 75), (68.75, 68.75), (77.3438, 56.25)) + ((77.3438, 56.25), (85.9375, 43.75), (93.75, 25), (100, 0)) """ a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) return _splitCubicAtT(a, b, c, d, *ts) @@ -215,7 +220,7 @@ b1y = (2*ay*t1 + by) * delta c1x = ax*t1**2 + bx*t1 + cx c1y = ay*t1**2 + by*t1 + cy - + pt1, pt2, pt3 = calcQuadraticPoints((a1x, a1y), (b1x, b1y), (c1x, c1y)) segments.append((pt1, pt2, pt3)) return segments @@ -281,8 +286,7 @@ return roots -def solveCubic(a, b, c, d, - abs=abs, pow=pow, sqrt=sqrt, cos=cos, acos=acos, pi=pi): +def solveCubic(a, b, c, d): """Solve a cubic equation where a, b, c and d are real. a*x*x*x + b*x*x + c*x + d = 0 This function returns a list of roots. Note that the returned list @@ -302,7 +306,7 @@ a1 = b/a a2 = c/a a3 = d/a - + Q = (a1*a1 - 3.0*a2)/9.0 R = (2.0*a1*a1*a1 - 9.0*a1*a2 + 27.0*a3)/54.0 R2_Q3 = R*R - Q*Q*Q @@ -392,7 +396,7 @@ try: it = iter(obj) except TypeError: - return str(obj) + return "%g" % obj else: return "(%s)" % ", ".join([_segmentrepr(x) for x in it]) @@ -402,8 +406,9 @@ segments on a single line as a tuple. 
""" for segment in segments: - print _segmentrepr(segment) + print(_segmentrepr(segment)) if __name__ == "__main__": + import sys import doctest - doctest.testmod() + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Lib/fontTools/misc/eexec.py fonttools-3.0/Lib/fontTools/misc/eexec.py --- fonttools-2.4/Lib/fontTools/misc/eexec.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/eexec.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,37 +1,29 @@ -"""fontTools.misc.eexec.py -- Module implementing the eexec and +"""fontTools.misc.eexec.py -- Module implementing the eexec and charstring encryption algorithm as used by PostScript Type 1 fonts. """ -# Warning: Although a Python implementation is provided here, -# all four public functions get overridden by the *much* faster -# C extension module eexecOp, if available. - -import string - -error = "eexec.error" - +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * def _decryptChar(cipher, R): - cipher = ord(cipher) + cipher = byteord(cipher) plain = ( (cipher ^ (R>>8)) ) & 0xFF - R = ( (cipher + R) * 52845L + 22719L ) & 0xFFFF - return chr(plain), R + R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF + return bytechr(plain), R def _encryptChar(plain, R): - plain = ord(plain) + plain = byteord(plain) cipher = ( (plain ^ (R>>8)) ) & 0xFF - R = ( (cipher + R) * 52845L + 22719L ) & 0xFFFF - return chr(cipher), R + R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF + return bytechr(cipher), R def decrypt(cipherstring, R): - # I could probably speed this up by inlining _decryptChar, - # but... 
we've got eexecOp, so who cares ;-) plainList = [] for cipher in cipherstring: plain, R = _decryptChar(cipher, R) plainList.append(plain) - plainstring = string.join(plainList, '') + plainstring = strjoin(plainList) return plainstring, int(R) def encrypt(plainstring, R): @@ -39,7 +31,7 @@ for plain in plainstring: cipher, R = _encryptChar(plain, R) cipherList.append(cipher) - cipherstring = string.join(cipherList, '') + cipherstring = strjoin(cipherList) return cipherstring, int(R) @@ -49,25 +41,15 @@ def deHexString(h): import binascii - h = "".join(h.split()) + h = strjoin(h.split()) return binascii.unhexlify(h) def _test(): - import fontTools.misc.eexecOp as eexecOp testStr = "\0\0asdadads asds\265" - print decrypt, decrypt(testStr, 12321) - print eexecOp.decrypt, eexecOp.decrypt(testStr, 12321) - print encrypt, encrypt(testStr, 12321) - print eexecOp.encrypt, eexecOp.encrypt(testStr, 12321) + print(decrypt, decrypt(testStr, 12321)) + print(encrypt, encrypt(testStr, 12321)) if __name__ == "__main__": _test() - - -try: - from fontTools.misc.eexecOp import * -except ImportError: - pass # Use the slow Python versions - diff -Nru fonttools-2.4/Lib/fontTools/misc/encodingTools.py fonttools-3.0/Lib/fontTools/misc/encodingTools.py --- fonttools-2.4/Lib/fontTools/misc/encodingTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/encodingTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,73 @@ +"""fontTools.misc.encodingTools.py -- tools for working with OpenType encodings. 
+""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import fontTools.encodings.codecs + +# Map keyed by platformID, then platEncID, then possibly langID +_encodingMap = { + 0: { # Unicode + 0: 'utf_16_be', + 1: 'utf_16_be', + 2: 'utf_16_be', + 3: 'utf_16_be', + 4: 'utf_16_be', + 5: 'utf_16_be', + 6: 'utf_16_be', + }, + 1: { # Macintosh + # See + # https://github.com/behdad/fonttools/issues/236 + 0: { # Macintosh, platEncID==0, keyed by langID + 15: "mac_iceland", + 17: "mac_turkish", + 18: "mac_croatian", + 24: "mac_latin2", + 25: "mac_latin2", + 26: "mac_latin2", + 27: "mac_latin2", + 28: "mac_latin2", + 36: "mac_latin2", + 37: "mac_romanian", + 38: "mac_latin2", + 39: "mac_latin2", + 40: "mac_latin2", + Ellipsis: 'mac_roman', # Other + }, + 1: 'x_mac_japanese_ttx', + 2: 'x_mac_trad_chinese_ttx', + 3: 'x_mac_korean_ttx', + 6: 'mac_greek', + 7: 'mac_cyrillic', + 25: 'x_mac_simp_chinese_ttx', + 29: 'mac_latin2', + 35: 'mac_turkish', + 37: 'mac_iceland', + }, + 2: { # ISO + 0: 'ascii', + 1: 'utf_16_be', + 2: 'latin1', + }, + 3: { # Microsoft + 0: 'utf_16_be', + 1: 'utf_16_be', + 2: 'shift_jis', + 3: 'gb2312', + 4: 'big5', + 5: 'euc_kr', + 6: 'johab', + 10: 'utf_16_be', + }, +} + +def getEncoding(platformID, platEncID, langID, default=None): + """Returns the Python encoding name for OpenType platformID/encodingID/langID + triplet. If encoding for these values is not known, by default None is + returned. That can be overriden by passing a value to the default argument. 
+ """ + encoding = _encodingMap.get(platformID, {}).get(platEncID, default) + if isinstance(encoding, dict): + encoding = encoding.get(langID, encoding[Ellipsis]) + return encoding diff -Nru fonttools-2.4/Lib/fontTools/misc/encodingTools_test.py fonttools-3.0/Lib/fontTools/misc/encodingTools_test.py --- fonttools-2.4/Lib/fontTools/misc/encodingTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/encodingTools_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,31 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +import unittest +from .encodingTools import getEncoding + +class EncodingTest(unittest.TestCase): + + def test_encoding_unicode(self): + + self.assertEqual(getEncoding(3, 0, None), "utf_16_be") # MS Symbol is Unicode as well + self.assertEqual(getEncoding(3, 1, None), "utf_16_be") + self.assertEqual(getEncoding(3, 10, None), "utf_16_be") + self.assertEqual(getEncoding(0, 3, None), "utf_16_be") + + def test_encoding_macroman_misc(self): + self.assertEqual(getEncoding(1, 0, 17), "mac_turkish") + self.assertEqual(getEncoding(1, 0, 37), "mac_romanian") + self.assertEqual(getEncoding(1, 0, 45), "mac_roman") + + def test_extended_mac_encodings(self): + encoding = getEncoding(1, 1, 0) # Mac Japanese + decoded = b'\xfe'.decode(encoding) + self.assertEqual(decoded, unichr(0x2122)) + + def test_extended_unknown(self): + self.assertEqual(getEncoding(10, 11, 12), None) + self.assertEqual(getEncoding(10, 11, 12, "ascii"), "ascii") + self.assertEqual(getEncoding(10, 11, 12, default="ascii"), "ascii") + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/misc/fixedTools.py fonttools-3.0/Lib/fontTools/misc/fixedTools.py --- fonttools-2.4/Lib/fontTools/misc/fixedTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/fixedTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,47 @@ 
+"""fontTools.misc.fixedTools.py -- tools for working with fixed numbers. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = [ + "fixedToFloat", + "floatToFixed", +] + +def fixedToFloat(value, precisionBits): + """Converts a fixed-point number to a float, choosing the float + that has the shortest decimal reprentation. Eg. to convert a + fixed number in a 2.14 format, use precisionBits=14. This is + pretty slow compared to a simple division. Use sporadically. + + precisionBits is only supported up to 16. + """ + if not value: return 0.0 + + scale = 1 << precisionBits + value /= scale + eps = .5 / scale + lo = value - eps + hi = value + eps + # If the range of valid choices spans an integer, return the integer. + if int(lo) != int(hi): + return float(round(value)) + fmt = "%.8f" + lo = fmt % lo + hi = fmt % hi + assert len(lo) == len(hi) and lo != hi + for i in range(len(lo)): + if lo[i] != hi[i]: + break + period = lo.find('.') + assert period < i + fmt = "%%.%df" % (i - period) + value = fmt % value + return float(value) + +def floatToFixed(value, precisionBits): + """Converts a float to a fixed-point number given the number of + precisionBits. Ie. 
int(round(value * (1<h", data[index:index+2]) + return value, index+2 + +def read_longInt(self, b0, data, index): + value, = struct.unpack(">l", data[index:index+4]) + return value, index+4 + +def read_fixed1616(self, b0, data, index): + value, = struct.unpack(">l", data[index:index+4]) + return value / 65536, index+4 + +def read_reserved(self, b0, data, index): + assert NotImplementedError + return NotImplemented, index + +def read_realNumber(self, b0, data, index): + number = '' + while True: + b = byteord(data[index]) + index = index + 1 + nibble0 = (b & 0xf0) >> 4 + nibble1 = b & 0x0f + if nibble0 == 0xf: + break + number = number + realNibbles[nibble0] + if nibble1 == 0xf: + break + number = number + realNibbles[nibble1] + return float(number), index + + t1OperandEncoding = [None] * 256 -t1OperandEncoding[0:32] = (32) * ["do_operator"] -t1OperandEncoding[32:247] = (247 - 32) * ["read_byte"] -t1OperandEncoding[247:251] = (251 - 247) * ["read_smallInt1"] -t1OperandEncoding[251:255] = (255 - 251) * ["read_smallInt2"] -t1OperandEncoding[255] = "read_longInt" +t1OperandEncoding[0:32] = (32) * [read_operator] +t1OperandEncoding[32:247] = (247 - 32) * [read_byte] +t1OperandEncoding[247:251] = (251 - 247) * [read_smallInt1] +t1OperandEncoding[251:255] = (255 - 251) * [read_smallInt2] +t1OperandEncoding[255] = read_longInt assert len(t1OperandEncoding) == 256 t2OperandEncoding = t1OperandEncoding[:] -t2OperandEncoding[28] = "read_shortInt" -t2OperandEncoding[255] = "read_fixed1616" +t2OperandEncoding[28] = read_shortInt +t2OperandEncoding[255] = read_fixed1616 cffDictOperandEncoding = t2OperandEncoding[:] -cffDictOperandEncoding[29] = "read_longInt" -cffDictOperandEncoding[30] = "read_realNumber" -cffDictOperandEncoding[255] = "reserved" +cffDictOperandEncoding[29] = read_longInt +cffDictOperandEncoding[30] = read_realNumber +cffDictOperandEncoding[255] = read_reserved -realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', +realNibbles = ['0', '1', '2', '3', 
'4', '5', '6', '7', '8', '9', '.', 'E', 'E-', None, '-'] -realNibblesDict = {} -for _i in range(len(realNibbles)): - realNibblesDict[realNibbles[_i]] = _i - - -class ByteCodeBase: - - def read_byte(self, b0, data, index): - return b0 - 139, index - - def read_smallInt1(self, b0, data, index): - b1 = ord(data[index]) - return (b0-247)*256 + b1 + 108, index+1 - - def read_smallInt2(self, b0, data, index): - b1 = ord(data[index]) - return -(b0-251)*256 - b1 - 108, index+1 - - def read_shortInt(self, b0, data, index): - bin = data[index] + data[index+1] - value, = struct.unpack(">h", bin) - return value, index+2 - - def read_longInt(self, b0, data, index): - bin = data[index] + data[index+1] + data[index+2] + data[index+3] - value, = struct.unpack(">l", bin) - return value, index+4 - - def read_fixed1616(self, b0, data, index): - bin = data[index] + data[index+1] + data[index+2] + data[index+3] - value, = struct.unpack(">l", bin) - return value / 65536.0, index+4 - - def read_realNumber(self, b0, data, index): - number = '' - while 1: - b = ord(data[index]) - index = index + 1 - nibble0 = (b & 0xf0) >> 4 - nibble1 = b & 0x0f - if nibble0 == 0xf: - break - number = number + realNibbles[nibble0] - if nibble1 == 0xf: - break - number = number + realNibbles[nibble1] - return float(number), index +realNibblesDict = {v:i for i,v in enumerate(realNibbles)} + + +class ByteCodeBase(object): + pass def buildOperatorDict(operatorList): @@ -87,7 +98,7 @@ oper[item[0]] = item[1] else: oper[item[0]] = item[1:] - if type(item[0]) == types.TupleType: + if isinstance(item[0], tuple): opc[item[1]] = item[0] else: opc[item[1]] = (item[0],) @@ -95,86 +106,86 @@ t2Operators = [ -# opcode name - (1, 'hstem'), - (3, 'vstem'), - (4, 'vmoveto'), - (5, 'rlineto'), - (6, 'hlineto'), - (7, 'vlineto'), - (8, 'rrcurveto'), - (10, 'callsubr'), - (11, 'return'), - (14, 'endchar'), - (16, 'blend'), - (18, 'hstemhm'), - (19, 'hintmask'), - (20, 'cntrmask'), - (21, 'rmoveto'), - (22, 'hmoveto'), - (23, 
'vstemhm'), - (24, 'rcurveline'), - (25, 'rlinecurve'), - (26, 'vvcurveto'), - (27, 'hhcurveto'), -# (28, 'shortint'), # not really an operator - (29, 'callgsubr'), - (30, 'vhcurveto'), - (31, 'hvcurveto'), - ((12, 0), 'ignore'), # dotsection. Yes, there a few very early OTF/CFF - # fonts with this deprecated operator. Just ignore it. - ((12, 3), 'and'), - ((12, 4), 'or'), - ((12, 5), 'not'), - ((12, 8), 'store'), - ((12, 9), 'abs'), - ((12, 10), 'add'), - ((12, 11), 'sub'), - ((12, 12), 'div'), - ((12, 13), 'load'), - ((12, 14), 'neg'), - ((12, 15), 'eq'), - ((12, 18), 'drop'), - ((12, 20), 'put'), - ((12, 21), 'get'), - ((12, 22), 'ifelse'), - ((12, 23), 'random'), - ((12, 24), 'mul'), - ((12, 26), 'sqrt'), - ((12, 27), 'dup'), - ((12, 28), 'exch'), - ((12, 29), 'index'), - ((12, 30), 'roll'), - ((12, 34), 'hflex'), - ((12, 35), 'flex'), - ((12, 36), 'hflex1'), - ((12, 37), 'flex1'), +# opcode name + (1, 'hstem'), + (3, 'vstem'), + (4, 'vmoveto'), + (5, 'rlineto'), + (6, 'hlineto'), + (7, 'vlineto'), + (8, 'rrcurveto'), + (10, 'callsubr'), + (11, 'return'), + (14, 'endchar'), + (16, 'blend'), + (18, 'hstemhm'), + (19, 'hintmask'), + (20, 'cntrmask'), + (21, 'rmoveto'), + (22, 'hmoveto'), + (23, 'vstemhm'), + (24, 'rcurveline'), + (25, 'rlinecurve'), + (26, 'vvcurveto'), + (27, 'hhcurveto'), +# (28, 'shortint'), # not really an operator + (29, 'callgsubr'), + (30, 'vhcurveto'), + (31, 'hvcurveto'), + ((12, 0), 'ignore'), # dotsection. Yes, there a few very early OTF/CFF + # fonts with this deprecated operator. Just ignore it. 
+ ((12, 3), 'and'), + ((12, 4), 'or'), + ((12, 5), 'not'), + ((12, 8), 'store'), + ((12, 9), 'abs'), + ((12, 10), 'add'), + ((12, 11), 'sub'), + ((12, 12), 'div'), + ((12, 13), 'load'), + ((12, 14), 'neg'), + ((12, 15), 'eq'), + ((12, 18), 'drop'), + ((12, 20), 'put'), + ((12, 21), 'get'), + ((12, 22), 'ifelse'), + ((12, 23), 'random'), + ((12, 24), 'mul'), + ((12, 26), 'sqrt'), + ((12, 27), 'dup'), + ((12, 28), 'exch'), + ((12, 29), 'index'), + ((12, 30), 'roll'), + ((12, 34), 'hflex'), + ((12, 35), 'flex'), + ((12, 36), 'hflex1'), + ((12, 37), 'flex1'), ] def getIntEncoder(format): if format == "cff": - fourByteOp = chr(29) + fourByteOp = bytechr(29) elif format == "t1": - fourByteOp = chr(255) + fourByteOp = bytechr(255) else: assert format == "t2" fourByteOp = None - - def encodeInt(value, fourByteOp=fourByteOp, chr=chr, + + def encodeInt(value, fourByteOp=fourByteOp, bytechr=bytechr, pack=struct.pack, unpack=struct.unpack): if -107 <= value <= 107: - code = chr(value + 139) + code = bytechr(value + 139) elif 108 <= value <= 1131: value = value - 108 - code = chr((value >> 8) + 247) + chr(value & 0xFF) + code = bytechr((value >> 8) + 247) + bytechr(value & 0xFF) elif -1131 <= value <= -108: value = -value - 108 - code = chr((value >> 8) + 251) + chr(value & 0xFF) + code = bytechr((value >> 8) + 251) + bytechr(value & 0xFF) elif fourByteOp is None: # T2 only supports 2 byte ints if -32768 <= value <= 32767: - code = chr(28) + pack(">h", value) + code = bytechr(28) + pack(">h", value) else: # Backwards compatible hack: due to a previous bug in FontTools, # 16.16 fixed numbers were written out as 4-byte ints. When @@ -188,11 +199,11 @@ sys.stderr.write("Warning: 4-byte T2 number got passed to the " "IntType handler. 
This should happen only when reading in " "old XML files.\n") - code = chr(255) + pack(">l", value) + code = bytechr(255) + pack(">l", value) else: code = fourByteOp + pack(">l", value) return code - + return encodeInt @@ -202,7 +213,7 @@ def encodeFixed(f, pack=struct.pack): # For T2 only - return "\xff" + pack(">l", int(round(f * 65536))) + return b"\xff" + pack(">l", int(round(f * 65536))) def encodeFloat(f): # For CFF only, used in cffLib @@ -222,9 +233,9 @@ nibbles.append(0xf) if len(nibbles) % 2: nibbles.append(0xf) - d = chr(30) + d = bytechr(30) for i in range(0, len(nibbles), 2): - d = d + chr(nibbles[i] << 4 | nibbles[i+1]) + d = d + bytechr(nibbles[i] << 4 | nibbles[i+1]) return d @@ -232,27 +243,27 @@ class T2CharString(ByteCodeBase): - + operandEncoding = t2OperandEncoding operators, opcodes = buildOperatorDict(t2Operators) - + def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None): if program is None: program = [] self.bytecode = bytecode self.program = program self.private = private - self.globalSubrs = globalSubrs - + self.globalSubrs = globalSubrs if globalSubrs is not None else [] + def __repr__(self): if self.bytecode is None: return "<%s (source) at %x>" % (self.__class__.__name__, id(self)) else: return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self)) - + def getIntEncoder(self): return encodeIntT2 - + def getFixedEncoder(self): return encodeFixed @@ -262,14 +273,14 @@ subrs = getattr(self.private, "Subrs", []) decompiler = SimpleT2Decompiler(subrs, self.globalSubrs) decompiler.execute(self) - + def draw(self, pen): subrs = getattr(self.private, "Subrs", []) extractor = T2OutlineExtractor(pen, subrs, self.globalSubrs, self.private.nominalWidthX, self.private.defaultWidthX) extractor.execute(self) self.width = extractor.width - + def compile(self): if self.bytecode is not None: return @@ -287,56 +298,56 @@ token = program[i] i = i + 1 tp = type(token) - if tp == types.StringType: + if issubclass(tp, 
basestring): try: - bytecode.extend(map(chr, opcodes[token])) + bytecode.extend(bytechr(b) for b in opcodes[token]) except KeyError: - raise CharStringCompileError, "illegal operator: %s" % token + raise CharStringCompileError("illegal operator: %s" % token) if token in ('hintmask', 'cntrmask'): bytecode.append(program[i]) # hint mask i = i + 1 - elif tp == types.IntType: + elif tp == int: bytecode.append(encodeInt(token)) - elif tp == types.FloatType: + elif tp == float: bytecode.append(encodeFixed(token)) else: assert 0, "unsupported type: %s" % tp try: - bytecode = "".join(bytecode) + bytecode = bytesjoin(bytecode) except TypeError: - print bytecode + print(bytecode) raise self.setBytecode(bytecode) - + def needsDecompilation(self): return self.bytecode is not None - + def setProgram(self, program): self.program = program self.bytecode = None - + def setBytecode(self, bytecode): self.bytecode = bytecode self.program = None - - def getToken(self, index, - len=len, ord=ord, getattr=getattr, type=type, StringType=types.StringType): + + def getToken(self, index, + len=len, byteord=byteord, basestring=basestring, + isinstance=isinstance): if self.bytecode is not None: if index >= len(self.bytecode): return None, 0, 0 - b0 = ord(self.bytecode[index]) + b0 = byteord(self.bytecode[index]) index = index + 1 - code = self.operandEncoding[b0] - handler = getattr(self, code) - token, index = handler(b0, self.bytecode, index) + handler = self.operandEncoding[b0] + token, index = handler(self, b0, self.bytecode, index) else: if index >= len(self.program): return None, 0, 0 token = self.program[index] index = index + 1 - isOperator = type(token) == StringType + isOperator = isinstance(token, basestring) return token, isOperator, index - + def getBytes(self, index, nBytes): if self.bytecode is not None: newIndex = index + nBytes @@ -347,16 +358,10 @@ index = index + 1 assert len(bytes) == nBytes return bytes, index - - def do_operator(self, b0, data, index): - if b0 == 12: - op 
= (b0, ord(data[index])) - index = index+1 - else: - op = b0 - operator = self.operators[op] - return operator, index - + + def handle_operator(self, operator): + return operator + def toXML(self, xmlWriter): from fontTools.misc.textTools import num2binary if self.bytecode is not None: @@ -364,33 +369,33 @@ else: index = 0 args = [] - while 1: + while True: token, isOperator, index = self.getToken(index) if token is None: break if isOperator: - args = map(str, args) + args = [str(arg) for arg in args] if token in ('hintmask', 'cntrmask'): hintMask, isOperator, index = self.getToken(index) bits = [] for byte in hintMask: - bits.append(num2binary(ord(byte), 8)) - hintMask = string.join(bits, "") - line = string.join(args + [token, hintMask], " ") + bits.append(num2binary(byteord(byte), 8)) + hintMask = strjoin(bits) + line = ' '.join(args + [token, hintMask]) else: - line = string.join(args + [token], " ") + line = ' '.join(args + [token]) xmlWriter.write(line) xmlWriter.newline() args = [] else: args.append(token) - - def fromXML(self, (name, attrs, content)): + + def fromXML(self, name, attrs, content): from fontTools.misc.textTools import binary2num, readHex if attrs.get("raw"): self.setBytecode(readHex(content)) return - content = "".join(content) + content = strjoin(content) content = content.split() program = [] end = len(content) @@ -407,9 +412,9 @@ program.append(token) if token in ('hintmask', 'cntrmask'): mask = content[i] - maskBytes = "" + maskBytes = b"" for j in range(0, len(mask), 8): - maskBytes = maskBytes + chr(binary2num(mask[j:j+8])) + maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8])) program.append(maskBytes) i = i + 1 else: @@ -420,39 +425,39 @@ t1Operators = [ -# opcode name - (1, 'hstem'), - (3, 'vstem'), - (4, 'vmoveto'), - (5, 'rlineto'), - (6, 'hlineto'), - (7, 'vlineto'), - (8, 'rrcurveto'), - (9, 'closepath'), - (10, 'callsubr'), - (11, 'return'), - (13, 'hsbw'), - (14, 'endchar'), - (21, 'rmoveto'), - (22, 'hmoveto'), - (30, 
'vhcurveto'), - (31, 'hvcurveto'), - ((12, 0), 'dotsection'), - ((12, 1), 'vstem3'), - ((12, 2), 'hstem3'), - ((12, 6), 'seac'), - ((12, 7), 'sbw'), - ((12, 12), 'div'), - ((12, 16), 'callothersubr'), - ((12, 17), 'pop'), - ((12, 33), 'setcurrentpoint'), +# opcode name + (1, 'hstem'), + (3, 'vstem'), + (4, 'vmoveto'), + (5, 'rlineto'), + (6, 'hlineto'), + (7, 'vlineto'), + (8, 'rrcurveto'), + (9, 'closepath'), + (10, 'callsubr'), + (11, 'return'), + (13, 'hsbw'), + (14, 'endchar'), + (21, 'rmoveto'), + (22, 'hmoveto'), + (30, 'vhcurveto'), + (31, 'hvcurveto'), + ((12, 0), 'dotsection'), + ((12, 1), 'vstem3'), + ((12, 2), 'hstem3'), + ((12, 6), 'seac'), + ((12, 7), 'sbw'), + ((12, 12), 'div'), + ((12, 16), 'callothersubr'), + ((12, 17), 'pop'), + ((12, 33), 'setcurrentpoint'), ] class T1CharString(T2CharString): - + operandEncoding = t1OperandEncoding operators, opcodes = buildOperatorDict(t1Operators) - + def __init__(self, bytecode=None, program=None, subrs=None): if program is None: program = [] @@ -468,11 +473,11 @@ raise TypeError("Type 1 charstrings don't support floating point operands") def decompile(self): - if self.program is not None: + if self.bytecode is None: return program = [] index = 0 - while 1: + while True: token, isOperator, index = self.getToken(index) if token is None: break @@ -485,21 +490,21 @@ self.width = extractor.width -class SimpleT2Decompiler: - +class SimpleT2Decompiler(object): + def __init__(self, localSubrs, globalSubrs): self.localSubrs = localSubrs self.localBias = calcSubrBias(localSubrs) self.globalSubrs = globalSubrs self.globalBias = calcSubrBias(globalSubrs) self.reset() - + def reset(self): self.callingStack = [] self.operandStack = [] self.hintCount = 0 self.hintMaskBytes = 0 - + def execute(self, charString): self.callingStack.append(charString) needsDecompilation = charString.needsDecompilation() @@ -510,15 +515,15 @@ pushToProgram = lambda x: None pushToStack = self.operandStack.append index = 0 - while 1: + while True: 
token, isOperator, index = charString.getToken(index) if token is None: break # we're done! pushToProgram(token) if isOperator: handlerName = "op_" + token - if hasattr(self, handlerName): - handler = getattr(self, handlerName) + handler = getattr(self, handlerName, None) + if handler is not None: rv = handler(index) if rv: hintMaskBytes, index = rv @@ -533,24 +538,24 @@ "seac"), "illegal CharString" charString.setProgram(program) del self.callingStack[-1] - + def pop(self): value = self.operandStack[-1] del self.operandStack[-1] return value - + def popall(self): stack = self.operandStack[:] self.operandStack[:] = [] return stack - + def push(self, value): self.operandStack.append(value) - + def op_return(self, index): if self.operandStack: pass - + def op_endchar(self, index): pass @@ -561,12 +566,12 @@ subrIndex = self.pop() subr = self.localSubrs[subrIndex+self.localBias] self.execute(subr) - + def op_callgsubr(self, index): subrIndex = self.pop() subr = self.globalSubrs[subrIndex+self.globalBias] self.execute(subr) - + def op_hstem(self, index): self.countHints() def op_vstem(self, index): @@ -575,29 +580,74 @@ self.countHints() def op_vstemhm(self, index): self.countHints() - + def op_hintmask(self, index): if not self.hintMaskBytes: self.countHints() - self.hintMaskBytes = (self.hintCount + 7) / 8 + self.hintMaskBytes = (self.hintCount + 7) // 8 hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes) return hintMaskBytes, index - + op_cntrmask = op_hintmask - + def countHints(self): args = self.popall() - self.hintCount = self.hintCount + len(args) / 2 + self.hintCount = self.hintCount + len(args) // 2 + # misc + def op_and(self, index): + raise NotImplementedError + def op_or(self, index): + raise NotImplementedError + def op_not(self, index): + raise NotImplementedError + def op_store(self, index): + raise NotImplementedError + def op_abs(self, index): + raise NotImplementedError + def op_add(self, index): + raise 
NotImplementedError + def op_sub(self, index): + raise NotImplementedError + def op_div(self, index): + raise NotImplementedError + def op_load(self, index): + raise NotImplementedError + def op_neg(self, index): + raise NotImplementedError + def op_eq(self, index): + raise NotImplementedError + def op_drop(self, index): + raise NotImplementedError + def op_put(self, index): + raise NotImplementedError + def op_get(self, index): + raise NotImplementedError + def op_ifelse(self, index): + raise NotImplementedError + def op_random(self, index): + raise NotImplementedError + def op_mul(self, index): + raise NotImplementedError + def op_sqrt(self, index): + raise NotImplementedError + def op_dup(self, index): + raise NotImplementedError + def op_exch(self, index): + raise NotImplementedError + def op_index(self, index): + raise NotImplementedError + def op_roll(self, index): + raise NotImplementedError class T2OutlineExtractor(SimpleT2Decompiler): - + def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX): SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) self.pen = pen self.nominalWidthX = nominalWidthX self.defaultWidthX = defaultWidthX - + def reset(self): SimpleT2Decompiler.reset(self) self.hints = [] @@ -605,13 +655,13 @@ self.width = 0 self.currentPoint = (0, 0) self.sawMoveTo = 0 - + def _nextPoint(self, point): x, y = self.currentPoint point = x + point[0], y + point[1] self.currentPoint = point return point - + def rMoveTo(self, point): self.pen.moveTo(self._nextPoint(point)) self.sawMoveTo = 1 @@ -626,12 +676,12 @@ self.rMoveTo((0, 0)) nextPoint = self._nextPoint self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), nextPoint(pt3)) - + def closePath(self): if self.sawMoveTo: self.pen.closePath() self.sawMoveTo = 0 - + def endPath(self): # In T2 there are no open paths, so always do a closePath when # finishing a sub path. 
@@ -647,11 +697,11 @@ self.width = self.defaultWidthX self.gotWidth = 1 return args - + def countHints(self): args = self.popallWidth() - self.hintCount = self.hintCount + len(args) / 2 - + self.hintCount = self.hintCount + len(args) // 2 + # # hint operators # @@ -667,7 +717,7 @@ # self.countHints() #def op_cntrmask(self, index): # self.countHints() - + # # path constructors, moveto # @@ -692,7 +742,7 @@ self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) accentGlyph = StandardEncoding[achar] self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) - + # # path constructors, lines # @@ -701,12 +751,12 @@ for i in range(0, len(args), 2): point = args[i:i+2] self.rLineTo(point) - + def op_hlineto(self, index): self.alternatingLineto(1) def op_vlineto(self, index): self.alternatingLineto(0) - + # # path constructors, curves # @@ -716,7 +766,7 @@ for i in range(0, len(args), 6): dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6] self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc)) - + def op_rcurveline(self, index): """{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline""" args = self.popall() @@ -724,7 +774,7 @@ dxb, dyb, dxc, dyc, dxd, dyd = args[i:i+6] self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) self.rLineTo(args[-2:]) - + def op_rlinecurve(self, index): """{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve""" args = self.popall() @@ -733,7 +783,7 @@ self.rLineTo(lineArgs[i:i+2]) dxb, dyb, dxc, dyc, dxd, dyd = args[-6:] self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) - + def op_vvcurveto(self, index): "dx1? {dya dxb dyb dyc}+ vvcurveto" args = self.popall() @@ -746,7 +796,7 @@ dya, dxb, dyb, dyc = args[i:i+4] self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc)) dx1 = 0 - + def op_hhcurveto(self, index): """dy1? {dxa dxb dyb dxc}+ hhcurveto""" args = self.popall() @@ -759,7 +809,7 @@ dxa, dxb, dyb, dxc = args[i:i+4] self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0)) dy1 = 0 - + def op_vhcurveto(self, index): """dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? 
vhcurveto (30) {dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto @@ -769,7 +819,7 @@ args = self.vcurveto(args) if args: args = self.hcurveto(args) - + def op_hvcurveto(self, index): """dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf? {dxa dxb dyb dyc dyd dxe dye dxf}+ dyf? @@ -779,7 +829,7 @@ args = self.hcurveto(args) if args: args = self.vcurveto(args) - + # # path constructors, flex # @@ -812,13 +862,13 @@ dy6 = d6 self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) - + # # MultipleMaster. Well... # def op_blend(self, index): - args = self.popall() - + self.popall() + # misc def op_and(self, index): raise NotImplementedError @@ -837,8 +887,8 @@ def op_div(self, index): num2 = self.pop() num1 = self.pop() - d1 = num1/num2 - d2 = float(num1)/num2 + d1 = num1//num2 + d2 = num1/num2 if d1 == d2: self.push(d1) else: @@ -871,9 +921,9 @@ raise NotImplementedError def op_roll(self, index): raise NotImplementedError - + # - # miscelaneous helpers + # miscellaneous helpers # def alternatingLineto(self, isHorizontal): args = self.popall() @@ -884,7 +934,7 @@ point = (0, arg) self.rLineTo(point) isHorizontal = not isHorizontal - + def vcurveto(self, args): dya, dxb, dyb, dxc = args[:4] args = args[4:] @@ -895,7 +945,7 @@ dyc = 0 self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc)) return args - + def hcurveto(self, args): dxa, dxb, dyb, dyc = args[:4] args = args[4:] @@ -909,18 +959,18 @@ class T1OutlineExtractor(T2OutlineExtractor): - + def __init__(self, pen, subrs): self.pen = pen self.subrs = subrs self.reset() - + def reset(self): self.flexing = 0 self.width = 0 self.sbx = 0 T2OutlineExtractor.reset(self) - + def endPath(self): if self.sawMoveTo: self.pen.endPath() @@ -928,11 +978,11 @@ def popallWidth(self, evenOdd=0): return self.popall() - + def exch(self): stack = self.operandStack stack[-1], stack[-2] = stack[-2], stack[-1] - + # # path constructors # @@ -962,10 +1012,10 @@ args = self.popall() x, y = args 
self.currentPoint = x, y - + def op_endchar(self, index): self.endPath() - + def op_hsbw(self, index): sbx, wx = self.popall() self.width = wx @@ -973,7 +1023,7 @@ self.currentPoint = sbx, self.currentPoint[1] def op_sbw(self, index): self.popall() # XXX - + # def op_callsubr(self, index): subrIndex = self.pop() @@ -991,12 +1041,12 @@ # ignore... def op_pop(self, index): pass # ignore... - + def doFlex(self): finaly = self.pop() finalx = self.pop() self.pop() # flex height is unused - + p3y = self.pop() p3x = self.pop() bcp4y = self.pop() @@ -1011,7 +1061,7 @@ bcp1x = self.pop() rpy = self.pop() rpx = self.pop() - + # call rrcurveto self.push(bcp1x+rpx) self.push(bcp1y+rpy) @@ -1020,7 +1070,7 @@ self.push(p2x) self.push(p2y) self.op_rrcurveto(None) - + # call rrcurveto self.push(bcp3x) self.push(bcp3y) @@ -1029,11 +1079,11 @@ self.push(p3x) self.push(p3y) self.op_rrcurveto(None) - + # Push back final coords so subr 0 can find them self.push(finalx) self.push(finaly) - + def op_dotsection(self, index): self.popall() # XXX def op_hstem3(self, index): @@ -1052,53 +1102,43 @@ class DictDecompiler(ByteCodeBase): - + operandEncoding = cffDictOperandEncoding - + def __init__(self, strings): self.stack = [] self.strings = strings self.dict = {} - + def getDict(self): assert len(self.stack) == 0, "non-empty stack" return self.dict - + def decompile(self, data): index = 0 lenData = len(data) push = self.stack.append while index < lenData: - b0 = ord(data[index]) + b0 = byteord(data[index]) index = index + 1 - code = self.operandEncoding[b0] - handler = getattr(self, code) - value, index = handler(b0, data, index) + handler = self.operandEncoding[b0] + value, index = handler(self, b0, data, index) if value is not None: push(value) - + def pop(self): value = self.stack[-1] del self.stack[-1] return value - + def popall(self): - all = self.stack[:] + args = self.stack[:] del self.stack[:] - return all - - def do_operator(self, b0, data, index): - if b0 == 12: - op = (b0, 
ord(data[index])) - index = index+1 - else: - op = b0 - operator, argType = self.operators[op] - self.handle_operator(operator, argType) - return None, index - - def handle_operator(self, operator, argType): - if type(argType) == type(()): + return args + + def handle_operator(self, operator): + operator, argType = operator + if isinstance(argType, type(())): value = () for i in range(len(argType)-1, -1, -1): arg = argType[i] @@ -1108,7 +1148,7 @@ arghandler = getattr(self, "arg_" + argType) value = arghandler(operator) self.dict[operator] = value - + def arg_number(self, name): return self.pop() def arg_SID(self, name): diff -Nru fonttools-2.4/Lib/fontTools/misc/psLib.py fonttools-3.0/Lib/fontTools/misc/psLib.py --- fonttools-2.4/Lib/fontTools/misc/psLib.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/psLib.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,10 +1,10 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import eexec +from .psOperators import * import re -import types +import collections from string import whitespace -import StringIO - -from fontTools.misc import eexec -from psOperators import * ps_special = '()<>[]{}%' # / is one too, but we take care of that one differently @@ -38,18 +38,18 @@ class PSError(Exception): pass -class PSTokenizer(StringIO.StringIO): - - def getnexttoken(self, +class PSTokenizer(BytesIO): + + def getnexttoken(self, # localize some stuff, for performance len=len, ps_special=ps_special, stringmatch=stringRE.match, hexstringmatch=hexstringRE.match, commentmatch=commentRE.match, - endmatch=endofthingRE.match, + endmatch=endofthingRE.match, whitematch=skipwhiteRE.match): - + _, nextpos = whitematch(self.buf, self.pos).span() self.pos = nextpos if self.pos >= self.len: @@ -69,18 +69,18 @@ tokentype = 'do_string' m = stringmatch(buf, pos) if m is None: - raise PSTokenError, 'bad string at character %d' % pos + raise PSTokenError('bad 
string at character %d' % pos) _, nextpos = m.span() token = buf[pos:nextpos] elif char == '<': tokentype = 'do_hexstring' m = hexstringmatch(buf, pos) if m is None: - raise PSTokenError, 'bad hexstring at character %d' % pos + raise PSTokenError('bad hexstring at character %d' % pos) _, nextpos = m.span() token = buf[pos:nextpos] else: - raise PSTokenError, 'bad token at character %d' % pos + raise PSTokenError('bad token at character %d' % pos) else: if char == '/': tokentype = 'do_literal' @@ -89,16 +89,16 @@ tokentype = '' m = endmatch(buf, pos) if m is None: - raise PSTokenError, 'bad token at character %d' % pos + raise PSTokenError('bad token at character %d' % pos) _, nextpos = m.span() token = buf[pos:nextpos] self.pos = pos + len(token) return tokentype, token - + def skipwhite(self, whitematch=skipwhiteRE.match): _, nextpos = whitematch(self.buf, self.pos).span() self.pos = nextpos - + def starteexec(self): self.pos = self.pos + 1 #self.skipwhite() @@ -106,13 +106,13 @@ self.buf, R = eexec.decrypt(self.dirtybuf, 55665) self.len = len(self.buf) self.pos = 4 - + def stopeexec(self): if not hasattr(self, 'dirtybuf'): return self.buf = self.dirtybuf del self.dirtybuf - + def flush(self): if self.buflist: self.buf = self.buf + "".join(self.buflist) @@ -120,7 +120,7 @@ class PSInterpreter(PSOperators): - + def __init__(self): systemdict = {} userdict = {} @@ -129,7 +129,7 @@ self.proclevel = 0 self.procmark = ps_procmark() self.fillsystemdict() - + def fillsystemdict(self): systemdict = self.dictstack[0] systemdict['['] = systemdict['mark'] = self.mark = ps_mark() @@ -139,17 +139,17 @@ systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding) systemdict['FontDirectory'] = ps_dict({}) self.suckoperators(systemdict, self.__class__) - + def suckoperators(self, systemdict, klass): for name in dir(klass): attr = getattr(self, name) - if callable(attr) and name[:3] == 'ps_': + if isinstance(attr, collections.Callable) and name[:3] == 'ps_': name = name[3:] 
systemdict[name] = ps_operator(name, attr) for baseclass in klass.__bases__: self.suckoperators(systemdict, baseclass) - - def interpret(self, data, getattr = getattr): + + def interpret(self, data, getattr=getattr): tokenizer = self.tokenizer = PSTokenizer(data) getnexttoken = tokenizer.getnexttoken do_token = self.do_token @@ -172,15 +172,15 @@ finally: if self.tokenizer is not None: if 0: - print 'ps error:\n- - - - - - -' - print self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos] - print '>>>' - print self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50] - print '- - - - - - -' - + print('ps error:\n- - - - - - -') + print(self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos]) + print('>>>') + print(self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50]) + print('- - - - - - -') + def handle_object(self, object): if not (self.proclevel or object.literal or object.type == 'proceduretype'): - if object.type <> 'operatortype': + if object.type != 'operatortype': object = self.resolve_name(object.value) if object.literal: self.push(object) @@ -191,21 +191,21 @@ object.function() else: self.push(object) - + def call_procedure(self, proc): handle_object = self.handle_object for item in proc.value: handle_object(item) - + def resolve_name(self, name): dictstack = self.dictstack for i in range(len(dictstack)-1, -1, -1): - if dictstack[i].has_key(name): + if name in dictstack[i]: return dictstack[i][name] - raise PSError, 'name error: ' + str(name) - + raise PSError('name error: ' + str(name)) + def do_token(self, token, - int=int, + int=int, float=float, ps_name=ps_name, ps_integer=ps_integer, @@ -231,16 +231,16 @@ return ps_real(num) else: return ps_integer(num) - + def do_comment(self, token): pass - + def do_literal(self, token): return ps_literal(token[1:]) - + def do_string(self, token): return ps_string(token[1:-1]) - + def do_hexstring(self, token): hexStr = "".join(token[1:-1].split()) if len(hexStr) % 2: @@ -250,7 +250,7 @@ 
cleanstr.append(chr(int(hexStr[i:i+2], 16))) cleanstr = "".join(cleanstr) return ps_string(cleanstr) - + def do_special(self, token): if token == '{': self.proclevel = self.proclevel + 1 @@ -270,22 +270,22 @@ elif token == ']': return ps_name(']') else: - raise PSTokenError, 'huh?' - + raise PSTokenError('huh?') + def push(self, object): self.stack.append(object) - + def pop(self, *types): stack = self.stack if not stack: - raise PSError, 'stack underflow' + raise PSError('stack underflow') object = stack[-1] if types: if object.type not in types: - raise PSError, 'typecheck, expected %s, found %s' % (`types`, object.type) + raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type)) del stack[-1] return object - + def do_makearray(self): array = [] while 1: @@ -295,7 +295,7 @@ array.append(topobject) array.reverse() self.push(ps_array(array)) - + def close(self): """Remove circular references.""" del self.stack @@ -304,11 +304,11 @@ def unpack_item(item): tp = type(item.value) - if tp == types.DictionaryType: + if tp == dict: newitem = {} for key, value in item.value.items(): newitem[key] = unpack_item(value) - elif tp == types.ListType: + elif tp == list: newitem = [None] * len(item.value) for i in range(len(item.value)): newitem[i] = unpack_item(item.value[i]) @@ -319,21 +319,20 @@ return newitem def suckfont(data): - import re - m = re.search(r"/FontName\s+/([^ \t\n\r]+)\s+def", data) + m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data) if m: fontName = m.group(1) else: fontName = None interpreter = PSInterpreter() - interpreter.interpret("/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop") + interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop") interpreter.interpret(data) fontdir = interpreter.dictstack[0]['FontDirectory'].value - if fontdir.has_key(fontName): + if fontName in fontdir: rawfont = fontdir[fontName] else: # fall back, in case fontName wasn't found - fontNames = 
fontdir.keys() + fontNames = list(fontdir.keys()) if len(fontNames) > 1: fontNames.remove("Helvetica") fontNames.sort() diff -Nru fonttools-2.4/Lib/fontTools/misc/psOperators.py fonttools-3.0/Lib/fontTools/misc/psOperators.py --- fonttools-2.4/Lib/fontTools/misc/psOperators.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/psOperators.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,24 +1,27 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + _accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"} -class ps_object: - +class ps_object(object): + literal = 1 access = 0 value = None - + def __init__(self, value): self.value = value self.type = self.__class__.__name__[3:] + "type" - + def __repr__(self): return "<%s %s>" % (self.__class__.__name__[3:], repr(self.value)) class ps_operator(ps_object): - + literal = 0 - + def __init__(self, name, function): self.name = name self.function = function @@ -116,13 +119,12 @@ psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n" for i in range(256): name = encoding[i].value - if name <> '.notdef': + if name != '.notdef': psstring = psstring + "dup %d /%s put\n" % (i, name) return psstring + access + "def\n" def _type1_CharString_repr(charstrings): - items = charstrings.items() - items.sort() + items = sorted(charstrings.items()) return 'xxx' class ps_font(ps_object): @@ -135,8 +137,7 @@ pass else: psstring = psstring + _type1_item_repr(key, value) - items = self.value.items() - items.sort() + items = sorted(self.value.items()) for key, value in items: if key not in _type1_pre_eexec_order + _type1_post_eexec_order: psstring = psstring + _type1_item_repr(key, value) @@ -159,9 +160,7 @@ class ps_dict(ps_object): def __str__(self): psstring = "%d dict dup begin\n" % len(self.value) - items = self.value.items() - items.sort() - dictrepr = "%d dict dup begin\n" % len(items) + items = sorted(self.value.items()) for key, 
value in items: access = _accessstrings[value.access] if access: @@ -172,7 +171,7 @@ return "" class ps_mark(ps_object): - def __init__(self): + def __init__(self): self.value = 'mark' self.type = self.__class__.__name__[3:] + "type" @@ -194,29 +193,29 @@ class ps_string(ps_object): def __str__(self): - return "(%s)" % `self.value`[1:-1] + return "(%s)" % repr(self.value)[1:-1] class ps_integer(ps_object): def __str__(self): - return `self.value` + return repr(self.value) class ps_real(ps_object): def __str__(self): - return `self.value` + return repr(self.value) + +class PSOperators(object): -class PSOperators: - def ps_def(self): - object = self.pop() + obj = self.pop() name = self.pop() - self.dictstack[-1][name.value] = object - + self.dictstack[-1][name.value] = obj + def ps_bind(self): proc = self.pop('proceduretype') self.proc_bind(proc) self.push(proc) - + def proc_bind(self, proc): for i in range(len(proc.value)): item = proc.value[i] @@ -225,167 +224,165 @@ else: if not item.literal: try: - object = self.resolve_name(item.value) + obj = self.resolve_name(item.value) except: pass else: - if object.type == 'operatortype': - proc.value[i] = object - + if obj.type == 'operatortype': + proc.value[i] = obj + def ps_exch(self): if len(self.stack) < 2: - raise RuntimeError, 'stack underflow' + raise RuntimeError('stack underflow') obj1 = self.pop() obj2 = self.pop() self.push(obj1) self.push(obj2) - + def ps_dup(self): if not self.stack: - raise RuntimeError, 'stack underflow' + raise RuntimeError('stack underflow') self.push(self.stack[-1]) - + def ps_exec(self): - object = self.pop() - if object.type == 'proceduretype': - self.call_procedure(object) + obj = self.pop() + if obj.type == 'proceduretype': + self.call_procedure(obj) else: - self.handle_object(object) - + self.handle_object(obj) + def ps_count(self): self.push(ps_integer(len(self.stack))) - + def ps_eq(self): any1 = self.pop() any2 = self.pop() self.push(ps_boolean(any1.value == any2.value)) - + def 
ps_ne(self): any1 = self.pop() any2 = self.pop() - self.push(ps_boolean(any1.value <> any2.value)) - + self.push(ps_boolean(any1.value != any2.value)) + def ps_cvx(self): obj = self.pop() obj.literal = 0 self.push(obj) - + def ps_matrix(self): matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)] self.push(ps_array(matrix)) - + def ps_string(self): num = self.pop('integertype').value self.push(ps_string('\0' * num)) - + def ps_type(self): obj = self.pop() self.push(ps_string(obj.type)) - + def ps_store(self): value = self.pop() key = self.pop() name = key.value for i in range(len(self.dictstack)-1, -1, -1): - if self.dictstack[i].has_key(name): + if name in self.dictstack[i]: self.dictstack[i][name] = value break self.dictstack[-1][name] = value - + def ps_where(self): name = self.pop() # XXX self.push(ps_boolean(0)) - + def ps_systemdict(self): self.push(ps_dict(self.dictstack[0])) - + def ps_userdict(self): self.push(ps_dict(self.dictstack[1])) - + def ps_currentdict(self): self.push(ps_dict(self.dictstack[-1])) - + def ps_currentfile(self): self.push(ps_file(self.tokenizer)) - + def ps_eexec(self): - file = self.pop('filetype').value - file.starteexec() - + f = self.pop('filetype').value + f.starteexec() + def ps_closefile(self): - file = self.pop('filetype').value - file.skipwhite() - file.stopeexec() - + f = self.pop('filetype').value + f.skipwhite() + f.stopeexec() + def ps_cleartomark(self): obj = self.pop() - while obj <> self.mark: + while obj != self.mark: obj = self.pop() - + def ps_readstring(self, - ps_boolean = ps_boolean, - len = len): + ps_boolean=ps_boolean, + len=len): s = self.pop('stringtype') oldstr = s.value - file = self.pop('filetype') + f = self.pop('filetype') #pad = file.value.read(1) # for StringIO, this is faster - file.value.pos = file.value.pos + 1 - newstr = file.value.read(len(oldstr)) + f.value.pos = f.value.pos + 1 + newstr = f.value.read(len(oldstr)) s.value = newstr self.push(s) 
self.push(ps_boolean(len(oldstr) == len(newstr))) - + def ps_known(self): key = self.pop() - dict = self.pop('dicttype', 'fonttype') - self.push(ps_boolean(dict.value.has_key(key.value))) - + d = self.pop('dicttype', 'fonttype') + self.push(ps_boolean(key.value in d.value)) + def ps_if(self): proc = self.pop('proceduretype') - bool = self.pop('booleantype') - if bool.value: + if self.pop('booleantype').value: self.call_procedure(proc) - + def ps_ifelse(self): proc2 = self.pop('proceduretype') proc1 = self.pop('proceduretype') - bool = self.pop('booleantype') - if bool.value: + if self.pop('booleantype').value: self.call_procedure(proc1) else: self.call_procedure(proc2) - + def ps_readonly(self): obj = self.pop() if obj.access < 1: obj.access = 1 self.push(obj) - + def ps_executeonly(self): obj = self.pop() if obj.access < 2: obj.access = 2 self.push(obj) - + def ps_noaccess(self): obj = self.pop() if obj.access < 3: obj.access = 3 self.push(obj) - + def ps_not(self): obj = self.pop('booleantype', 'integertype') if obj.type == 'booleantype': self.push(ps_boolean(not obj.value)) else: self.push(ps_integer(~obj.value)) - + def ps_print(self): str = self.pop('stringtype') - print 'PS output --->', str.value - + print('PS output --->', str.value) + def ps_anchorsearch(self): seek = self.pop('stringtype') s = self.pop('stringtype') @@ -397,23 +394,22 @@ else: self.push(s) self.push(ps_boolean(0)) - + def ps_array(self): num = self.pop('integertype') array = ps_array([None] * num.value) self.push(array) - + def ps_astore(self): array = self.pop('arraytype') for i in range(len(array.value)-1, -1, -1): array.value[i] = self.pop() self.push(array) - + def ps_load(self): name = self.pop() - object = self.resolve_name(name.value) - self.push(object) - + self.push(self.resolve_name(name.value)) + def ps_put(self): obj1 = self.pop() obj2 = self.pop() @@ -426,7 +422,7 @@ elif tp == 'stringtype': index = obj2.value obj3.value = obj3.value[:index] + chr(obj1.value) + 
obj3.value[index+1:] - + def ps_get(self): obj1 = self.pop() if obj1.value == "Encoding": @@ -440,8 +436,8 @@ elif tp == 'stringtype': self.push(ps_integer(ord(obj2.value[obj1.value]))) else: - assert 0, "shouldn't get here" - + assert False, "shouldn't get here" + def ps_getinterval(self): obj1 = self.pop('integertype') obj2 = self.pop('integertype') @@ -451,7 +447,7 @@ self.push(ps_array(obj3.value[obj2.value:obj2.value + obj1.value])) elif tp == 'stringtype': self.push(ps_string(obj3.value[obj2.value:obj2.value + obj1.value])) - + def ps_putinterval(self): obj1 = self.pop('arraytype', 'stringtype') obj2 = self.pop('integertype') @@ -464,17 +460,16 @@ newstr = newstr + obj1.value newstr = newstr + obj3.value[obj2.value + len(obj1.value):] obj3.value = newstr - + def ps_cvn(self): - str = self.pop('stringtype') - self.push(ps_name(str.value)) - + self.push(ps_name(self.pop('stringtype').value)) + def ps_index(self): n = self.pop('integertype').value if n < 0: - raise RuntimeError, 'index may not be negative' + raise RuntimeError('index may not be negative') self.push(self.stack[-1-n]) - + def ps_for(self): proc = self.pop('proceduretype') limit = self.pop('integertype', 'realtype').value @@ -493,7 +488,7 @@ self.push(ps_integer(i)) self.call_procedure(proc) i = i + increment - + def ps_forall(self): proc = self.pop('proceduretype') obj = self.pop('arraytype', 'stringtype', 'dicttype') @@ -510,39 +505,36 @@ for key, value in obj.value.items(): self.push(ps_name(key)) self.push(value) - self.call_procedure(proc) - + self.call_procedure(proc) + def ps_definefont(self): font = self.pop('dicttype') name = self.pop() font = ps_font(font.value) self.dictstack[0]['FontDirectory'].value[name.value] = font self.push(font) - + def ps_findfont(self): name = self.pop() font = self.dictstack[0]['FontDirectory'].value[name.value] self.push(font) - + def ps_pop(self): self.pop() - + def ps_dict(self): - num = self.pop('integertype') - dict = ps_dict({}) - self.push(dict) - + 
self.pop('integertype') + self.push(ps_dict({})) + def ps_begin(self): - dict = self.pop('dicttype') - self.dictstack.append(dict.value) - + self.dictstack.append(self.pop('dicttype').value) + def ps_end(self): if len(self.dictstack) > 2: del self.dictstack[-1] else: - raise RuntimeError, 'dictstack underflow' - + raise RuntimeError('dictstack underflow') + notdef = '.notdef' from fontTools.encodings.StandardEncoding import StandardEncoding -ps_StandardEncoding = map(ps_name, StandardEncoding) - +ps_StandardEncoding = list(map(ps_name, StandardEncoding)) diff -Nru fonttools-2.4/Lib/fontTools/misc/py23.py fonttools-3.0/Lib/fontTools/misc/py23.py --- fonttools-2.4/Lib/fontTools/misc/py23.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/py23.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,161 @@ +"""Python 2/3 compat layer.""" + +from __future__ import print_function, division, absolute_import +import sys + +try: + basestring +except NameError: + basestring = str + +try: + unicode +except NameError: + unicode = str + +try: + unichr + + if sys.maxunicode < 0x10FFFF: + # workarounds for Python 2 "narrow" builds with UCS2-only support. + + _narrow_unichr = unichr + + def unichr(i): + """ + Return the unicode character whose Unicode code is the integer 'i'. + The valid range is 0 to 0x10FFFF inclusive. + + >>> _narrow_unichr(0xFFFF + 1) + Traceback (most recent call last): + File "", line 1, in ? + ValueError: unichr() arg not in range(0x10000) (narrow Python build) + >>> unichr(0xFFFF + 1) == u'\U00010000' + True + >>> unichr(1114111) == u'\U0010FFFF' + True + >>> unichr(0x10FFFF + 1) + Traceback (most recent call last): + File "", line 1, in ? 
+ ValueError: unichr() arg not in range(0x110000) + """ + try: + return _narrow_unichr(i) + except ValueError: + try: + padded_hex_str = hex(i)[2:].zfill(8) + escape_str = "\\U" + padded_hex_str + return escape_str.decode("unicode-escape") + except UnicodeDecodeError: + raise ValueError('unichr() arg not in range(0x110000)') + + import re + _unicode_escape_RE = re.compile(r'\\U[A-Fa-f0-9]{8}') + + def byteord(c): + """ + Given a 8-bit or unicode character, return an integer representing the + Unicode code point of the character. If a unicode argument is given, the + character's code point must be in the range 0 to 0x10FFFF inclusive. + + >>> ord(u'\U00010000') + Traceback (most recent call last): + File "", line 1, in ? + TypeError: ord() expected a character, but string of length 2 found + >>> byteord(u'\U00010000') == 0xFFFF + 1 + True + >>> byteord(u'\U0010FFFF') == 1114111 + True + """ + try: + return ord(c) + except TypeError as e: + try: + escape_str = c.encode('unicode-escape') + if not _unicode_escape_RE.match(escape_str): + raise + hex_str = escape_str[3:] + return int(hex_str, 16) + except: + raise TypeError(e) + + else: + byteord = ord + bytechr = chr + +except NameError: + unichr = chr + def bytechr(n): + return bytes([n]) + def byteord(c): + return c if isinstance(c, int) else ord(c) + + +# the 'io' module provides the same I/O interface on both 2 and 3. +# here we define an alias of io.StringIO to disambiguate it eternally... 
+from io import BytesIO +from io import StringIO as UnicodeIO +try: + # in python 2, by 'StringIO' we still mean a stream of *byte* strings + from StringIO import StringIO +except ImportError: + # in Python 3, we mean instead a stream of *unicode* strings + StringIO = UnicodeIO + + +def strjoin(iterable, joiner=''): + return tostr(joiner).join(iterable) + +def tobytes(s, encoding='ascii', errors='strict'): + if not isinstance(s, bytes): + return s.encode(encoding, errors) + else: + return s +def tounicode(s, encoding='ascii', errors='strict'): + if not isinstance(s, unicode): + return s.decode(encoding, errors) + else: + return s + +if str == bytes: + class Tag(str): + def tobytes(self): + if isinstance(self, bytes): + return self + else: + return self.encode('latin1') + + tostr = tobytes + + bytesjoin = strjoin +else: + class Tag(str): + + @staticmethod + def transcode(blob): + if not isinstance(blob, str): + blob = blob.decode('latin-1') + return blob + + def __new__(self, content): + return str.__new__(self, self.transcode(content)) + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + return str.__eq__(self, self.transcode(other)) + + def __hash__(self): + return str.__hash__(self) + + def tobytes(self): + return self.encode('latin-1') + + tostr = tounicode + + def bytesjoin(iterable, joiner=b''): + return tobytes(joiner).join(tobytes(item) for item in iterable) + + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Lib/fontTools/misc/sstruct.py fonttools-3.0/Lib/fontTools/misc/sstruct.py --- fonttools-2.4/Lib/fontTools/misc/sstruct.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/sstruct.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,211 @@ +"""sstruct.py -- SuperStruct + +Higher level layer on top of the struct module, enabling to +bind names to struct elements. 
The interface is similar to +struct, except the objects passed and returned are not tuples +(or argument lists), but dictionaries or instances. + +Just like struct, we use fmt strings to describe a data +structure, except we use one line per element. Lines are +separated by newlines or semi-colons. Each line contains +either one of the special struct characters ('@', '=', '<', +'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f'). +Repetitions, like the struct module offers them are not useful +in this context, except for fixed length strings (eg. 'myInt:5h' +is not allowed but 'myString:5s' is). The 'x' fmt character +(pad byte) is treated as 'special', since it is by definition +anonymous. Extra whitespace is allowed everywhere. + +The sstruct module offers one feature that the "normal" struct +module doesn't: support for fixed point numbers. These are spelled +as "n.mF", where n is the number of bits before the point, and m +the number of bits after the point. Fixed point numbers get +converted to floats. + +pack(fmt, object): + 'object' is either a dictionary or an instance (or actually + anything that has a __dict__ attribute). If it is a dictionary, + its keys are used for names. If it is an instance, it's + attributes are used to grab struct elements from. Returns + a string containing the data. + +unpack(fmt, data, object=None) + If 'object' is omitted (or None), a new dictionary will be + returned. If 'object' is a dictionary, it will be used to add + struct elements to. If it is an instance (or in fact anything + that has a __dict__ attribute), an attribute will be added for + each struct element. In the latter two cases, 'object' itself + is returned. + +unpack2(fmt, data, object=None) + Convenience function. Same as unpack, except data may be longer + than needed. The returned value is a tuple: (object, leftoverdata). + +calcsize(fmt) + like struct.calcsize(), but uses our own fmt strings: + it returns the size of the data in bytes. 
+""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +import struct +import re + +__version__ = "1.2" +__copyright__ = "Copyright 1998, Just van Rossum " + + +class Error(Exception): + pass + +def pack(fmt, obj): + formatstring, names, fixes = getformat(fmt) + elements = [] + if not isinstance(obj, dict): + obj = obj.__dict__ + for name in names: + value = obj[name] + if name in fixes: + # fixed point conversion + value = fl2fi(value, fixes[name]) + elif isinstance(value, basestring): + value = tobytes(value) + elements.append(value) + data = struct.pack(*(formatstring,) + tuple(elements)) + return data + +def unpack(fmt, data, obj=None): + if obj is None: + obj = {} + data = tobytes(data) + formatstring, names, fixes = getformat(fmt) + if isinstance(obj, dict): + d = obj + else: + d = obj.__dict__ + elements = struct.unpack(formatstring, data) + for i in range(len(names)): + name = names[i] + value = elements[i] + if name in fixes: + # fixed point conversion + value = fi2fl(value, fixes[name]) + elif isinstance(value, bytes): + try: + value = tostr(value) + except UnicodeDecodeError: + pass + d[name] = value + return obj + +def unpack2(fmt, data, obj=None): + length = calcsize(fmt) + return unpack(fmt, data[:length], obj), data[length:] + +def calcsize(fmt): + formatstring, names, fixes = getformat(fmt) + return struct.calcsize(formatstring) + + +# matches "name:formatchar" (whitespace is allowed) +_elementRE = re.compile( + "\s*" # whitespace + "([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier) + "\s*:\s*" # whitespace : whitespace + "([cbBhHiIlLqQfd]|[0-9]+[ps]|" # formatchar... 
+ "([0-9]+)\.([0-9]+)(F))" # ...formatchar + "\s*" # whitespace + "(#.*)?$" # [comment] + end of string + ) + +# matches the special struct fmt chars and 'x' (pad byte) +_extraRE = re.compile("\s*([x@=<>!])\s*(#.*)?$") + +# matches an "empty" string, possibly containing whitespace and/or a comment +_emptyRE = re.compile("\s*(#.*)?$") + +_fixedpointmappings = { + 8: "b", + 16: "h", + 32: "l"} + +_formatcache = {} + +def getformat(fmt): + try: + formatstring, names, fixes = _formatcache[fmt] + except KeyError: + lines = re.split("[\n;]", fmt) + formatstring = "" + names = [] + fixes = {} + for line in lines: + if _emptyRE.match(line): + continue + m = _extraRE.match(line) + if m: + formatchar = m.group(1) + if formatchar != 'x' and formatstring: + raise Error("a special fmt char must be first") + else: + m = _elementRE.match(line) + if not m: + raise Error("syntax error in fmt: '%s'" % line) + name = m.group(1) + names.append(name) + formatchar = m.group(2) + if m.group(3): + # fixed point + before = int(m.group(3)) + after = int(m.group(4)) + bits = before + after + if bits not in [8, 16, 32]: + raise Error("fixed point must be 8, 16 or 32 bits long") + formatchar = _fixedpointmappings[bits] + assert m.group(5) == "F" + fixes[name] = after + formatstring = formatstring + formatchar + _formatcache[fmt] = formatstring, names, fixes + return formatstring, names, fixes + +def _test(): + fmt = """ + # comments are allowed + > # big endian (see documentation for struct) + # empty lines are allowed: + + ashort: h + along: l + abyte: b # a byte + achar: c + astr: 5s + afloat: f; adouble: d # multiple "statements" are allowed + afixed: 16.16F + """ + + print('size:', calcsize(fmt)) + + class foo(object): + pass + + i = foo() + + i.ashort = 0x7fff + i.along = 0x7fffffff + i.abyte = 0x7f + i.achar = "a" + i.astr = "12345" + i.afloat = 0.5 + i.adouble = 0.5 + i.afixed = 1.5 + + data = pack(fmt, i) + print('data:', repr(data)) + print(unpack(fmt, data)) + i2 = foo() + 
unpack(fmt, data, i2) + print(vars(i2)) + +if __name__ == "__main__": + _test() diff -Nru fonttools-2.4/Lib/fontTools/misc/textTools.py fonttools-3.0/Lib/fontTools/misc/textTools.py --- fonttools-2.4/Lib/fontTools/misc/textTools.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/textTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,28 +1,29 @@ -"""fontTools.misc.textTools.py -- miscelaneous routines.""" +"""fontTools.misc.textTools.py -- miscellaneous routines.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * import string def safeEval(data, eval=eval): """A (kindof) safe replacement for eval.""" - return eval(data, {"__builtins__":{}}, {}) + return eval(data, {"__builtins__":{"True":True,"False":False}}) def readHex(content): """Convert a list of hex strings to binary data.""" - return deHexStr(''.join([ chunk for chunk in content if isinstance(chunk,str) ])) + return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, basestring))) def deHexStr(hexdata): """Convert a hex string to binary data.""" - parts = string.split(hexdata) - hexdata = string.join(parts, "") + hexdata = strjoin(hexdata.split()) if len(hexdata) % 2: hexdata = hexdata + "0" data = [] for i in range(0, len(hexdata), 2): - data.append(chr(string.atoi(hexdata[i:i+2], 16))) - return "".join(data) + data.append(bytechr(int(hexdata[i:i+2], 16))) + return bytesjoin(data) def hexStr(data): @@ -30,59 +31,71 @@ h = string.hexdigits r = '' for c in data: - i = ord(c) + i = byteord(c) r = r + h[(i >> 4) & 0xF] + h[i & 0xF] return r def num2binary(l, bits=32): - all = [] - bin = "" + items = [] + binary = "" for i in range(bits): if l & 0x1: - bin = "1" + bin + binary = "1" + binary else: - bin = "0" + bin + binary = "0" + binary l = l >> 1 if not ((i+1) % 8): - all.append(bin) - bin = "" - if bin: - all.append(bin) - all.reverse() + items.append(binary) + binary = "" + if binary: + items.append(binary) + 
items.reverse() assert l in (0, -1), "number doesn't fit in number of bits" - return string.join(all, " ") + return ' '.join(items) def binary2num(bin): - bin = string.join(string.split(bin), "") + bin = strjoin(bin.split()) l = 0 for digit in bin: l = l << 1 - if digit <> "0": + if digit != "0": l = l | 0x1 return l def caselessSort(alist): - """Return a sorted copy of a list. If there are only strings + """Return a sorted copy of a list. If there are only strings in the list, it will not consider case. """ - + try: - # turn ['FOO', 'aaBc', 'ABcD'] into - # [('foo', 'FOO'), ('aabc', 'aaBc'), ('abcd', 'ABcD')], - # but only if all elements are strings - tupledlist = map(lambda item, lower = string.lower: - (lower(item), item), alist) + return sorted(alist, key=lambda a: (a.lower(), a)) except TypeError: - # at least one element in alist is not a string, proceed the normal way... - alist = alist[:] - alist.sort() - return alist - else: - tupledlist.sort() - # turn [('aabc', 'aaBc'), ('abcd', 'ABcD'), ('foo', 'FOO')] into - # ['aaBc', 'ABcD', 'FOO'] - return map(lambda x: x[1], tupledlist) + return sorted(alist) + + +def pad(data, size): + r""" Pad byte string 'data' with null bytes until its length is a + multiple of 'size'. + + >>> len(pad(b'abcd', 4)) + 4 + >>> len(pad(b'abcde', 2)) + 6 + >>> len(pad(b'abcde', 4)) + 8 + >>> pad(b'abcdef', 4) == b'abcdef\x00\x00' + True + """ + data = tobytes(data) + if size > 1: + while len(data) % size != 0: + data += b"\0" + return data + +if __name__ == "__main__": + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Lib/fontTools/misc/timeTools.py fonttools-3.0/Lib/fontTools/misc/timeTools.py --- fonttools-2.4/Lib/fontTools/misc/timeTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/timeTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,22 @@ +"""fontTools.misc.timeTools.py -- tools for working with OpenType timestamps. 
+""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import time +import calendar + + +epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0)) + +def timestampToString(value): + return time.asctime(time.gmtime(max(0, value + epoch_diff))) + +def timestampFromString(value): + return calendar.timegm(time.strptime(value)) - epoch_diff + +def timestampNow(): + return int(time.time() - epoch_diff) + +def timestampSinceEpoch(value): + return int(value - epoch_diff) diff -Nru fonttools-2.4/Lib/fontTools/misc/transform.py fonttools-3.0/Lib/fontTools/misc/transform.py --- fonttools-2.4/Lib/fontTools/misc/transform.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/transform.py 2015-08-31 17:57:15.000000000 +0000 @@ -45,6 +45,8 @@ >>> """ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * __all__ = ["Transform", "Identity", "Offset", "Scale"] @@ -64,7 +66,7 @@ return v -class Transform: +class Transform(object): """2x2 transformation matrix plus offset, a.k.a. Affine transform. Transform instances are immutable: all transforming methods, eg. @@ -77,7 +79,7 @@ >>> t.scale(2) >>> t.scale(2.5, 5.5) - + >>> >>> t.scale(2, 3).transformPoint((100, 100)) (200, 300) @@ -96,7 +98,7 @@ """ self.__affine = xx, xy, yx, yy, dx, dy - def transformPoint(self, (x, y)): + def transformPoint(self, p): """Transform a point. 
Example: @@ -105,6 +107,7 @@ >>> t.transformPoint((100, 100)) (250.0, 550.0) """ + (x, y) = p xx, xy, yx, yy, dx, dy = self.__affine return (xx*x + yx*y + dx, xy*x + yy*y + dy) @@ -169,7 +172,7 @@ >>> import math >>> t = Transform() >>> t.skew(math.pi / 4) - + >>> """ import math @@ -233,7 +236,7 @@ if self.__affine == (1, 0, 0, 1, 0, 0): return self xx, xy, yx, yy, dx, dy = self.__affine - det = float(xx*yy - yx*xy) + det = xx*yy - yx*xy xx, xy, yx, yy = yy/det, -xy/det, -yx/det, xx/det dx, dy = -xx*dx - yx*dy, -xy*dx - yy*dy return self.__class__(xx, xy, yx, yy, dx, dy) @@ -265,19 +268,9 @@ """ return self.__affine[index] - def __getslice__(self, i, j): - """Transform instances also behave like sequences and even support - slicing: - >>> t = Offset(100, 200) - >>> t - - >>> t[4:] - (100, 200) - >>> - """ - return self.__affine[i:j] - - def __cmp__(self, other): + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): """Transform instances are comparable: >>> t1 = Identity.scale(2, 3).translate(4, 6) >>> t2 = Identity.translate(8, 18).scale(2, 3) @@ -289,17 +282,17 @@ >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) >>> t1 - + >>> t2 - + >>> t1 == t2 0 >>> """ xx1, xy1, yx1, yy1, dx1, dy1 = self.__affine xx2, xy2, yx2, yy2, dx2, dy2 = other - return cmp((xx1, xy1, yx1, yy1, dx1, dy1), - (xx2, xy2, yx2, yy2, dx2, dy2)) + return (xx1, xy1, yx1, yy1, dx1, dy1) == \ + (xx2, xy2, yx2, yy2, dx2, dy2) def __hash__(self): """Transform instances are hashable, meaning you can use them as @@ -313,23 +306,23 @@ >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) >>> t1 - + >>> t2 - + >>> d = {t1: None} >>> d - {: None} + {: None} >>> d[t2] Traceback (most recent call last): File "", line 1, in ? 
- KeyError: + KeyError: >>> """ return hash(self.__affine) def __repr__(self): - return "<%s [%s %s %s %s %s %s]>" % ((self.__class__.__name__,) - + tuple(map(str, self.__affine))) + return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) \ + + self.__affine) Identity = Transform() @@ -358,9 +351,7 @@ return Transform(x, 0, 0, y, 0, 0) -def _test(): - import doctest, transform - return doctest.testmod(transform) - if __name__ == "__main__": - _test() + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Lib/fontTools/misc/xmlReader.py fonttools-3.0/Lib/fontTools/misc/xmlReader.py --- fonttools-2.4/Lib/fontTools/misc/xmlReader.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/xmlReader.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,131 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.misc.textTools import safeEval +from fontTools.ttLib.tables.DefaultTable import DefaultTable +import os + + +class TTXParseError(Exception): pass + +BUFSIZE = 0x4000 + + +class XMLReader(object): + + def __init__(self, fileName, ttFont, progress=None, quiet=False): + self.ttFont = ttFont + self.fileName = fileName + self.progress = progress + self.quiet = quiet + self.root = None + self.contentStack = [] + self.stackSize = 0 + + def read(self): + if self.progress: + import stat + self.progress.set(0, os.stat(self.fileName)[stat.ST_SIZE] // 100 or 1) + file = open(self.fileName, 'rb') + self._parseFile(file) + file.close() + + def _parseFile(self, file): + from xml.parsers.expat import ParserCreate + parser = ParserCreate() + parser.StartElementHandler = self._startElementHandler + parser.EndElementHandler = self._endElementHandler + parser.CharacterDataHandler = self._characterDataHandler + + pos = 0 + while True: + chunk = file.read(BUFSIZE) + if not chunk: + parser.Parse(chunk, 1) + break + pos = pos + len(chunk) 
+ if self.progress: + self.progress.set(pos // 100) + parser.Parse(chunk, 0) + + def _startElementHandler(self, name, attrs): + stackSize = self.stackSize + self.stackSize = stackSize + 1 + if not stackSize: + if name != "ttFont": + raise TTXParseError("illegal root tag: %s" % name) + sfntVersion = attrs.get("sfntVersion") + if sfntVersion is not None: + if len(sfntVersion) != 4: + sfntVersion = safeEval('"' + sfntVersion + '"') + self.ttFont.sfntVersion = sfntVersion + self.contentStack.append([]) + elif stackSize == 1: + subFile = attrs.get("src") + if subFile is not None: + subFile = os.path.join(os.path.dirname(self.fileName), subFile) + subReader = XMLReader(subFile, self.ttFont, self.progress, self.quiet) + subReader.read() + self.contentStack.append([]) + return + tag = ttLib.xmlToTag(name) + msg = "Parsing '%s' table..." % tag + if self.progress: + self.progress.setlabel(msg) + elif self.ttFont.verbose: + ttLib.debugmsg(msg) + else: + if not self.quiet: + print(msg) + if tag == "GlyphOrder": + tableClass = ttLib.GlyphOrder + elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])): + tableClass = DefaultTable + else: + tableClass = ttLib.getTableClass(tag) + if tableClass is None: + tableClass = DefaultTable + if tag == 'loca' and tag in self.ttFont: + # Special-case the 'loca' table as we need the + # original if the 'glyf' table isn't recompiled. 
+ self.currentTable = self.ttFont[tag] + else: + self.currentTable = tableClass(tag) + self.ttFont[tag] = self.currentTable + self.contentStack.append([]) + elif stackSize == 2: + self.contentStack.append([]) + self.root = (name, attrs, self.contentStack[-1]) + else: + l = [] + self.contentStack[-1].append((name, attrs, l)) + self.contentStack.append(l) + + def _characterDataHandler(self, data): + if self.stackSize > 1: + self.contentStack[-1].append(data) + + def _endElementHandler(self, name): + self.stackSize = self.stackSize - 1 + del self.contentStack[-1] + if self.stackSize == 1: + self.root = None + elif self.stackSize == 2: + name, attrs, content = self.root + self.currentTable.fromXML(name, attrs, content, self.ttFont) + self.root = None + + +class ProgressPrinter(object): + + def __init__(self, title, maxval=100): + print(title) + + def set(self, val, maxval=None): + pass + + def increment(self, val=1): + pass + + def setLabel(self, text): + print(text) diff -Nru fonttools-2.4/Lib/fontTools/misc/xmlReader_test.py fonttools-3.0/Lib/fontTools/misc/xmlReader_test.py --- fonttools-2.4/Lib/fontTools/misc/xmlReader_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/xmlReader_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +import os +import unittest +from fontTools.ttLib import TTFont +from .xmlReader import XMLReader +import tempfile + + +class TestXMLReader(unittest.TestCase): + + def test_decode_utf8(self): + + class DebugXMLReader(XMLReader): + + def __init__(self, fileName, ttFont, progress=None, quiet=False): + super(DebugXMLReader, self).__init__( + fileName, ttFont, progress, quiet) + self.contents = [] + + def _endElementHandler(self, name): + if self.stackSize == 3: + name, attrs, content = self.root + self.contents.append(content) + super(DebugXMLReader, 
self)._endElementHandler(name) + + expected = 'fôôbär' + data = '''\ + + + + + %s + + + +''' % expected + + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.write(data.encode('utf-8')) + reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) + reader.read() + os.remove(tmp.name) + content = strjoin(reader.contents[0]).strip() + self.assertEqual(expected, content) + + def test_normalise_newlines(self): + + class DebugXMLReader(XMLReader): + + def __init__(self, fileName, ttFont, progress=None, quiet=False): + super(DebugXMLReader, self).__init__( + fileName, ttFont, progress, quiet) + self.newlines = [] + + def _characterDataHandler(self, data): + self.newlines.extend([c for c in data if c in ('\r', '\n')]) + + # notice how when CR is escaped, it is not normalised by the XML parser + data = ( + '\r' # \r -> \n + ' \r\n' # \r\n -> \n + ' a line of text\n' # \n + ' escaped CR and unix newline \n' # \n -> \r\n + ' escaped CR and macintosh newline \r' # \r -> \r\n + ' escaped CR and windows newline \r\n' # \r\n -> \r\n + ' \n' # \n + '') + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.write(data.encode('utf-8')) + reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) + reader.read() + os.remove(tmp.name) + expected = ['\n'] * 3 + ['\r', '\n'] * 3 + ['\n'] + self.assertEqual(expected, reader.newlines) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/misc/xmlWriter.py fonttools-3.0/Lib/fontTools/misc/xmlWriter.py --- fonttools-2.4/Lib/fontTools/misc/xmlWriter.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/xmlWriter.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,180 @@ +"""xmlWriter.py -- Simple XML authoring class""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +import os +import string + +INDENT = " " + + +class XMLWriter(object): + + def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, 
encoding="utf_8"): + if encoding.lower().replace('-','').replace('_','') != 'utf8': + raise Exception('Only UTF-8 encoding is supported.') + if fileOrPath == '-': + fileOrPath = sys.stdout + if not hasattr(fileOrPath, "write"): + self.file = open(fileOrPath, "wb") + else: + # assume writable file object + self.file = fileOrPath + + # Figure out if writer expects bytes or unicodes + try: + # The bytes check should be first. See: + # https://github.com/behdad/fonttools/pull/233 + self.file.write(b'') + self.totype = tobytes + except TypeError: + # This better not fail. + self.file.write(tounicode('')) + self.totype = tounicode + self.indentwhite = self.totype(indentwhite) + self.newlinestr = self.totype(os.linesep) + self.indentlevel = 0 + self.stack = [] + self.needindent = 1 + self.idlefunc = idlefunc + self.idlecounter = 0 + self._writeraw('') + self.newline() + + def close(self): + self.file.close() + + def write(self, string, indent=True): + """Writes text.""" + self._writeraw(escape(string), indent=indent) + + def writecdata(self, string): + """Writes text in a CDATA section.""" + self._writeraw("") + + def write8bit(self, data, strip=False): + """Writes a bytes() sequence into the XML, escaping + non-ASCII bytes. 
When this is read in xmlReader, + the original bytes can be recovered by encoding to + 'latin-1'.""" + self._writeraw(escape8bit(data), strip=strip) + + def write_noindent(self, string): + """Writes text without indentation.""" + self._writeraw(escape(string), indent=False) + + def _writeraw(self, data, indent=True, strip=False): + """Writes bytes, possibly indented.""" + if indent and self.needindent: + self.file.write(self.indentlevel * self.indentwhite) + self.needindent = 0 + s = self.totype(data, encoding="utf_8") + if (strip): + s = s.strip() + self.file.write(s) + + def newline(self): + self.file.write(self.newlinestr) + self.needindent = 1 + idlecounter = self.idlecounter + if not idlecounter % 100 and self.idlefunc is not None: + self.idlefunc() + self.idlecounter = idlecounter + 1 + + def comment(self, data): + data = escape(data) + lines = data.split("\n") + self._writeraw("") + + def simpletag(self, _TAG_, *args, **kwargs): + attrdata = self.stringifyattrs(*args, **kwargs) + data = "<%s%s/>" % (_TAG_, attrdata) + self._writeraw(data) + + def begintag(self, _TAG_, *args, **kwargs): + attrdata = self.stringifyattrs(*args, **kwargs) + data = "<%s%s>" % (_TAG_, attrdata) + self._writeraw(data) + self.stack.append(_TAG_) + self.indent() + + def endtag(self, _TAG_): + assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag" + del self.stack[-1] + self.dedent() + data = "" % _TAG_ + self._writeraw(data) + + def dumphex(self, data): + linelength = 16 + hexlinelength = linelength * 2 + chunksize = 8 + for i in range(0, len(data), linelength): + hexline = hexStr(data[i:i+linelength]) + line = "" + white = "" + for j in range(0, hexlinelength, chunksize): + line = line + white + hexline[j:j+chunksize] + white = " " + self._writeraw(line) + self.newline() + + def indent(self): + self.indentlevel = self.indentlevel + 1 + + def dedent(self): + assert self.indentlevel > 0 + self.indentlevel = self.indentlevel - 1 + + def stringifyattrs(self, *args, 
**kwargs): + if kwargs: + assert not args + attributes = sorted(kwargs.items()) + elif args: + assert len(args) == 1 + attributes = args[0] + else: + return "" + data = "" + for attr, value in attributes: + if not isinstance(value, (bytes, unicode)): + value = str(value) + data = data + ' %s="%s"' % (attr, escapeattr(value)) + return data + + +def escape(data): + data = tostr(data, 'utf_8') + data = data.replace("&", "&") + data = data.replace("<", "<") + data = data.replace(">", ">") + data = data.replace("\r", " ") + return data + +def escapeattr(data): + data = escape(data) + data = data.replace('"', """) + return data + +def escape8bit(data): + """Input is Unicode string.""" + def escapechar(c): + n = ord(c) + if 32 <= n <= 127 and c not in "<&>": + return c + else: + return "&#" + repr(n) + ";" + return strjoin(map(escapechar, data.decode('latin-1'))) + +def hexStr(s): + h = string.hexdigits + r = '' + for c in s: + i = byteord(c) + r = r + h[(i >> 4) & 0xF] + h[i & 0xF] + return r diff -Nru fonttools-2.4/Lib/fontTools/misc/xmlWriter_test.py fonttools-3.0/Lib/fontTools/misc/xmlWriter_test.py --- fonttools-2.4/Lib/fontTools/misc/xmlWriter_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/misc/xmlWriter_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,111 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import os +import unittest +from .xmlWriter import XMLWriter + +linesep = tobytes(os.linesep) +HEADER = b'' + linesep + +class TestXMLWriter(unittest.TestCase): + + def test_comment_escaped(self): + writer = XMLWriter(BytesIO()) + writer.comment("This&that are ") + self.assertEqual(HEADER + b"", writer.file.getvalue()) + + def test_comment_multiline(self): + writer = XMLWriter(BytesIO()) + writer.comment("Hello world\nHow are you?") + self.assertEqual(HEADER + b"", + writer.file.getvalue()) + + def test_encoding_default(self): + writer = XMLWriter(BytesIO()) + 
self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_utf8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="utf8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_UTF_8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="UTF-8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_UTF8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="UTF8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_other(self): + self.assertRaises(Exception, XMLWriter, BytesIO(), + encoding="iso-8859-1") + + def test_write(self): + writer = XMLWriter(BytesIO()) + writer.write("foo&bar") + self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue()) + + def test_indent_dedent(self): + writer = XMLWriter(BytesIO()) + writer.write("foo") + writer.newline() + writer.indent() + writer.write("bar") + writer.newline() + writer.dedent() + writer.write("baz") + self.assertEqual(HEADER + bytesjoin(["foo", " bar", "baz"], linesep), + writer.file.getvalue()) + + def test_writecdata(self): + writer = XMLWriter(BytesIO()) + writer.writecdata("foo&bar") + self.assertEqual(HEADER + b"", writer.file.getvalue()) + + def test_simpletag(self): + writer = XMLWriter(BytesIO()) + writer.simpletag("tag", a="1", b="2") + self.assertEqual(HEADER + b'', writer.file.getvalue()) + + def test_begintag_endtag(self): + writer = XMLWriter(BytesIO()) + writer.begintag("tag", attr="value") + writer.write("content") + writer.endtag("tag") + self.assertEqual(HEADER + b'content', writer.file.getvalue()) + + def test_dumphex(self): + writer = XMLWriter(BytesIO()) + writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.") + self.assertEqual(HEADER + bytesjoin([ + "54797065 20697320 61206265 61757469", + "66756c20 67726f75 70206f66 
206c6574", + "74657273 2c206e6f 74206120 67726f75", + "70206f66 20626561 75746966 756c206c", + "65747465 72732e ", ""], joiner=linesep), writer.file.getvalue()) + + def test_stringifyattrs(self): + writer = XMLWriter(BytesIO()) + expected = ' attr="0"' + self.assertEqual(expected, writer.stringifyattrs(attr=0)) + self.assertEqual(expected, writer.stringifyattrs(attr=b'0')) + self.assertEqual(expected, writer.stringifyattrs(attr='0')) + self.assertEqual(expected, writer.stringifyattrs(attr=u'0')) + + def test_carriage_return_escaped(self): + writer = XMLWriter(BytesIO()) + writer.write("two lines\r\nseparated by Windows line endings") + self.assertEqual( + HEADER + b'two lines \nseparated by Windows line endings', + writer.file.getvalue()) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/nfntLib.py fonttools-3.0/Lib/fontTools/nfntLib.py --- fonttools-2.4/Lib/fontTools/nfntLib.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/nfntLib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,303 +0,0 @@ -import struct, sstruct -import string -import types - - -# FontRec header -nfntHeaderFormat = """ - > # big endian - fontType: h # font type - firstChar: h # ASCII code of first character - lastChar: h # ASCII code of last character - widMax: h # maximum character width - kernMax: h # negative of maximum character kern - nDescent: h # negative of descent - fRectWidth: h # width of font rectangle - fRectHeight: h # height of font rectangle - owTLoc: H # offset to offset/width table (in words from _this_ point) - ascent: h # ascent - descent: h # descent - leading: h # leading - rowWords: h # row width of bit image / 2 -""" -headerSize = sstruct.calcsize(nfntHeaderFormat) -assert headerSize == 26 - - -class NFNT: - - def __init__(self, data=None): - if data is not None: - self.decompile(data) - - def decompile(self, data): - # header; FontRec - sstruct.unpack(nfntHeaderFormat, data[:headerSize], self) - - #assert 
self.fRectHeight == (self.ascent + self.descent) - - # rest - tableSize = 2 * (self.lastChar - self.firstChar + 3) - bitmapSize = 2 * self.rowWords * self.fRectHeight - - self.bits = data[headerSize:headerSize + bitmapSize] - - # XXX deal with self.nDescent being a positive number - assert (headerSize + bitmapSize + tableSize - 16) / 2 == self.owTLoc # ugh... - - locTable = data[headerSize + bitmapSize:headerSize + bitmapSize + tableSize] - if len(locTable) <> tableSize: - raise ValueError, 'invalid NFNT format' - - owTable = data[headerSize + bitmapSize + tableSize:headerSize + bitmapSize + 2 * tableSize] - if len(owTable) <> tableSize: - raise ValueError, 'invalid NFNT format' - - # fill tables - self.offsetTable = [] - self.widthTable = [] - self.locTable = [] - for i in range(0, tableSize, 2): - self.offsetTable.append(ord(owTable[i])) - self.widthTable.append(ord(owTable[i+1])) - loc, = struct.unpack("h", locTable[i:i+2]) - self.locTable.append(loc) - - def compile(self): - header = sstruct.pack(nfntHeaderFormat, self) - nEntries = len(self.widthTable) - owTable = [None] * nEntries - locTable = [None] * nEntries - for i in range(nEntries): - owTable[i] = chr(self.offsetTable[i]) + chr(self.widthTable[i]) - locTable[i] = struct.pack("h", self.locTable[i]) - owTable = string.join(owTable, "") - locTable = string.join(locTable, "") - assert len(locTable) == len(owTable) == 2 * (self.lastChar - self.firstChar + 3) - return header + self.bits + locTable + owTable - - def unpackGlyphs(self): - import numpy - nGlyphs = len(self.locTable) - 1 - self.glyphs = [None] * nGlyphs - - rowBytes = self.rowWords * 2 - imageWidth = self.rowWords * 16 - imageHeight = self.fRectHeight - bits = self.bits - bitImage = numpy.zeros((imageWidth, imageHeight), numpy.int8) - - for y in range(imageHeight): - for xByte in range(rowBytes): - byte = bits[y * rowBytes + xByte] - for xBit in range(8): - x = 8 * xByte + xBit - bit = (ord(byte) >> (7 - xBit)) & 0x01 - bitImage[x, y] = bit - - 
for i in range(nGlyphs): - width = self.widthTable[i] - offset = self.offsetTable[i] - if width == 255 and offset == 255: - self.glyphs[i] = None - else: - imageL = self.locTable[i] - imageR = self.locTable[i+1] - imageWidth = imageR - imageL - offset = offset + self.kernMax - self.glyphs[i] = glyph = Glyph(width, offset, bitImage[imageL:imageR]) - - def packGlyphs(self): - import numpy - imageWidth = 0 - kernMax = 0 - imageHeight = None - widMax = 0 - fRectWidth = 0 - for glyph in self.glyphs: - if glyph is None: - continue - if imageHeight is None: - imageHeight = glyph.pixels.shape[1] - else: - assert imageHeight == glyph.pixels.shape[1] - imageWidth = imageWidth + glyph.pixels.shape[0] - kernMax = min(kernMax, glyph.offset) - widMax = max(widMax, glyph.width) - fRectWidth = max(fRectWidth, glyph.pixels.shape[0] + glyph.offset) - - fRectWidth = fRectWidth - kernMax - imageWidth = 16 * ((imageWidth - 1) / 16 + 1) - rowBytes = imageWidth / 8 - rowWords = rowBytes / 2 - bitImage = numpy.zeros((imageWidth, imageHeight), numpy.int8) - locTable = [] - widthTable = [] - offsetTable = [] - loc = 0 - for glyph in self.glyphs: - locTable.append(loc) - if glyph is None: - widthTable.append(255) - offsetTable.append(255) - continue - widthTable.append(glyph.width) - offsetTable.append(glyph.offset - kernMax) - imageWidth = glyph.pixels.shape[0] - bitImage[loc:loc+imageWidth] = glyph.pixels - loc = loc + imageWidth - - locTable.append(loc) - widthTable.append(255) - offsetTable.append(255) - - bits = [] - for y in range(imageHeight): - for xByte in range(rowBytes): - byte = 0 - for x in range(8): - byte = byte | ((bitImage[8 * xByte + x, y] & 0x01) << (7 - x)) - bits.append(chr(byte)) - bits = string.join(bits, "") - - # assign values - self.fontType = 0x9000 - self.lastChar = self.firstChar + len(self.glyphs) - 2 - self.widMax = widMax - self.kernMax = kernMax - self.descent = imageHeight - self.ascent - self.nDescent = -self.descent - self.fRectWidth = fRectWidth - 
self.fRectHeight = imageHeight - self.rowWords = rowWords - - tableSize = 2 * (self.lastChar - self.firstChar + 3) - self.owTLoc = (headerSize + len(bits) + tableSize - 16) / 2 - - self.bits = bits - self.locTable = locTable - self.widthTable = widthTable - self.offsetTable = offsetTable - - def getMissing(self): - return self.glyphs[-1] - - def __getitem__(self, charNum): - if charNum > self.lastChar or charNum < 0: - raise IndexError, "no such character" - index = charNum - self.firstChar - if index < 0: - return None - return self.glyphs[index] - - def __setitem__(self, charNum, glyph): - if charNum > self.lastChar or charNum < 0: - raise IndexError, "no such character" - index = charNum - self.firstChar - if index < 0: - raise IndexError, "no such character" - self.glyphs[index] = glyph - - def __len__(self): - return len(self.locTable) - 2 + self.firstChar - - # - # XXX old cruft - # - - def createQdBitImage(self): - import Qd - self.bitImage = Qd.BitMap(self.bits, 2 * self.rowWords, (0, 0, self.rowWords * 16, self.fRectHeight)) - - def drawstring(self, astring, destbits, xOffset=0, yOffset=0): - drawchar = self.drawchar - for ch in astring: - xOffset = drawchar(ch, destbits, xOffset, yOffset) - return xOffset - - def drawchar(self, ch, destbits, xOffset, yOffset=0): - import Qd - width, bounds, destbounds = self.getcharbounds(ch) - destbounds = Qd.OffsetRect(destbounds, xOffset, yOffset) - Qd.CopyBits(self.bitImage, destbits, bounds, destbounds, 1, None) - return xOffset + width - - def stringwidth(self, astring): - charwidth = self.charwidth - width = 0 - for ch in astring: - width = width + charwidth(ch) - return width - - def charwidth(self, ch): - cindex = ord(ch) - self.firstChar - if cindex > self.lastChar or \ - (self.offsetTable[cindex] == 255 and self.widthTable[cindex] == 255): - cindex = -2 # missing char - return self.widthTable[cindex] - - def getcharbounds(self, ch): - cindex = ord(ch) - self.firstChar - if cindex > self.lastChar or \ - 
(self.offsetTable[cindex] == 255 and self.widthTable[cindex] == 255): - return self.getcharboundsindex(-2) # missing char - return self.getcharboundsindex(cindex) - - def getcharboundsindex(self, cindex): - offset = self.offsetTable[cindex] - width = self.widthTable[cindex] - if offset == 255 and width == 255: - raise ValueError, "character not defined" - location0 = self.locTable[cindex] - location1 = self.locTable[cindex + 1] - srcbounds = (location0, 0, location1, self.fRectHeight) - destbounds = ( offset + self.kernMax, - 0, - offset + self.kernMax + location1 - location0, - self.fRectHeight ) - return width, srcbounds, destbounds - - -class Glyph: - - def __init__(self, width, offset, pixels=None, pixelDepth=1): - self.width = width - self.offset = offset - self.pixelDepth = pixelDepth - self.pixels = pixels - - -def dataFromFile(pathOrFSSpec, nameOrID="", resType='NFNT'): - from Carbon import Res - resref = Res.FSOpenResFile(pathOrFSSpec, 1) # readonly - try: - Res.UseResFile(resref) - if not nameOrID: - # just take the first in the file - res = Res.Get1IndResource(resType, 1) - elif type(nameOrID) == types.IntType: - res = Res.Get1Resource(resType, nameOrID) - else: - res = Res.Get1NamedResource(resType, nameOrID) - theID, theType, name = res.GetResInfo() - data = res.data - finally: - Res.CloseResFile(resref) - return data - - -def fromFile(pathOrFSSpec, nameOrID="", resType='NFNT'): - data = dataFromFile(pathOrFSSpec, nameOrID, resType) - return NFNT(data) - - -if __name__ == "__main__": - import EasyDialogs - path = EasyDialogs.AskFileForOpen() - if path: - data = dataFromFile(path) - font = NFNT(data) - font.unpackGlyphs() - font.packGlyphs() - data2 = font.compile() - print "xxxxx", data == data2, len(data) == len(data2) diff -Nru fonttools-2.4/Lib/fontTools/pens/basePen.py fonttools-3.0/Lib/fontTools/pens/basePen.py --- fonttools-2.4/Lib/fontTools/pens/basePen.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/basePen.py 
2015-08-31 17:57:15.000000000 +0000 @@ -10,7 +10,7 @@ The most basic pattern is this: - outline.draw(pen) # 'outline' draws itself onto 'pen' + outline.draw(pen) # 'outline' draws itself onto 'pen' Pens can be used to render outlines to the screen, but also to construct new outlines. Eg. an outline object can be both a drawable object (it has a @@ -36,9 +36,11 @@ sequence of length 2 will do. """ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * -__all__ = ["AbstractPen", "BasePen", - "decomposeSuperBezierSegment", "decomposeQuadraticSegment"] +__all__ = ["AbstractPen", "NullPen", "BasePen", + "decomposeSuperBezierSegment", "decomposeQuadraticSegment"] class AbstractPen(object): @@ -112,6 +114,33 @@ raise NotImplementedError +class NullPen(object): + + """A pen that does nothing. + """ + + def moveTo(self, pt): + pass + + def lineTo(self, pt): + pass + + def curveTo(self, *points): + pass + + def qCurveTo(self, *points): + pass + + def closePath(self): + pass + + def endPath(self): + pass + + def addComponent(self, glyphName, transformation): + pass + + class BasePen(AbstractPen): """Base class for drawing pens. You must override _moveTo, _lineTo and @@ -221,7 +250,7 @@ elif n == 0: self.lineTo(points[0]) else: - raise AssertionError, "can't get there from here" + raise AssertionError("can't get there from here") def qCurveTo(self, *points): n = len(points) - 1 # 'n' is the number of control points @@ -269,9 +298,8 @@ for i in range(2, n+1): # calculate points in between control points. 
nDivisions = min(i, 3, n-i+2) - d = float(nDivisions) for j in range(1, nDivisions): - factor = j / d + factor = j / nDivisions temp1 = points[i-1] temp2 = points[i-2] temp = (temp2[0] + factor * (temp1[0] - temp2[0]), @@ -279,8 +307,8 @@ if pt2 is None: pt2 = temp else: - pt3 = (0.5 * (pt2[0] + temp[0]), - 0.5 * (pt2[1] + temp[1])) + pt3 = (0.5 * (pt2[0] + temp[0]), + 0.5 * (pt2[1] + temp[1])) bezierSegments.append((pt1, pt2, pt3)) pt1, pt2, pt3 = temp, None, None bezierSegments.append((pt1, points[-2], points[-1])) @@ -312,14 +340,14 @@ class _TestPen(BasePen): """Test class that prints PostScript to stdout.""" def _moveTo(self, pt): - print "%s %s moveto" % (pt[0], pt[1]) + print("%s %s moveto" % (pt[0], pt[1])) def _lineTo(self, pt): - print "%s %s lineto" % (pt[0], pt[1]) + print("%s %s lineto" % (pt[0], pt[1])) def _curveToOne(self, bcp1, bcp2, pt): - print "%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1], - bcp2[0], bcp2[1], pt[0], pt[1]) + print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1], + bcp2[0], bcp2[1], pt[0], pt[1])) def _closePath(self): - print "closepath" + print("closepath") if __name__ == "__main__": diff -Nru fonttools-2.4/Lib/fontTools/pens/basePen_test.py fonttools-3.0/Lib/fontTools/pens/basePen_test.py --- fonttools-2.4/Lib/fontTools/pens/basePen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/basePen_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,171 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import \ + BasePen, decomposeSuperBezierSegment, decomposeQuadraticSegment +import unittest + + +class _TestPen(BasePen): + def __init__(self): + BasePen.__init__(self, glyphSet={}) + self._commands = [] + + def __repr__(self): + return " ".join(self._commands) + + def getCurrentPoint(self): + return self._getCurrentPoint() + + def _moveTo(self, pt): + self._commands.append("%s %s moveto" % (pt[0], pt[1])) + + def 
_lineTo(self, pt): + self._commands.append("%s %s lineto" % (pt[0], pt[1])) + + def _curveToOne(self, bcp1, bcp2, pt): + self._commands.append("%s %s %s %s %s %s curveto" % + (bcp1[0], bcp1[1], + bcp2[0], bcp2[1], + pt[0], pt[1])) + + def _closePath(self): + self._commands.append("closepath") + + def _endPath(self): + self._commands.append("endpath") + + +class _TestGlyph: + def draw(self, pen): + pen.moveTo((0.0, 0.0)) + pen.lineTo((0.0, 100.0)) + pen.curveTo((50.0, 75.0), (60.0, 50.0), (50.0, 25.0), (0.0, 0.0)) + pen.closePath() + + +class BasePenTest(unittest.TestCase): + def test_moveTo(self): + pen = _TestPen() + pen.moveTo((0.5, -4.3)) + self.assertEqual("0.5 -4.3 moveto", repr(pen)) + self.assertEqual((0.5, -4.3), pen.getCurrentPoint()) + + def test_lineTo(self): + pen = _TestPen() + pen.moveTo((4, 5)) + pen.lineTo((7, 8)) + self.assertEqual("4 5 moveto 7 8 lineto", repr(pen)) + self.assertEqual((7, 8), pen.getCurrentPoint()) + + def test_curveTo_zeroPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + self.assertRaises(AssertionError, pen.curveTo) + + def test_curveTo_onePoint(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((1.0, 1.1)) + self.assertEqual("0.0 0.0 moveto 1.0 1.1 lineto", repr(pen)) + self.assertEqual((1.0, 1.1), pen.getCurrentPoint()) + + def test_curveTo_twoPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((6.0, 3.0), (3.0, 6.0)) + self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", + repr(pen)) + self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) + + def test_curveTo_manyPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((1.0, 1.1), (2.0, 2.1), (3.0, 3.1), (4.0, 4.1)) + self.assertEqual("0.0 0.0 moveto " + "1.0 1.1 1.5 1.6 2.0 2.1 curveto " + "2.5 2.6 3.0 3.1 4.0 4.1 curveto", repr(pen)) + self.assertEqual((4.0, 4.1), pen.getCurrentPoint()) + + def test_qCurveTo_zeroPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + self.assertRaises(AssertionError, 
pen.qCurveTo) + + def test_qCurveTo_onePoint(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((77.7, 99.9)) + self.assertEqual("0.0 0.0 moveto 77.7 99.9 lineto", repr(pen)) + self.assertEqual((77.7, 99.9), pen.getCurrentPoint()) + + def test_qCurveTo_manyPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((6.0, 3.0), (3.0, 6.0)) + self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", + repr(pen)) + self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) + + def test_qCurveTo_onlyOffCurvePoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((6.0, -6.0), (12.0, 12.0), (18.0, -18.0), None) + self.assertEqual("0.0 0.0 moveto " + "12.0 -12.0 moveto " + "8.0 -8.0 7.0 -3.0 9.0 3.0 curveto " + "11.0 9.0 13.0 7.0 15.0 -3.0 curveto " + "17.0 -13.0 16.0 -16.0 12.0 -12.0 curveto", repr(pen)) + self.assertEqual((12.0, -12.0), pen.getCurrentPoint()) + + def test_closePath(self): + pen = _TestPen() + pen.lineTo((3, 4)) + pen.closePath() + self.assertEqual("3 4 lineto closepath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + def test_endPath(self): + pen = _TestPen() + pen.lineTo((3, 4)) + pen.endPath() + self.assertEqual("3 4 lineto endpath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + def test_addComponent(self): + pen = _TestPen() + pen.glyphSet["oslash"] = _TestGlyph() + pen.addComponent("oslash", (2, 3, 0.5, 2, -10, 0)) + self.assertEqual("-10.0 0.0 moveto " + "40.0 200.0 lineto " + "127.5 300.0 131.25 290.0 125.0 265.0 curveto " + "118.75 240.0 102.5 200.0 -10.0 0.0 curveto " + "closepath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + +class DecomposeSegmentTest(unittest.TestCase): + def test_decomposeSuperBezierSegment(self): + decompose = decomposeSuperBezierSegment + self.assertRaises(AssertionError, decompose, []) + self.assertRaises(AssertionError, decompose, [(0, 0)]) + self.assertRaises(AssertionError, decompose, [(0, 0), (1, 1)]) + 
self.assertEqual([((0, 0), (1, 1), (2, 2))], + decompose([(0, 0), (1, 1), (2, 2)])) + self.assertEqual( + [((0, 0), (2, -2), (4, 0)), ((6, 2), (8, 8), (12, -12))], + decompose([(0, 0), (4, -4), (8, 8), (12, -12)])) + + def test_decomposeQuadraticSegment(self): + decompose = decomposeQuadraticSegment + self.assertRaises(AssertionError, decompose, []) + self.assertRaises(AssertionError, decompose, [(0, 0)]) + self.assertEqual([((0,0), (4, 8))], decompose([(0, 0), (4, 8)])) + self.assertEqual([((0,0), (2, 4)), ((4, 8), (9, -9))], + decompose([(0, 0), (4, 8), (9, -9)])) + self.assertEqual( + [((0, 0), (2.0, 4.0)), ((4, 8), (6.5, -0.5)), ((9, -9), (10, 10))], + decompose([(0, 0), (4, 8), (9, -9), (10, 10)])) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/pens/boundsPen.py fonttools-3.0/Lib/fontTools/pens/boundsPen.py --- fonttools-2.4/Lib/fontTools/pens/boundsPen.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/boundsPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,6 +1,8 @@ -from fontTools.pens.basePen import BasePen +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds +from fontTools.pens.basePen import BasePen __all__ = ["BoundsPen", "ControlBoundsPen"] @@ -74,20 +76,3 @@ bounds = unionRect(bounds, calcQuadraticBounds( self._getCurrentPoint(), bcp, pt)) self.bounds = bounds - - -if __name__ == "__main__": - def draw(pen): - pen.moveTo((0, 0)) - pen.lineTo((0, 100)) - pen.qCurveTo((50, 75), (60, 50), (50, 25), (0, 0)) - pen.curveTo((-50, 25), (-60, 50), (-50, 75), (0, 100)) - pen.closePath() - - pen = ControlBoundsPen(None) - draw(pen) - print pen.bounds - - pen = BoundsPen(None) - draw(pen) - print pen.bounds diff -Nru fonttools-2.4/Lib/fontTools/pens/boundsPen_test.py 
fonttools-3.0/Lib/fontTools/pens/boundsPen_test.py --- fonttools-2.4/Lib/fontTools/pens/boundsPen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/boundsPen_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,66 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen +import unittest + + +def draw_(pen): + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.qCurveTo((50, 75), (60, 50), (50, 25), (0, 0)) + pen.curveTo((-50, 25), (-60, 50), (-50, 75), (0, 100)) + pen.closePath() + + +def bounds_(pen): + return " ".join(["%.0f" % c for c in pen.bounds]) + + +class BoundsPenTest(unittest.TestCase): + def test_draw(self): + pen = BoundsPen(None) + draw_(pen) + self.assertEqual("-55 0 58 100", bounds_(pen)) + + def test_empty(self): + pen = BoundsPen(None) + self.assertEqual(None, pen.bounds) + + def test_curve(self): + pen = BoundsPen(None) + pen.moveTo((0, 0)) + pen.curveTo((20, 10), (90, 40), (0, 0)) + self.assertEqual("0 0 45 20", bounds_(pen)) + + def test_quadraticCurve(self): + pen = BoundsPen(None) + pen.moveTo((0, 0)) + pen.qCurveTo((6, 6), (10, 0)) + self.assertEqual("0 0 10 3", bounds_(pen)) + + +class ControlBoundsPenTest(unittest.TestCase): + def test_draw(self): + pen = ControlBoundsPen(None) + draw_(pen) + self.assertEqual("-55 0 60 100", bounds_(pen)) + + def test_empty(self): + pen = ControlBoundsPen(None) + self.assertEqual(None, pen.bounds) + + def test_curve(self): + pen = ControlBoundsPen(None) + pen.moveTo((0, 0)) + pen.curveTo((20, 10), (90, 40), (0, 0)) + self.assertEqual("0 0 90 40", bounds_(pen)) + + def test_quadraticCurve(self): + pen = ControlBoundsPen(None) + pen.moveTo((0, 0)) + pen.qCurveTo((6, 6), (10, 0)) + self.assertEqual("0 0 10 6", bounds_(pen)) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/pens/cocoaPen.py 
fonttools-3.0/Lib/fontTools/pens/cocoaPen.py --- fonttools-2.4/Lib/fontTools/pens/cocoaPen.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/cocoaPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,3 +1,5 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.pens.basePen import BasePen @@ -13,14 +15,14 @@ path = NSBezierPath.bezierPath() self.path = path - def _moveTo(self, (x, y)): - self.path.moveToPoint_((x, y)) + def _moveTo(self, p): + self.path.moveToPoint_(p) - def _lineTo(self, (x, y)): - self.path.lineToPoint_((x, y)) + def _lineTo(self, p): + self.path.lineToPoint_(p) - def _curveToOne(self, (x1, y1), (x2, y2), (x3, y3)): - self.path.curveToPoint_controlPoint1_controlPoint2_((x3, y3), (x1, y1), (x2, y2)) + def _curveToOne(self, p1, p2, p3): + self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2) def _closePath(self): self.path.closePath() diff -Nru fonttools-2.4/Lib/fontTools/pens/__init__.py fonttools-3.0/Lib/fontTools/pens/__init__.py --- fonttools-2.4/Lib/fontTools/pens/__init__.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,3 +1,4 @@ -"""Empty __init__.py file to signal Python this directory is a package. -(It can't be completely empty since WinZip seems to skip empty files.) -""" +"""Empty __init__.py file to signal Python this directory is a package.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * diff -Nru fonttools-2.4/Lib/fontTools/pens/pointInsidePen.py fonttools-3.0/Lib/fontTools/pens/pointInsidePen.py --- fonttools-2.4/Lib/fontTools/pens/pointInsidePen.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/pointInsidePen.py 2015-08-31 17:57:15.000000000 +0000 @@ -2,6 +2,8 @@ for shapes. 
""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.pens.basePen import BasePen from fontTools.misc.bezierTools import solveQuadratic, solveCubic @@ -96,7 +98,7 @@ dx = x2 - x1 dy = y2 - y1 - t = float(y - y1) / dy + t = (y - y1) / dy ix = dx * t + x1 if ix < x: return @@ -120,8 +122,7 @@ cy = (y2 - dy) * 3.0 by = (y3 - y2) * 3.0 - cy ay = y4 - dy - cy - by - solutions = solveCubic(ay, by, cy, dy - y) - solutions.sort() + solutions = sorted(solveCubic(ay, by, cy, dy - y)) solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] if not solutions: return @@ -176,12 +177,11 @@ c = y1 b = (y2 - c) * 2.0 a = y3 - c - b - solutions = solveQuadratic(a, b, c - y) - solutions.sort() + solutions = sorted(solveQuadratic(a, b, c - y)) solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] if not solutions: return - XXX + # XXX def _closePath(self): if self._getCurrentPoint() != self.firstPoint: diff -Nru fonttools-2.4/Lib/fontTools/pens/pointInsidePen_test.py fonttools-3.0/Lib/fontTools/pens/pointInsidePen_test.py --- fonttools-2.4/Lib/fontTools/pens/pointInsidePen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/pointInsidePen_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,90 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.pointInsidePen import PointInsidePen +import unittest + + +class PointInsidePenTest(unittest.TestCase): + def test_line(self): + def draw_triangles(pen): + pen.moveTo((0,0)); pen.lineTo((10,5)); pen.lineTo((10,0)) + pen.moveTo((9,1)); pen.lineTo((4,1)); pen.lineTo((9,4)) + pen.closePath() + + self.assertEqual( + " *********" + " ** *" + " ** *" + " * *" + " *", + self.render(draw_triangles, even_odd=True)) + + self.assertEqual( + " *********" + " *******" + " *****" + " ***" + " *", + self.render(draw_triangles, 
even_odd=False)) + + def test_curve(self): + def draw_curves(pen): + pen.moveTo((0,0)); pen.curveTo((9,1), (9,4), (0,5)) + pen.moveTo((10,5)); pen.curveTo((1,4), (1,1), (10,0)) + pen.closePath() + + self.assertEqual( + "*** ***" + "**** ****" + "*** ***" + "**** ****" + "*** ***", + self.render(draw_curves, even_odd=True)) + + self.assertEqual( + "*** ***" + "**********" + "**********" + "**********" + "*** ***", + self.render(draw_curves, even_odd=False)) + + def test_qCurve(self): + def draw_qCurves(pen): + pen.moveTo((0,0)); pen.qCurveTo((15,2), (0,5)) + pen.moveTo((10,5)); pen.qCurveTo((-5,3), (10,0)) + pen.closePath() + + self.assertEqual( + "*** **" + "**** ***" + "*** ***" + "*** ****" + "** ***", + self.render(draw_qCurves, even_odd=True)) + + self.assertEqual( + "*** **" + "**********" + "**********" + "**********" + "** ***", + self.render(draw_qCurves, even_odd=False)) + + @staticmethod + def render(draw_function, even_odd): + result = BytesIO() + for y in range(5): + for x in range(10): + pen = PointInsidePen(None, (x + 0.5, y + 0.5), even_odd) + draw_function(pen) + if pen.getResult(): + result.write(b"*") + else: + result.write(b" ") + return tounicode(result.getvalue()) + + +if __name__ == "__main__": + unittest.main() + diff -Nru fonttools-2.4/Lib/fontTools/pens/qtPen.py fonttools-3.0/Lib/fontTools/pens/qtPen.py --- fonttools-2.4/Lib/fontTools/pens/qtPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/qtPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,28 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["QtPen"] + + +class QtPen(BasePen): + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + from PyQt5.QtGui import QPainterPath + path = QPainterPath() + self.path = path + + def _moveTo(self, p): + self.path.moveTo(*p) + + def _lineTo(self, p): + 
self.path.lineTo(*p) + + def _curveToOne(self, p1, p2, p3): + self.path.cubicTo(*p1+p2+p3) + + def _closePath(self): + self.path.closeSubpath() diff -Nru fonttools-2.4/Lib/fontTools/pens/reportLabPen.py fonttools-3.0/Lib/fontTools/pens/reportLabPen.py --- fonttools-2.4/Lib/fontTools/pens/reportLabPen.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/reportLabPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,4 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.pens.basePen import BasePen +from reportlab.graphics.shapes import Path class ReportLabPen(BasePen): @@ -8,17 +11,21 @@ def __init__(self, glyphSet, path=None): BasePen.__init__(self, glyphSet) if path is None: - from reportlab.graphics.shapes import Path path = Path() self.path = path - def _moveTo(self, (x,y)): + def _moveTo(self, p): + (x,y) = p self.path.moveTo(x,y) - def _lineTo(self, (x,y)): + def _lineTo(self, p): + (x,y) = p self.path.lineTo(x,y) - def _curveToOne(self, (x1,y1), (x2,y2), (x3,y3)): + def _curveToOne(self, p1, p2, p3): + (x1,y1) = p1 + (x2,y2) = p2 + (x3,y3) = p3 self.path.curveTo(x1, y1, x2, y2, x3, y3) def _closePath(self): @@ -28,15 +35,14 @@ if __name__=="__main__": import sys if len(sys.argv) < 3: - print "Usage: reportLabPen.py []" - print " If no image file name is created, by default .png is created." 
- print " example: reportLabPen.py Arial.TTF R test.png" - print " (The file format will be PNG, regardless of the image file name supplied)" + print("Usage: reportLabPen.py []") + print(" If no image file name is created, by default .png is created.") + print(" example: reportLabPen.py Arial.TTF R test.png") + print(" (The file format will be PNG, regardless of the image file name supplied)") sys.exit(0) from fontTools.ttLib import TTFont from reportlab.lib import colors - from reportlab.graphics.shapes import Path path = sys.argv[1] glyphName = sys.argv[2] diff -Nru fonttools-2.4/Lib/fontTools/pens/transformPen.py fonttools-3.0/Lib/fontTools/pens/transformPen.py --- fonttools-2.4/Lib/fontTools/pens/transformPen.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/pens/transformPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,3 +1,5 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.pens.basePen import AbstractPen diff -Nru fonttools-2.4/Lib/fontTools/subset.py fonttools-3.0/Lib/fontTools/subset.py --- fonttools-2.4/Lib/fontTools/subset.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/subset.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,2742 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.ttLib.tables import otTables +from fontTools.misc import psCharStrings +import sys +import struct +import time +import array + +__usage__ = "pyftsubset font-file [glyph...] [--option=value]..." + +__doc__="""\ +pyftsubset -- OpenType font subsetter and optimizer + + pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. + It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) + font file. 
The subsetted glyph set is based on the specified glyphs + or characters, and specified OpenType layout features. + + The tool also performs some size-reducing optimizations, aimed for using + subset fonts as webfonts. Individual optimizations can be enabled or + disabled, and are enabled by default when they are safe. + +Usage: + """+__usage__+""" + + At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, + --text, --text-file, --unicodes, or --unicodes-file, must be specified. + +Arguments: + font-file + The input font file. + glyph + Specify one or more glyph identifiers to include in the subset. Must be + PS glyph names, or the special string '*' to keep the entire glyph set. + +Initial glyph set specification: + These options populate the initial glyph set. Same option can appear + multiple times, and the results are accummulated. + --gids=[,...] + Specify comma/whitespace-separated list of glyph IDs or ranges as + decimal numbers. For example, --gids=10-12,14 adds glyphs with + numbers 10, 11, 12, and 14. + --gids-file= + Like --gids but reads from a file. Anything after a '#' on any line + is ignored as comments. + --glyphs=[,...] + Specify comma/whitespace-separated PS glyph names to add to the subset. + Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc + that are accepted on the command line. The special string '*' wil keep + the entire glyph set. + --glyphs-file= + Like --glyphs but reads from a file. Anything after a '#' on any line + is ignored as comments. + --text= + Specify characters to include in the subset, as UTF-8 string. + --text-file= + Like --text but reads from a file. Newline character are not added to + the subset. + --unicodes=[,...] + Specify comma/whitespace-separated list of Unicode codepoints or + ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. + For example, --unicodes=41-5a,61-7a adds ASCII letters, so does + the more verbose --unicodes=U+0041-005A,U+0061-007A. 
+ The special strings '*' will choose all Unicode characters mapped + by the font. + --unicodes-file= + Like --unicodes, but reads from a file. Anything after a '#' on any + line in the file is ignored as comments. + --ignore-missing-glyphs + Do not fail if some requested glyphs or gids are not available in + the font. + --no-ignore-missing-glyphs + Stop and fail if some requested glyphs or gids are not available + in the font. [default] + --ignore-missing-unicodes [default] + Do not fail if some requested Unicode characters (including those + indirectly specified using --text or --text-file) are not available + in the font. + --no-ignore-missing-unicodes + Stop and fail if some requested Unicode characters are not available + in the font. + Note the default discrepancy between ignoring missing glyphs versus + unicodes. This is for historical reasons and in the future + --no-ignore-missing-unicodes might become default. + +Other options: + For the other options listed below, to see the current value of the option, + pass a value of '?' to it, with or without a '='. + Examples: + $ pyftsubset --glyph-names? + Current setting for 'glyph-names' is: False + $ ./pyftsubset --name-IDs=? + Current setting for 'name-IDs' is: [1, 2] + $ ./pyftsubset --hinting? --no-hinting --hinting? + Current setting for 'hinting' is: True + Current setting for 'hinting' is: False + +Output options: + --output-file= + The output font file. If not specified, the subsetted font + will be saved in as font-file.subset. + --flavor= + Specify flavor of output font file. May be 'woff' or 'woff2'. + Note that WOFF2 requires the Brotli Python extension, available + at https://github.com/google/brotli + +Glyph set expansion: + These options control how additional glyphs are added to the subset. + --notdef-glyph + Add the '.notdef' glyph to the subset (ie, keep it). [default] + --no-notdef-glyph + Drop the '.notdef' glyph unless specified in the glyph set. 
This + saves a few bytes, but is not possible for Postscript-flavored + fonts, as those require '.notdef'. For TrueType-flavored fonts, + this works fine as long as no unsupported glyphs are requested + from the font. + --notdef-outline + Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is + used when glyphs not supported by the font are to be shown. It is not + needed otherwise. + --no-notdef-outline + When including a '.notdef' glyph, remove its outline. This saves + a few bytes. [default] + --recommended-glyphs + Add glyphs 0, 1, 2, and 3 to the subset, as recommended for + TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. + Some legacy software might require this, but no modern system does. + --no-recommended-glyphs + Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in + glyph set. [default] + --layout-features[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of OpenType layout feature tags that will be preserved. + Glyph variants used by the preserved features are added to the + specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', + 'kern', 'liga', 'locl', 'mark', 'mkmk', 'rclt', 'rlig' and all features + required for script shaping are preserved. To see the full list, try + '--layout-features=?'. Use '*' to keep all features. + Multiple --layout-features options can be provided if necessary. + Examples: + --layout-features+=onum,pnum,ss01 + * Keep the default set of features and 'onum', 'pnum', 'ss01'. + --layout-features-='mark','mkmk' + * Keep the default set of features but drop 'mark' and 'mkmk'. + --layout-features='kern' + * Only keep the 'kern' feature, drop all others. + --layout-features='' + * Drop all features. + --layout-features='*' + * Keep all features. + --layout-features+=aalt --layout-features-=vrt2 + * Keep default set of features plus 'aalt', but drop 'vrt2'. 
+ +Hinting options: + --hinting + Keep hinting [default] + --no-hinting + Drop glyph-specific hinting and font-wide hinting tables, as well + as remove hinting-related bits and pieces from other tables (eg. GPOS). + See --hinting-tables for list of tables that are dropped by default. + Instructions and hints are stripped from 'glyf' and 'CFF ' tables + respectively. This produces (sometimes up to 30%) smaller fonts that + are suitable for extremely high-resolution systems, like high-end + mobile devices and retina displays. + XXX Note: Currently there is a known bug in 'CFF ' hint stripping that + might make the font unusable as a webfont as they will be rejected by + OpenType Sanitizer used in common browsers. For more information see: + https://github.com/behdad/fonttools/issues/144 + The --desubroutinize options works around that bug. + +Optimization options: + --desubroutinize + Remove CFF use of subroutinizes. Subroutinization is a way to make CFF + fonts smaller. For small subsets however, desubroutinizing might make + the font smaller. It has even been reported that desubroutinized CFF + fonts compress better (produce smaller output) WOFF and WOFF2 fonts. + Also see note under --no-hinting. + --no-desubroutinize [default] + Leave CFF subroutinizes as is, only throw away unused subroutinizes. + +Font table options: + --drop-tables[+|-]=
[,
...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of tables that will be be dropped. + By default, the following tables are dropped: + 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH' + and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill' + and color tables: 'CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'. + The tool will attempt to subset the remaining tables. + Examples: + --drop-tables-='SVG ' + * Drop the default set of tables but keep 'SVG '. + --drop-tables+=GSUB + * Drop the default set of tables and 'GSUB'. + --drop-tables=DSIG + * Only drop the 'DSIG' table, keep all others. + --drop-tables= + * Keep all tables. + --no-subset-tables+=
[,
...] + Add to the set of tables that will not be subsetted. + By default, the following tables are included in this list, as + they do not need subsetting (ignore the fact that 'loca' is listed + here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', + 'name', 'cvt ', 'fpgm', 'prep', 'VMDX', and 'DSIG'. Tables that the tool + does not know how to subset and are not specified here will be dropped + from the font. + Example: + --no-subset-tables+=FFTM + * Keep 'FFTM' table in the font by preventing subsetting. + --hinting-tables[-]=
[,
...] + Specify (=), add to (+=) or exclude from (-=) the list of font-wide + hinting tables that will be dropped if --no-hinting is specified, + Examples: + --hinting-tables-='VDMX' + * Drop font-wide hinting tables except 'VDMX'. + --hinting-tables='' + * Keep all font-wide hinting tables (but strip hints from glyphs). + --legacy-kern + Keep TrueType 'kern' table even when OpenType 'GPOS' is available. + --no-legacy-kern + Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default] + +Font naming options: + These options control what is retained in the 'name' table. For numerical + codes, see: http://www.microsoft.com/typography/otspec/name.htm + --name-IDs[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the set of 'name' table + entry nameIDs that will be preserved. By default only nameID 1 (Family) + and nameID 2 (Style) are preserved. Use '*' to keep all entries. + Examples: + --name-IDs+=0,4,6 + * Also keep Copyright, Full name and PostScript name entry. + --name-IDs='' + * Drop all 'name' table entries. + --name-IDs='*' + * keep all 'name' table entries + --name-legacy + Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.). + XXX Note: This might be needed for some fonts that have no Unicode name + entires for English. See: https://github.com/behdad/fonttools/issues/146 + --no-name-legacy + Drop legacy (non-Unicode) 'name' table entries [default] + --name-languages[+|-]=[,] + Specify (=), add to (+=) or exclude from (-=) the set of 'name' table + langIDs that will be preserved. By default only records with langID + 0x0409 (English) are preserved. Use '*' to keep all langIDs. + --obfuscate-names + Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4, + and 6 with dummy strings (it is still fully functional as webfont). + +Glyph naming and encoding options: + --glyph-names + Keep PS glyph names in TT-flavored fonts. In general glyph names are + not needed for correct use of the font. 
However, some PDF generators + and PDF viewers might rely on glyph names to extract Unicode text + from PDF documents. + --no-glyph-names + Drop PS glyph names in TT-flavored fonts, by using 'post' table + version 3.0. [default] + --legacy-cmap + Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.). + --no-legacy-cmap + Drop the legacy 'cmap' subtables. [default] + --symbol-cmap + Keep the 3.0 symbol 'cmap'. + --no-symbol-cmap + Drop the 3.0 symbol 'cmap'. [default] + +Other font-specific options: + --recalc-bounds + Recalculate font bounding boxes. + --no-recalc-bounds + Keep original font bounding boxes. This is faster and still safe + for all practical purposes. [default] + --recalc-timestamp + Set font 'modified' timestamp to current time. + --no-recalc-timestamp + Do not modify font 'modified' timestamp. [default] + --canonical-order + Order tables as recommended in the OpenType standard. This is not + required by the standard, nor by any known implementation. + --no-canonical-order + Keep original order of font tables. This is faster. [default] + +Application options: + --verbose + Display verbose information of the subsetting process. + --timing + Display detailed timing information of the subsetting process. + --xml + Display the TTX XML representation of subsetted font. + +Example: + Produce a subset containing the characters ' !"#$%' without performing + size-reducing optimizations: + + $ pyftsubset font.ttf --unicodes="U+0020-0025" \\ + --layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\ + --notdef-glyph --notdef-outline --recommended-glyphs \\ + --name-IDs='*' --name-legacy --name-languages='*' +""" + + +def _add_method(*clazzes): + """Returns a decorator function that adds a new method to one or + more classes.""" + def wrapper(method): + for clazz in clazzes: + assert clazz.__name__ != 'DefaultTable', \ + 'Oops, table class not found.' + assert not hasattr(clazz, method.__name__), \ + "Oops, class '%s' has method '%s'." 
% (clazz.__name__, + method.__name__) + setattr(clazz, method.__name__, method) + return None + return wrapper + +def _uniq_sort(l): + return sorted(set(l)) + +def _set_update(s, *others): + # Jython's set.update only takes one other argument. + # Emulate real set.update... + for other in others: + s.update(other) + +def _dict_subset(d, glyphs): + return {g:d[g] for g in glyphs} + + +@_add_method(otTables.Coverage) +def intersect(self, glyphs): + """Returns ascending list of matching coverage values.""" + return [i for i,g in enumerate(self.glyphs) if g in glyphs] + +@_add_method(otTables.Coverage) +def intersect_glyphs(self, glyphs): + """Returns set of intersecting glyphs.""" + return set(g for g in self.glyphs if g in glyphs) + +@_add_method(otTables.Coverage) +def subset(self, glyphs): + """Returns ascending list of remaining coverage values.""" + indices = self.intersect(glyphs) + self.glyphs = [g for g in self.glyphs if g in glyphs] + return indices + +@_add_method(otTables.Coverage) +def remap(self, coverage_map): + """Remaps coverage.""" + self.glyphs = [self.glyphs[i] for i in coverage_map] + +@_add_method(otTables.ClassDef) +def intersect(self, glyphs): + """Returns ascending list of matching class values.""" + return _uniq_sort( + ([0] if any(g not in self.classDefs for g in glyphs) else []) + + [v for g,v in self.classDefs.items() if g in glyphs]) + +@_add_method(otTables.ClassDef) +def intersect_class(self, glyphs, klass): + """Returns set of glyphs matching class.""" + if klass == 0: + return set(g for g in glyphs if g not in self.classDefs) + return set(g for g,v in self.classDefs.items() + if v == klass and g in glyphs) + +@_add_method(otTables.ClassDef) +def subset(self, glyphs, remap=False): + """Returns ascending list of remaining classes.""" + self.classDefs = {g:v for g,v in self.classDefs.items() if g in glyphs} + # Note: while class 0 has the special meaning of "not matched", + # if no glyph will ever /not match/, we can optimize class 0 out 
too. + indices = _uniq_sort( + ([0] if any(g not in self.classDefs for g in glyphs) else []) + + list(self.classDefs.values())) + if remap: + self.remap(indices) + return indices + +@_add_method(otTables.ClassDef) +def remap(self, class_map): + """Remaps classes.""" + self.classDefs = {g:class_map.index(v) for g,v in self.classDefs.items()} + +@_add_method(otTables.SingleSubst) +def closure_glyphs(self, s, cur_glyphs): + s.glyphs.update(v for g,v in self.mapping.items() if g in cur_glyphs) + +@_add_method(otTables.SingleSubst) +def subset_glyphs(self, s): + self.mapping = {g:v for g,v in self.mapping.items() + if g in s.glyphs and v in s.glyphs} + return bool(self.mapping) + +@_add_method(otTables.MultipleSubst) +def closure_glyphs(self, s, cur_glyphs): + indices = self.Coverage.intersect(cur_glyphs) + _set_update(s.glyphs, *(self.Sequence[i].Substitute for i in indices)) + +@_add_method(otTables.MultipleSubst) +def subset_glyphs(self, s): + indices = self.Coverage.subset(s.glyphs) + self.Sequence = [self.Sequence[i] for i in indices] + # Now drop rules generating glyphs we don't want + indices = [i for i,seq in enumerate(self.Sequence) + if all(sub in s.glyphs for sub in seq.Substitute)] + self.Sequence = [self.Sequence[i] for i in indices] + self.Coverage.remap(indices) + self.SequenceCount = len(self.Sequence) + return bool(self.SequenceCount) + +@_add_method(otTables.AlternateSubst) +def closure_glyphs(self, s, cur_glyphs): + _set_update(s.glyphs, *(vlist for g,vlist in self.alternates.items() + if g in cur_glyphs)) + +@_add_method(otTables.AlternateSubst) +def subset_glyphs(self, s): + self.alternates = {g:vlist + for g,vlist in self.alternates.items() + if g in s.glyphs and + all(v in s.glyphs for v in vlist)} + return bool(self.alternates) + +@_add_method(otTables.LigatureSubst) +def closure_glyphs(self, s, cur_glyphs): + _set_update(s.glyphs, *([seq.LigGlyph for seq in seqs + if all(c in s.glyphs for c in seq.Component)] + for g,seqs in 
self.ligatures.items() + if g in cur_glyphs)) + +@_add_method(otTables.LigatureSubst) +def subset_glyphs(self, s): + self.ligatures = {g:v for g,v in self.ligatures.items() + if g in s.glyphs} + self.ligatures = {g:[seq for seq in seqs + if seq.LigGlyph in s.glyphs and + all(c in s.glyphs for c in seq.Component)] + for g,seqs in self.ligatures.items()} + self.ligatures = {g:v for g,v in self.ligatures.items() if v} + return bool(self.ligatures) + +@_add_method(otTables.ReverseChainSingleSubst) +def closure_glyphs(self, s, cur_glyphs): + if self.Format == 1: + indices = self.Coverage.intersect(cur_glyphs) + if(not indices or + not all(c.intersect(s.glyphs) + for c in self.LookAheadCoverage + self.BacktrackCoverage)): + return + s.glyphs.update(self.Substitute[i] for i in indices) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ReverseChainSingleSubst) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.Substitute = [self.Substitute[i] for i in indices] + # Now drop rules generating glyphs we don't want + indices = [i for i,sub in enumerate(self.Substitute) + if sub in s.glyphs] + self.Substitute = [self.Substitute[i] for i in indices] + self.Coverage.remap(indices) + self.GlyphCount = len(self.Substitute) + return bool(self.GlyphCount and + all(c.subset(s.glyphs) + for c in self.LookAheadCoverage+self.BacktrackCoverage)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.SinglePos) +def subset_glyphs(self, s): + if self.Format == 1: + return len(self.Coverage.subset(s.glyphs)) + elif self.Format == 2: + indices = self.Coverage.subset(s.glyphs) + self.Value = [self.Value[i] for i in indices] + self.ValueCount = len(self.Value) + return bool(self.ValueCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.SinglePos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables + self.ValueFormat &= 
~0x00F0 + return True + +@_add_method(otTables.PairPos) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.PairSet = [self.PairSet[i] for i in indices] + for p in self.PairSet: + p.PairValueRecord = [r for r in p.PairValueRecord + if r.SecondGlyph in s.glyphs] + p.PairValueCount = len(p.PairValueRecord) + # Remove empty pairsets + indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount] + self.Coverage.remap(indices) + self.PairSet = [self.PairSet[i] for i in indices] + self.PairSetCount = len(self.PairSet) + return bool(self.PairSetCount) + elif self.Format == 2: + class1_map = self.ClassDef1.subset(s.glyphs, remap=True) + class2_map = self.ClassDef2.subset(s.glyphs, remap=True) + self.Class1Record = [self.Class1Record[i] for i in class1_map] + for c in self.Class1Record: + c.Class2Record = [c.Class2Record[i] for i in class2_map] + self.Class1Count = len(class1_map) + self.Class2Count = len(class2_map) + return bool(self.Class1Count and + self.Class2Count and + self.Coverage.subset(s.glyphs)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.PairPos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables + self.ValueFormat1 &= ~0x00F0 + self.ValueFormat2 &= ~0x00F0 + return True + +@_add_method(otTables.CursivePos) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.EntryExitRecord = [self.EntryExitRecord[i] for i in indices] + self.EntryExitCount = len(self.EntryExitRecord) + return bool(self.EntryExitCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Anchor) +def prune_hints(self): + # Drop device tables / contour anchor point + self.ensureDecompiled() + self.Format = 1 + +@_add_method(otTables.CursivePos) +def prune_post_subset(self, options): + if not options.hinting: + for rec in self.EntryExitRecord: + if rec.EntryAnchor: 
rec.EntryAnchor.prune_hints() + if rec.ExitAnchor: rec.ExitAnchor.prune_hints() + return True + +@_add_method(otTables.MarkBasePos) +def subset_glyphs(self, s): + if self.Format == 1: + mark_indices = self.MarkCoverage.subset(s.glyphs) + self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] + for i in mark_indices] + self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) + base_indices = self.BaseCoverage.subset(s.glyphs) + self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i] + for i in base_indices] + self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) + self.ClassCount = len(class_indices) + for m in self.MarkArray.MarkRecord: + m.Class = class_indices.index(m.Class) + for b in self.BaseArray.BaseRecord: + b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices] + return bool(self.ClassCount and + self.MarkArray.MarkCount and + self.BaseArray.BaseCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkBasePos) +def prune_post_subset(self, options): + if not options.hinting: + for m in self.MarkArray.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for b in self.BaseArray.BaseRecord: + for a in b.BaseAnchor: + if a: + a.prune_hints() + return True + +@_add_method(otTables.MarkLigPos) +def subset_glyphs(self, s): + if self.Format == 1: + mark_indices = self.MarkCoverage.subset(s.glyphs) + self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] + for i in mark_indices] + self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) + ligature_indices = self.LigatureCoverage.subset(s.glyphs) + self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i] + for i in ligature_indices] + self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) + self.ClassCount = len(class_indices) + for m 
in self.MarkArray.MarkRecord: + m.Class = class_indices.index(m.Class) + for l in self.LigatureArray.LigatureAttach: + for c in l.ComponentRecord: + c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices] + return bool(self.ClassCount and + self.MarkArray.MarkCount and + self.LigatureArray.LigatureCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkLigPos) +def prune_post_subset(self, options): + if not options.hinting: + for m in self.MarkArray.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for l in self.LigatureArray.LigatureAttach: + for c in l.ComponentRecord: + for a in c.LigatureAnchor: + if a: + a.prune_hints() + return True + +@_add_method(otTables.MarkMarkPos) +def subset_glyphs(self, s): + if self.Format == 1: + mark1_indices = self.Mark1Coverage.subset(s.glyphs) + self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i] + for i in mark1_indices] + self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord) + mark2_indices = self.Mark2Coverage.subset(s.glyphs) + self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i] + for i in mark2_indices] + self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord) + self.ClassCount = len(class_indices) + for m in self.Mark1Array.MarkRecord: + m.Class = class_indices.index(m.Class) + for b in self.Mark2Array.Mark2Record: + b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices] + return bool(self.ClassCount and + self.Mark1Array.MarkCount and + self.Mark2Array.MarkCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkMarkPos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables or contour anchor point + for m in self.Mark1Array.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for b in self.Mark2Array.Mark2Record: + for m in b.Mark2Anchor: + if m: + m.prune_hints() 
+ return True + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def subset_lookups(self, lookup_indices): + pass + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def collect_lookups(self): + return [] + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def prune_post_subset(self, options): + return True + +@_add_method(otTables.SingleSubst, + otTables.AlternateSubst, + otTables.ReverseChainSingleSubst) +def may_have_non_1to1(self): + return False + +@_add_method(otTables.MultipleSubst, + otTables.LigatureSubst, + otTables.ContextSubst, + otTables.ChainContextSubst) +def may_have_non_1to1(self): + return True + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def __subset_classify_context(self): + + class ContextHelper(object): + def __init__(self, klass, Format): + if klass.__name__.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klass.__name__.startswith('Chain'): + Chain = 'Chain' + else: + Chain = '' + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(None,) 
+ ChainContextData = lambda r:(None, None, None) + RuleData = lambda r:(r.Input,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + SetRuleData = None + ChainSetRuleData = None + elif Format == 2: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(r.ClassDef,) + ChainContextData = lambda r:(r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) + RuleData = lambda r:(r.Class,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d):(r.Class,) = d + def ChainSetRuleData(r, d):(r.Backtrack, r.Input, r.LookAhead) = d + elif Format == 3: + Coverage = lambda r: r.Coverage[0] + ChainCoverage = lambda r: r.InputCoverage[0] + ContextData = None + ChainContextData = None + RuleData = lambda r: r.Coverage + ChainRuleData = lambda r:(r.BacktrackCoverage + + r.InputCoverage + + r.LookAheadCoverage) + SetRuleData = None + ChainSetRuleData = None + else: + assert 0, "unknown format: %s" % Format + + if Chain: + self.Coverage = ChainCoverage + self.ContextData = ChainContextData + self.RuleData = ChainRuleData + self.SetRuleData = ChainSetRuleData + else: + self.Coverage = Coverage + self.ContextData = ContextData + self.RuleData = RuleData + self.SetRuleData = SetRuleData + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleCount = ChainTyp+'RuleCount' + self.RuleSet = ChainTyp+'RuleSet' + self.RuleSetCount = ChainTyp+'RuleSetCount' + self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleCount = ChainTyp+'ClassRuleCount' + self.RuleSet = ChainTyp+'ClassSet' + self.RuleSetCount = ChainTyp+'ClassSetCount' + self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c + else (set(glyphs) if r == 0 else set())) + + self.ClassDef = 'InputClassDef' if Chain else 'ClassDef' + self.ClassDefIndex = 1 if Chain else 0 + self.Input = 'Input' if Chain else 'Class' + + if self.Format not in [1, 2, 
3]: + return None # Don't shoot the messenger; let it go + if not hasattr(self.__class__, "__ContextHelpers"): + self.__class__.__ContextHelpers = {} + if self.Format not in self.__class__.__ContextHelpers: + helper = ContextHelper(self.__class__, self.Format) + self.__class__.__ContextHelpers[self.Format] = helper + return self.__class__.__ContextHelpers[self.Format] + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst) +def closure_glyphs(self, s, cur_glyphs): + c = self.__subset_classify_context() + + indices = c.Coverage(self).intersect(cur_glyphs) + if not indices: + return [] + cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs) + + if self.Format == 1: + ContextData = c.ContextData(self) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + for i in indices: + if i >= rssCount or not rss[i]: continue + for r in getattr(rss[i], c.Rule): + if not r: continue + if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) + for cd,klist in zip(ContextData, c.RuleData(r))): + continue + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? 
+ pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset([c.Coverage(self).glyphs[i]]) + else: + pos_glyphs = frozenset([r.Input[seqi - 1]]) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(r.Input)+2)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + elif self.Format == 2: + ClassDef = getattr(self, c.ClassDef) + indices = ClassDef.intersect(cur_glyphs) + ContextData = c.ContextData(self) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + for i in indices: + if i >= rssCount or not rss[i]: continue + for r in getattr(rss[i], c.Rule): + if not r: continue + if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) + for cd,klist in zip(ContextData, c.RuleData(r))): + continue + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? + pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i)) + else: + pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1])) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(getattr(r, c.Input))+2)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + elif self.Format == 3: + if not all(x.intersect(s.glyphs) for x in c.RuleData(self)): + return [] + r = self + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? 
+ pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset(cur_glyphs) + else: + pos_glyphs = frozenset(r.InputCoverage[seqi].intersect_glyphs(s.glyphs)) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(r.InputCoverage)+1)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ContextPos, + otTables.ChainContextSubst, + otTables.ChainContextPos) +def subset_glyphs(self, s): + c = self.__subset_classify_context() + + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + rss = [rss[i] for i in indices if i < rssCount] + for rs in rss: + if not rs: continue + ss = getattr(rs, c.Rule) + ss = [r for r in ss + if r and all(all(g in s.glyphs for g in glist) + for glist in c.RuleData(r))] + setattr(rs, c.Rule, ss) + setattr(rs, c.RuleCount, len(ss)) + # Prune empty rulesets + indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)] + self.Coverage.remap(indices) + rss = [rss[i] for i in indices] + setattr(self, c.RuleSet, rss) + setattr(self, c.RuleSetCount, len(rss)) + return bool(rss) + elif self.Format == 2: + if not self.Coverage.subset(s.glyphs): + return False + ContextData = c.ContextData(self) + klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData] + + # Keep rulesets for class numbers that survived. + indices = klass_maps[c.ClassDefIndex] + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + rss = [rss[i] for i in indices if i < rssCount] + del rssCount + # Delete, but not renumber, unreachable rulesets. 
+ indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs) + rss = [rss if i in indices else None for i,rss in enumerate(rss)] + + for rs in rss: + if not rs: continue + ss = getattr(rs, c.Rule) + ss = [r for r in ss + if r and all(all(k in klass_map for k in klist) + for klass_map,klist in zip(klass_maps, c.RuleData(r)))] + setattr(rs, c.Rule, ss) + setattr(rs, c.RuleCount, len(ss)) + + # Remap rule classes + for r in ss: + c.SetRuleData(r, [[klass_map.index(k) for k in klist] + for klass_map,klist in zip(klass_maps, c.RuleData(r))]) + + # Prune empty rulesets + rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss] + while rss and rss[-1] is None: + del rss[-1] + setattr(self, c.RuleSet, rss) + setattr(self, c.RuleSetCount, len(rss)) + + # TODO: We can do a second round of remapping class values based + # on classes that are actually used in at least one rule. Right + # now we subset classes to c.glyphs only. Or better, rewrite + # the above to do that. + + return bool(rss) + elif self.Format == 3: + return all(x.subset(s.glyphs) for x in c.RuleData(self)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def subset_lookups(self, lookup_indices): + c = self.__subset_classify_context() + + if self.Format in [1, 2]: + for rs in getattr(self, c.RuleSet): + if not rs: continue + for r in getattr(rs, c.Rule): + if not r: continue + setattr(r, c.LookupRecord, + [ll for ll in getattr(r, c.LookupRecord) + if ll and ll.LookupListIndex in lookup_indices]) + for ll in getattr(r, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex) + elif self.Format == 3: + setattr(self, c.LookupRecord, + [ll for ll in getattr(self, c.LookupRecord) + if ll and ll.LookupListIndex in lookup_indices]) + for ll in getattr(self, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = 
lookup_indices.index(ll.LookupListIndex) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def collect_lookups(self): + c = self.__subset_classify_context() + + if self.Format in [1, 2]: + return [ll.LookupListIndex + for rs in getattr(self, c.RuleSet) if rs + for r in getattr(rs, c.Rule) if r + for ll in getattr(r, c.LookupRecord) if ll] + elif self.Format == 3: + return [ll.LookupListIndex + for ll in getattr(self, c.LookupRecord) if ll] + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst) +def closure_glyphs(self, s, cur_glyphs): + if self.Format == 1: + self.ExtSubTable.closure_glyphs(s, cur_glyphs) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst) +def may_have_non_1to1(self): + if self.Format == 1: + return self.ExtSubTable.may_have_non_1to1() + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def subset_glyphs(self, s): + if self.Format == 1: + return self.ExtSubTable.subset_glyphs(s) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def prune_post_subset(self, options): + if self.Format == 1: + return self.ExtSubTable.prune_post_subset(options) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def subset_lookups(self, lookup_indices): + if self.Format == 1: + return self.ExtSubTable.subset_lookups(lookup_indices) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def collect_lookups(self): + if self.Format == 1: + return self.ExtSubTable.collect_lookups() + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Lookup) +def 
closure_glyphs(self, s, cur_glyphs=None): + if cur_glyphs is None: + cur_glyphs = frozenset(s.glyphs) + + # Memoize + if (id(self), cur_glyphs) in s._doneLookups: + return + s._doneLookups.add((id(self), cur_glyphs)) + + if self in s._activeLookups: + raise Exception("Circular loop in lookup recursion") + s._activeLookups.append(self) + for st in self.SubTable: + if not st: continue + st.closure_glyphs(s, cur_glyphs) + assert(s._activeLookups[-1] == self) + del s._activeLookups[-1] + +@_add_method(otTables.Lookup) +def subset_glyphs(self, s): + self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)] + self.SubTableCount = len(self.SubTable) + return bool(self.SubTableCount) + +@_add_method(otTables.Lookup) +def prune_post_subset(self, options): + ret = False + for st in self.SubTable: + if not st: continue + if st.prune_post_subset(options): ret = True + return ret + +@_add_method(otTables.Lookup) +def subset_lookups(self, lookup_indices): + for s in self.SubTable: + s.subset_lookups(lookup_indices) + +@_add_method(otTables.Lookup) +def collect_lookups(self): + return _uniq_sort(sum((st.collect_lookups() for st in self.SubTable + if st), [])) + +@_add_method(otTables.Lookup) +def may_have_non_1to1(self): + return any(st.may_have_non_1to1() for st in self.SubTable if st) + +@_add_method(otTables.LookupList) +def subset_glyphs(self, s): + """Returns the indices of nonempty lookups.""" + return [i for i,l in enumerate(self.Lookup) if l and l.subset_glyphs(s)] + +@_add_method(otTables.LookupList) +def prune_post_subset(self, options): + ret = False + for l in self.Lookup: + if not l: continue + if l.prune_post_subset(options): ret = True + return ret + +@_add_method(otTables.LookupList) +def subset_lookups(self, lookup_indices): + self.ensureDecompiled() + self.Lookup = [self.Lookup[i] for i in lookup_indices + if i < self.LookupCount] + self.LookupCount = len(self.Lookup) + for l in self.Lookup: + l.subset_lookups(lookup_indices) + 
+@_add_method(otTables.LookupList) +def neuter_lookups(self, lookup_indices): + """Sets lookups not in lookup_indices to None.""" + self.ensureDecompiled() + self.Lookup = [l if i in lookup_indices else None for i,l in enumerate(self.Lookup)] + +@_add_method(otTables.LookupList) +def closure_lookups(self, lookup_indices): + lookup_indices = _uniq_sort(lookup_indices) + recurse = lookup_indices + while True: + recurse_lookups = sum((self.Lookup[i].collect_lookups() + for i in recurse if i < self.LookupCount), []) + recurse_lookups = [l for l in recurse_lookups + if l not in lookup_indices and l < self.LookupCount] + if not recurse_lookups: + return _uniq_sort(lookup_indices) + recurse_lookups = _uniq_sort(recurse_lookups) + lookup_indices.extend(recurse_lookups) + recurse = recurse_lookups + +@_add_method(otTables.Feature) +def subset_lookups(self, lookup_indices): + self.LookupListIndex = [l for l in self.LookupListIndex + if l in lookup_indices] + # Now map them. + self.LookupListIndex = [lookup_indices.index(l) + for l in self.LookupListIndex] + self.LookupCount = len(self.LookupListIndex) + return self.LookupCount or self.FeatureParams + +@_add_method(otTables.Feature) +def collect_lookups(self): + return self.LookupListIndex[:] + +@_add_method(otTables.FeatureList) +def subset_lookups(self, lookup_indices): + """Returns the indices of nonempty features.""" + # Note: Never ever drop feature 'pref', even if it's empty. + # HarfBuzz chooses shaper for Khmer based on presence of this + # feature. 
See thread at: + # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html + feature_indices = [i for i,f in enumerate(self.FeatureRecord) + if (f.Feature.subset_lookups(lookup_indices) or + f.FeatureTag == 'pref')] + self.subset_features(feature_indices) + return feature_indices + +@_add_method(otTables.FeatureList) +def collect_lookups(self, feature_indices): + return _uniq_sort(sum((self.FeatureRecord[i].Feature.collect_lookups() + for i in feature_indices + if i < self.FeatureCount), [])) + +@_add_method(otTables.FeatureList) +def subset_features(self, feature_indices): + self.ensureDecompiled() + self.FeatureRecord = [self.FeatureRecord[i] for i in feature_indices] + self.FeatureCount = len(self.FeatureRecord) + return bool(self.FeatureCount) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def subset_features(self, feature_indices): + if self.ReqFeatureIndex in feature_indices: + self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex) + else: + self.ReqFeatureIndex = 65535 + self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices] + # Now map them. 
+ self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex + if f in feature_indices] + self.FeatureCount = len(self.FeatureIndex) + return bool(self.FeatureCount or self.ReqFeatureIndex != 65535) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def collect_features(self): + feature_indices = self.FeatureIndex[:] + if self.ReqFeatureIndex != 65535: + feature_indices.append(self.ReqFeatureIndex) + return _uniq_sort(feature_indices) + +@_add_method(otTables.Script) +def subset_features(self, feature_indices): + if(self.DefaultLangSys and + not self.DefaultLangSys.subset_features(feature_indices)): + self.DefaultLangSys = None + self.LangSysRecord = [l for l in self.LangSysRecord + if l.LangSys.subset_features(feature_indices)] + self.LangSysCount = len(self.LangSysRecord) + return bool(self.LangSysCount or self.DefaultLangSys) + +@_add_method(otTables.Script) +def collect_features(self): + feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord] + if self.DefaultLangSys: + feature_indices.append(self.DefaultLangSys.collect_features()) + return _uniq_sort(sum(feature_indices, [])) + +@_add_method(otTables.ScriptList) +def subset_features(self, feature_indices): + self.ScriptRecord = [s for s in self.ScriptRecord + if s.Script.subset_features(feature_indices)] + self.ScriptCount = len(self.ScriptRecord) + return bool(self.ScriptCount) + +@_add_method(otTables.ScriptList) +def collect_features(self): + return _uniq_sort(sum((s.Script.collect_features() + for s in self.ScriptRecord), [])) + +@_add_method(ttLib.getTableClass('GSUB')) +def closure_glyphs(self, s): + s.table = self.table + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) + else: + lookup_indices = [] + if self.table.LookupList: + while True: + orig_glyphs = frozenset(s.glyphs) + 
s._activeLookups = [] + s._doneLookups = set() + for i in lookup_indices: + if i >= self.table.LookupList.LookupCount: continue + if not self.table.LookupList.Lookup[i]: continue + self.table.LookupList.Lookup[i].closure_glyphs(s) + del s._activeLookups, s._doneLookups + if orig_glyphs == s.glyphs: + break + del s.table + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_glyphs(self, s): + s.glyphs = s.glyphs_gsubed + if self.table.LookupList: + lookup_indices = self.table.LookupList.subset_glyphs(s) + else: + lookup_indices = [] + self.subset_lookups(lookup_indices) + return True + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_lookups(self, lookup_indices): + """Retains specified lookups, then removes empty features, language + systems, and scripts.""" + if self.table.LookupList: + self.table.LookupList.subset_lookups(lookup_indices) + if self.table.FeatureList: + feature_indices = self.table.FeatureList.subset_lookups(lookup_indices) + else: + feature_indices = [] + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def neuter_lookups(self, lookup_indices): + """Sets lookups not in lookup_indices to None.""" + if self.table.LookupList: + self.table.LookupList.neuter_lookups(lookup_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_lookups(self, remap=True): + """Remove (default) or neuter unreferenced lookups""" + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) + else: + lookup_indices = [] + if self.table.LookupList: + lookup_indices = self.table.LookupList.closure_lookups(lookup_indices) + else: + lookup_indices = [] + if remap: + 
self.subset_lookups(lookup_indices) + else: + self.neuter_lookups(lookup_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_feature_tags(self, feature_tags): + if self.table.FeatureList: + feature_indices = \ + [i for i,f in enumerate(self.table.FeatureList.FeatureRecord) + if f.FeatureTag in feature_tags] + self.table.FeatureList.subset_features(feature_indices) + else: + feature_indices = [] + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_features(self): + """Remove unreferenced features""" + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + self.table.FeatureList.subset_features(feature_indices) + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_pre_subset(self, options): + # Drop undesired features + if '*' not in options.layout_features: + self.subset_feature_tags(options.layout_features) + # Neuter unreferenced lookups + self.prune_lookups(remap=False) + return True + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def remove_redundant_langsys(self): + table = self.table + if not table.ScriptList or not table.FeatureList: + return + + features = table.FeatureList.FeatureRecord + + for s in table.ScriptList.ScriptRecord: + d = s.Script.DefaultLangSys + if not d: + continue + for lr in s.Script.LangSysRecord[:]: + l = lr.LangSys + # Compare d and l + if len(d.FeatureIndex) != len(l.FeatureIndex): + continue + if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535): + continue + + if d.ReqFeatureIndex != 65535: + if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]: + continue + + for i in range(len(d.FeatureIndex)): + if 
features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]: + break + else: + # LangSys and default are equal; delete LangSys + s.Script.LangSysRecord.remove(lr) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_post_subset(self, options): + table = self.table + + self.prune_lookups() # XXX Is this actually needed?! + + if table.LookupList: + table.LookupList.prune_post_subset(options) + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. + #if not table.LookupList.Lookup: + # table.LookupList = None + + if not table.LookupList: + table.FeatureList = None + + if table.FeatureList: + self.remove_redundant_langsys() + # Remove unreferenced features + self.prune_features() + + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. + #if table.FeatureList and not table.FeatureList.FeatureRecord: + # table.FeatureList = None + + # Never drop scripts themselves as them just being available + # holds semantic significance. + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. 
+ #if table.ScriptList and not table.ScriptList.ScriptRecord: + # table.ScriptList = None + + return True + +@_add_method(ttLib.getTableClass('GDEF')) +def subset_glyphs(self, s): + glyphs = s.glyphs_gsubed + table = self.table + if table.LigCaretList: + indices = table.LigCaretList.Coverage.subset(glyphs) + table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i] + for i in indices] + table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph) + if table.MarkAttachClassDef: + table.MarkAttachClassDef.classDefs = \ + {g:v for g,v in table.MarkAttachClassDef.classDefs.items() + if g in glyphs} + if table.GlyphClassDef: + table.GlyphClassDef.classDefs = \ + {g:v for g,v in table.GlyphClassDef.classDefs.items() + if g in glyphs} + if table.AttachList: + indices = table.AttachList.Coverage.subset(glyphs) + GlyphCount = table.AttachList.GlyphCount + table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i] + for i in indices + if i < GlyphCount] + table.AttachList.GlyphCount = len(table.AttachList.AttachPoint) + if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef: + for coverage in table.MarkGlyphSetsDef.Coverage: + coverage.subset(glyphs) + # TODO: The following is disabled. If enabling, we need to go fixup all + # lookups that use MarkFilteringSet and map their set. 
+ # indices = table.MarkGlyphSetsDef.Coverage = \ + # [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs] + return True + +@_add_method(ttLib.getTableClass('GDEF')) +def prune_post_subset(self, options): + table = self.table + # XXX check these against OTS + if table.LigCaretList and not table.LigCaretList.LigGlyphCount: + table.LigCaretList = None + if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs: + table.MarkAttachClassDef = None + if table.GlyphClassDef and not table.GlyphClassDef.classDefs: + table.GlyphClassDef = None + if table.AttachList and not table.AttachList.GlyphCount: + table.AttachList = None + if (hasattr(table, "MarkGlyphSetsDef") and + table.MarkGlyphSetsDef and + not table.MarkGlyphSetsDef.Coverage): + table.MarkGlyphSetsDef = None + if table.Version == 0x00010002/0x10000: + table.Version = 1.0 + return bool(table.LigCaretList or + table.MarkAttachClassDef or + table.GlyphClassDef or + table.AttachList or + (table.Version >= 0x00010002/0x10000 and table.MarkGlyphSetsDef)) + +@_add_method(ttLib.getTableClass('kern')) +def prune_pre_subset(self, options): + # Prune unknown kern table types + self.kernTables = [t for t in self.kernTables if hasattr(t, 'kernTable')] + return bool(self.kernTables) + +@_add_method(ttLib.getTableClass('kern')) +def subset_glyphs(self, s): + glyphs = s.glyphs_gsubed + for t in self.kernTables: + t.kernTable = {(a,b):v for (a,b),v in t.kernTable.items() + if a in glyphs and b in glyphs} + self.kernTables = [t for t in self.kernTables if t.kernTable] + return bool(self.kernTables) + +@_add_method(ttLib.getTableClass('vmtx')) +def subset_glyphs(self, s): + self.metrics = _dict_subset(self.metrics, s.glyphs) + return bool(self.metrics) + +@_add_method(ttLib.getTableClass('hmtx')) +def subset_glyphs(self, s): + self.metrics = _dict_subset(self.metrics, s.glyphs) + return True # Required table + +@_add_method(ttLib.getTableClass('hdmx')) +def subset_glyphs(self, s): + self.hdmx = {sz:_dict_subset(l, 
s.glyphs) for sz,l in self.hdmx.items()} + return bool(self.hdmx) + +@_add_method(ttLib.getTableClass('VORG')) +def subset_glyphs(self, s): + self.VOriginRecords = {g:v for g,v in self.VOriginRecords.items() + if g in s.glyphs} + self.numVertOriginYMetrics = len(self.VOriginRecords) + return True # Never drop; has default metrics + +@_add_method(ttLib.getTableClass('post')) +def prune_pre_subset(self, options): + if not options.glyph_names: + self.formatType = 3.0 + return True # Required table + +@_add_method(ttLib.getTableClass('post')) +def subset_glyphs(self, s): + self.extraNames = [] # This seems to do it + return True # Required table + +@_add_method(ttLib.getTableModule('glyf').Glyph) +def remapComponentsFast(self, indices): + if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: + return # Not composite + data = array.array("B", self.data) + i = 10 + more = 1 + while more: + flags =(data[i] << 8) | data[i+1] + glyphID =(data[i+2] << 8) | data[i+3] + # Remap + glyphID = indices.index(glyphID) + data[i+2] = glyphID >> 8 + data[i+3] = glyphID & 0xFF + i += 4 + flags = int(flags) + + if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS + else: i += 2 + if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE + elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE + elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO + more = flags & 0x0020 # MORE_COMPONENTS + + self.data = data.tostring() + +@_add_method(ttLib.getTableClass('glyf')) +def closure_glyphs(self, s): + decompose = s.glyphs + while True: + components = set() + for g in decompose: + if g not in self.glyphs: + continue + gl = self.glyphs[g] + for c in gl.getComponentNames(self): + if c not in s.glyphs: + components.add(c) + components = set(c for c in components if c not in s.glyphs) + if not components: + break + decompose = components + s.glyphs.update(components) + +@_add_method(ttLib.getTableClass('glyf')) +def prune_pre_subset(self, options): + if options.notdef_glyph and not options.notdef_outline: + 
g = self[self.glyphOrder[0]] + # Yay, easy! + g.__dict__.clear() + g.data = "" + return True + +@_add_method(ttLib.getTableClass('glyf')) +def subset_glyphs(self, s): + self.glyphs = _dict_subset(self.glyphs, s.glyphs) + indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs] + for v in self.glyphs.values(): + if hasattr(v, "data"): + v.remapComponentsFast(indices) + else: + pass # No need + self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs] + # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset. + return True + +@_add_method(ttLib.getTableClass('glyf')) +def prune_post_subset(self, options): + remove_hinting = not options.hinting + for v in self.glyphs.values(): + v.trim(remove_hinting=remove_hinting) + return True + +@_add_method(ttLib.getTableClass('CFF ')) +def prune_pre_subset(self, options): + cff = self.cff + # CFF table must have one font only + cff.fontNames = cff.fontNames[:1] + + if options.notdef_glyph and not options.notdef_outline: + for fontname in cff.keys(): + font = cff[fontname] + c,_ = font.CharStrings.getItemAndSelector('.notdef') + # XXX we should preserve the glyph width + c.bytecode = '\x0e' # endchar + c.program = None + + return True # bool(cff.fontNames) + +@_add_method(ttLib.getTableClass('CFF ')) +def subset_glyphs(self, s): + cff = self.cff + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + + # Load all glyphs + for g in font.charset: + if g not in s.glyphs: continue + c,sel = cs.getItemAndSelector(g) + + if cs.charStringsAreIndexed: + indices = [i for i,g in enumerate(font.charset) if g in s.glyphs] + csi = cs.charStringsIndex + csi.items = [csi.items[i] for i in indices] + del csi.file, csi.offsets + if hasattr(font, "FDSelect"): + sel = font.FDSelect + # XXX We want to set sel.format to None, such that the + # most compact format is selected. However, OTS was + # broken and couldn't parse a FDSelect format 0 that + # happened before CharStrings. 
As such, always force + # format 3 until we fix cffLib to always generate + # FDSelect after CharStrings. + # https://github.com/khaledhosny/ots/pull/31 + #sel.format = None + sel.format = 3 + sel.gidArray = [sel.gidArray[i] for i in indices] + cs.charStrings = {g:indices.index(v) + for g,v in cs.charStrings.items() + if g in s.glyphs} + else: + cs.charStrings = {g:v + for g,v in cs.charStrings.items() + if g in s.glyphs} + font.charset = [g for g in font.charset if g in s.glyphs] + font.numGlyphs = len(font.charset) + + return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) + +@_add_method(psCharStrings.T2CharString) +def subset_subroutines(self, subrs, gsubrs): + p = self.program + assert len(p) + for i in range(1, len(p)): + if p[i] == 'callsubr': + assert isinstance(p[i-1], int) + p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias + elif p[i] == 'callgsubr': + assert isinstance(p[i-1], int) + p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias + +@_add_method(psCharStrings.T2CharString) +def drop_hints(self): + hints = self._hints + + if hints.has_hint: + self.program = self.program[hints.last_hint:] + if hasattr(self, 'width'): + # Insert width back if needed + if self.width != self.private.defaultWidthX: + self.program.insert(0, self.width - self.private.nominalWidthX) + + if hints.has_hintmask: + i = 0 + p = self.program + while i < len(p): + if p[i] in ['hintmask', 'cntrmask']: + assert i + 1 <= len(p) + del p[i:i+2] + continue + i += 1 + + # TODO: we currently don't drop calls to "empty" subroutines. 
+ + assert len(self.program) + + del self._hints + +class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs): + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + for subrs in [localSubrs, globalSubrs]: + if subrs and not hasattr(subrs, "_used"): + subrs._used = set() + + def op_callsubr(self, index): + self.localSubrs._used.add(self.operandStack[-1]+self.localBias) + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + + def op_callgsubr(self, index): + self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias) + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + +class _DehintingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + class Hints(object): + def __init__(self): + # Whether calling this charstring produces any hint stems + self.has_hint = False + # Index to start at to drop all hints + self.last_hint = 0 + # Index up to which we know more hints are possible. + # Only relevant if status is 0 or 1. + self.last_checked = 0 + # The status means: + # 0: after dropping hints, this charstring is empty + # 1: after dropping hints, there may be more hints + # continuing after this + # 2: no more hints possible after this charstring + self.status = 0 + # Has hintmask instructions; not recursive + self.has_hintmask = False + pass + + def __init__(self, css, localSubrs, globalSubrs): + self._css = css + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + + def execute(self, charString): + old_hints = charString._hints if hasattr(charString, '_hints') else None + charString._hints = self.Hints() + + psCharStrings.SimpleT2Decompiler.execute(self, charString) + + hints = charString._hints + + if hints.has_hint or hints.has_hintmask: + self._css.add(charString) + + if hints.status != 2: + # Check from last_check, make sure we didn't have any operators. 
+ for i in range(hints.last_checked, len(charString.program) - 1): + if isinstance(charString.program[i], str): + hints.status = 2 + break + else: + hints.status = 1 # There's *something* here + hints.last_checked = len(charString.program) + + if old_hints: + assert hints.__dict__ == old_hints.__dict__ + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + self.processSubr(index, subr) + + def op_hstem(self, index): + psCharStrings.SimpleT2Decompiler.op_hstem(self, index) + self.processHint(index) + def op_vstem(self, index): + psCharStrings.SimpleT2Decompiler.op_vstem(self, index) + self.processHint(index) + def op_hstemhm(self, index): + psCharStrings.SimpleT2Decompiler.op_hstemhm(self, index) + self.processHint(index) + def op_vstemhm(self, index): + psCharStrings.SimpleT2Decompiler.op_vstemhm(self, index) + self.processHint(index) + def op_hintmask(self, index): + psCharStrings.SimpleT2Decompiler.op_hintmask(self, index) + self.processHintmask(index) + def op_cntrmask(self, index): + psCharStrings.SimpleT2Decompiler.op_cntrmask(self, index) + self.processHintmask(index) + + def processHintmask(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hintmask = True + if hints.status != 2 and hints.has_hint: + # Check from last_check, see if we may be an implicit vstem + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + if hints.status != 2: + # We are an implicit vstem + hints.last_hint = index + 1 + hints.status = 0 + hints.last_checked = index + 1 + + def processHint(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hint = True + hints.last_hint = index + 
hints.last_checked = index + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + hints = cs._hints + subr_hints = subr._hints + + if subr_hints.has_hint: + if hints.status != 2: + hints.has_hint = True + hints.last_checked = index + hints.status = subr_hints.status + # Decide where to chop off from + if subr_hints.status == 0: + hints.last_hint = index + else: + hints.last_hint = index - 2 # Leave the subr call in + else: + # In my understanding, this is a font bug. + # I.e., it has hint stems *after* path construction. + # I've seen this in widespread fonts. + # Best to ignore the hints I suppose... + pass + #assert 0 + else: + hints.status = max(hints.status, subr_hints.status) + if hints.status != 2: + # Check from last_check, make sure we didn't have + # any operators. + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + hints.last_checked = index + if hints.status != 2: + # Decide where to chop off from + if subr_hints.status == 0: + hints.last_hint = index + else: + hints.last_hint = index - 2 # Leave the subr call in + +class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs): + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + + def execute(self, charString): + # Note: Currently we recompute _desubroutinized each time. + # This is more robust in some cases, but in other places we assume + # that each subroutine always expands to the same code, so + # maybe it doesn't matter. To speed up we can just not + # recompute _desubroutinized if it's there. For now I just + # double-check that it desubroutinized to the same thing. 
+ old_desubroutinized = charString._desubroutinized if hasattr(charString, '_desubroutinized') else None + + charString._patches = [] + psCharStrings.SimpleT2Decompiler.execute(self, charString) + desubroutinized = charString.program[:] + for idx,expansion in reversed (charString._patches): + assert idx >= 2 + assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1] + assert type(desubroutinized[idx - 2]) == int + if expansion[-1] == 'return': + expansion = expansion[:-1] + desubroutinized[idx-2:idx] = expansion + if 'endchar' in desubroutinized: + # Cut off after first endchar + desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1] + else: + if not len(desubroutinized) or desubroutinized[-1] != 'return': + desubroutinized.append('return') + + charString._desubroutinized = desubroutinized + del charString._patches + + if old_desubroutinized: + assert desubroutinized == old_desubroutinized + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + self.processSubr(index, subr) + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + cs._patches.append((index, subr._desubroutinized)) + + +@_add_method(ttLib.getTableClass('CFF ')) +def prune_post_subset(self, options): + cff = self.cff + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + + # Drop unused FontDictionaries + if hasattr(font, "FDSelect"): + sel = font.FDSelect + indices = _uniq_sort(sel.gidArray) + sel.gidArray = [indices.index (ss) for ss in sel.gidArray] + arr = font.FDArray + arr.items = [arr[i] for i in indices] + del arr.file, arr.offsets + + # Desubroutinize if asked for + if options.desubroutinize: + for g in font.charset: 
+ c,sel = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs) + decompiler.execute(c) + c.program = c._desubroutinized + + # Drop hints if not needed + if not options.hinting: + + # This can be tricky, but doesn't have to. What we do is: + # + # - Run all used glyph charstrings and recurse into subroutines, + # - For each charstring (including subroutines), if it has any + # of the hint stem operators, we mark it as such. + # Upon returning, for each charstring we note all the + # subroutine calls it makes that (recursively) contain a stem, + # - Dropping hinting then consists of the following two ops: + # * Drop the piece of the program in each charstring before the + # last call to a stem op or a stem-calling subroutine, + # * Drop all hintmask operations. + # - It's trickier... A hintmask right after hints and a few numbers + # will act as an implicit vstemhm. As such, we track whether + # we have seen any non-hint operators so far and do the right + # thing, recursively... 
Good luck understanding that :( + css = set() + for g in font.charset: + c,sel = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs) + decompiler.execute(c) + for charstring in css: + charstring.drop_hints() + del css + + # Drop font-wide hinting values + all_privs = [] + if hasattr(font, 'FDSelect'): + all_privs.extend(fd.Private for fd in font.FDArray) + else: + all_privs.append(font.Private) + for priv in all_privs: + for k in ['BlueValues', 'OtherBlues', + 'FamilyBlues', 'FamilyOtherBlues', + 'BlueScale', 'BlueShift', 'BlueFuzz', + 'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW']: + if hasattr(priv, k): + setattr(priv, k, None) + + # Renumber subroutines to remove unused ones + + # Mark all used subroutines + for g in font.charset: + c,sel = cs.getItemAndSelector(g) + subrs = getattr(c.private, "Subrs", []) + decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs) + decompiler.execute(c) + + all_subrs = [font.GlobalSubrs] + if hasattr(font, 'FDSelect'): + all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs) + elif hasattr(font.Private, 'Subrs') and font.Private.Subrs: + all_subrs.append(font.Private.Subrs) + + subrs = set(subrs) # Remove duplicates + + # Prepare + for subrs in all_subrs: + if not hasattr(subrs, '_used'): + subrs._used = set() + subrs._used = _uniq_sort(subrs._used) + subrs._old_bias = psCharStrings.calcSubrBias(subrs) + subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) + + # Renumber glyph charstrings + for g in font.charset: + c,sel = cs.getItemAndSelector(g) + subrs = getattr(c.private, "Subrs", []) + c.subset_subroutines (subrs, font.GlobalSubrs) + + # Renumber subroutines themselves + for subrs in all_subrs: + if subrs == font.GlobalSubrs: + if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'): + local_subrs = font.Private.Subrs + else: + local_subrs = [] + else: + 
local_subrs = subrs + + subrs.items = [subrs.items[i] for i in subrs._used] + del subrs.file + if hasattr(subrs, 'offsets'): + del subrs.offsets + + for subr in subrs.items: + subr.subset_subroutines (local_subrs, font.GlobalSubrs) + + # Cleanup + for subrs in all_subrs: + del subrs._used, subrs._old_bias, subrs._new_bias + + return True + +@_add_method(ttLib.getTableClass('cmap')) +def closure_glyphs(self, s): + tables = [t for t in self.tables if t.isUnicode()] + + # Close glyphs + for table in tables: + if table.format == 14: + for cmap in table.uvsDict.values(): + glyphs = {g for u,g in cmap if u in s.unicodes_requested} + if None in glyphs: + glyphs.remove(None) + s.glyphs.update(glyphs) + else: + cmap = table.cmap + intersection = s.unicodes_requested.intersection(cmap.keys()) + s.glyphs.update(cmap[u] for u in intersection) + + # Calculate unicodes_missing + s.unicodes_missing = s.unicodes_requested.copy() + for table in tables: + s.unicodes_missing.difference_update(table.cmap) + +@_add_method(ttLib.getTableClass('cmap')) +def prune_pre_subset(self, options): + if not options.legacy_cmap: + # Drop non-Unicode / non-Symbol cmaps + self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()] + if not options.symbol_cmap: + self.tables = [t for t in self.tables if not t.isSymbol()] + # TODO(behdad) Only keep one subtable? + # For now, drop format=0 which can't be subset_glyphs easily? + self.tables = [t for t in self.tables if t.format != 0] + self.numSubTables = len(self.tables) + return True # Required table + +@_add_method(ttLib.getTableClass('cmap')) +def subset_glyphs(self, s): + s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only + for t in self.tables: + if t.format == 14: + # TODO(behdad) We drop all the default-UVS mappings + # for glyphs_requested. So it's the caller's responsibility to make + # sure those are included. 
+ t.uvsDict = {v:[(u,g) for u,g in l + if g in s.glyphs_requested or u in s.unicodes_requested] + for v,l in t.uvsDict.items()} + t.uvsDict = {v:l for v,l in t.uvsDict.items() if l} + elif t.isUnicode(): + t.cmap = {u:g for u,g in t.cmap.items() + if g in s.glyphs_requested or u in s.unicodes_requested} + else: + t.cmap = {u:g for u,g in t.cmap.items() + if g in s.glyphs_requested} + self.tables = [t for t in self.tables + if (t.cmap if t.format != 14 else t.uvsDict)] + self.numSubTables = len(self.tables) + # TODO(behdad) Convert formats when needed. + # In particular, if we have a format=12 without non-BMP + # characters, either drop format=12 one or convert it + # to format=4 if there's not one. + return True # Required table + +@_add_method(ttLib.getTableClass('DSIG')) +def prune_pre_subset(self, options): + # Drop all signatures since they will be invalid + self.usNumSigs = 0 + self.signatureRecords = [] + return True + +@_add_method(ttLib.getTableClass('maxp')) +def prune_pre_subset(self, options): + if not options.hinting: + if self.tableVersion == 0x00010000: + self.maxZones = 1 + self.maxTwilightPoints = 0 + self.maxFunctionDefs = 0 + self.maxInstructionDefs = 0 + self.maxStackElements = 0 + self.maxSizeOfInstructions = 0 + return True + +@_add_method(ttLib.getTableClass('name')) +def prune_pre_subset(self, options): + if '*' not in options.name_IDs: + self.names = [n for n in self.names if n.nameID in options.name_IDs] + if not options.name_legacy: + # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman + # entry for Latin and no Unicode names. + self.names = [n for n in self.names if n.isUnicode()] + # TODO(behdad) Option to keep only one platform's + if '*' not in options.name_languages: + # TODO(behdad) This is Windows-platform specific! 
+ self.names = [n for n in self.names + if n.langID in options.name_languages] + if options.obfuscate_names: + namerecs = [] + for n in self.names: + if n.nameID in [1, 4]: + n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f" + elif n.nameID in [2, 6]: + n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f" + elif n.nameID == 3: + n.string = "" + elif n.nameID in [16, 17, 18]: + continue + namerecs.append(n) + self.names = namerecs + return True # Required table + + +# TODO(behdad) OS/2 ulUnicodeRange / ulCodePageRange? +# TODO(behdad) Drop AAT tables. +# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries. +# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left +# TODO(behdad) Drop GDEF subitems if unused by lookups +# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF) +# TODO(behdad) Text direction considerations. +# TODO(behdad) Text script / language considerations. +# TODO(behdad) Optionally drop 'kern' table if GPOS available +# TODO(behdad) Implement --unicode='*' to choose all cmap'ed +# TODO(behdad) Drop old-spec Indic scripts + + +class Options(object): + + class OptionError(Exception): pass + class UnknownOptionError(OptionError): pass + + _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', + 'EBSC', 'SVG ', 'PCLT', 'LTSH'] + _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite + _drop_tables_default += ['CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'] # Color + _no_subset_tables_default = ['gasp', 'head', 'hhea', 'maxp', + 'vhea', 'OS/2', 'loca', 'name', 'cvt ', + 'fpgm', 'prep', 'VDMX', 'DSIG'] + _hinting_tables_default = ['cvt ', 'fpgm', 'prep', 'hdmx', 'VDMX'] + + # Based on HarfBuzz shapers + _layout_features_groups = { + # Default shaper + 'common': ['ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'], + 'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'], + 'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'], + 'ltr': ['ltra', 'ltrm'], + 'rtl': ['rtla', 
'rtlm'], + # Complex shapers + 'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3', + 'cswh', 'mset'], + 'hangul': ['ljmo', 'vjmo', 'tjmo'], + 'tibetan': ['abvs', 'blws', 'abvm', 'blwm'], + 'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half', + 'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres', + 'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'], + } + _layout_features_default = _uniq_sort(sum( + iter(_layout_features_groups.values()), [])) + + drop_tables = _drop_tables_default + no_subset_tables = _no_subset_tables_default + hinting_tables = _hinting_tables_default + legacy_kern = False # drop 'kern' table if GPOS available + layout_features = _layout_features_default + ignore_missing_glyphs = False + ignore_missing_unicodes = True + hinting = True + glyph_names = False + legacy_cmap = False + symbol_cmap = False + name_IDs = [1, 2] # Family and Style + name_legacy = False + name_languages = [0x0409] # English + obfuscate_names = False # to make webfont unusable as a system font + notdef_glyph = True # gid0 for TrueType / .notdef for CFF + notdef_outline = False # No need for notdef to have an outline really + recommended_glyphs = False # gid1, gid2, gid3 for TrueType + recalc_bounds = False # Recalculate font bounding boxes + recalc_timestamp = False # Recalculate font modified timestamp + canonical_order = False # Order tables as recommended + flavor = None # May be 'woff' or 'woff2' + desubroutinize = False # Desubroutinize CFF CharStrings + + def __init__(self, **kwargs): + self.set(**kwargs) + + def set(self, **kwargs): + for k,v in kwargs.items(): + if not hasattr(self, k): + raise self.UnknownOptionError("Unknown option '%s'" % k) + setattr(self, k, v) + + def parse_opts(self, argv, ignore_unknown=False): + ret = [] + for a in argv: + orig_a = a + if not a.startswith('--'): + ret.append(a) + continue + a = a[2:] + i = a.find('=') + op = '=' + if i == -1: + if a.startswith("no-"): + k = a[3:] + v = False + else: + k = a 
+ v = True + if k.endswith("?"): + k = k[:-1] + v = '?' + else: + k = a[:i] + if k[-1] in "-+": + op = k[-1]+'=' # Op is '-=' or '+=' now. + k = k[:-1] + v = a[i+1:] + ok = k + k = k.replace('-', '_') + if not hasattr(self, k): + if ignore_unknown is True or ok in ignore_unknown: + ret.append(orig_a) + continue + else: + raise self.UnknownOptionError("Unknown option '%s'" % a) + + ov = getattr(self, k) + if v == '?': + print("Current setting for '%s' is: %s" % (ok, ov)) + continue + if isinstance(ov, bool): + v = bool(v) + elif isinstance(ov, int): + v = int(v) + elif isinstance(ov, str): + v = str(v) # redundant + elif isinstance(ov, list): + if isinstance(v, bool): + raise self.OptionError("Option '%s' requires values to be specified using '='" % a) + vv = v.replace(',', ' ').split() + if vv == ['']: + vv = [] + vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] + if op == '=': + v = vv + elif op == '+=': + v = ov + v.extend(vv) + elif op == '-=': + v = ov + for x in vv: + if x in v: + v.remove(x) + else: + assert False + + setattr(self, k, v) + + return ret + + +class Subsetter(object): + + class SubsettingError(Exception): pass + class MissingGlyphsSubsettingError(SubsettingError): pass + class MissingUnicodesSubsettingError(SubsettingError): pass + + def __init__(self, options=None, log=None): + + if not log: + log = Logger() + if not options: + options = Options() + + self.options = options + self.log = log + self.unicodes_requested = set() + self.glyph_names_requested = set() + self.glyph_ids_requested = set() + + def populate(self, glyphs=[], gids=[], unicodes=[], text=""): + self.unicodes_requested.update(unicodes) + if isinstance(text, bytes): + text = text.decode("utf_8") + for u in text: + self.unicodes_requested.add(ord(u)) + self.glyph_names_requested.update(glyphs) + self.glyph_ids_requested.update(gids) + + def _prune_pre_subset(self, font): + + for tag in font.keys(): + if tag == 'GlyphOrder': continue + + if(tag in 
self.options.drop_tables or + (tag in self.options.hinting_tables and not self.options.hinting) or + (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))): + self.log(tag, "dropped") + del font[tag] + continue + + clazz = ttLib.getTableClass(tag) + + if hasattr(clazz, 'prune_pre_subset'): + table = font[tag] + self.log.lapse("load '%s'" % tag) + retain = table.prune_pre_subset(self.options) + self.log.lapse("prune '%s'" % tag) + if not retain: + self.log(tag, "pruned to empty; dropped") + del font[tag] + continue + else: + self.log(tag, "pruned") + + def _closure_glyphs(self, font): + + realGlyphs = set(font.getGlyphOrder()) + glyph_order = font.getGlyphOrder() + + self.glyphs_requested = set() + self.glyphs_requested.update(self.glyph_names_requested) + self.glyphs_requested.update(glyph_order[i] + for i in self.glyph_ids_requested + if i < len(glyph_order)) + + self.glyphs_missing = set() + self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs)) + self.glyphs_missing.update(i for i in self.glyph_ids_requested + if i >= len(glyph_order)) + if self.glyphs_missing: + self.log("Missing requested glyphs: %s" % self.glyphs_missing) + if not self.options.ignore_missing_glyphs: + raise self.MissingGlyphsSubsettingError(self.glyphs_missing) + + self.glyphs = self.glyphs_requested.copy() + + self.unicodes_missing = set() + if 'cmap' in font: + font['cmap'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.log.lapse("close glyph list over 'cmap'") + self.glyphs_cmaped = frozenset(self.glyphs) + if self.unicodes_missing: + missing = ["U+%04X" % u for u in self.unicodes_missing] + self.log("Missing glyphs for requested Unicodes: %s" % missing) + if not self.options.ignore_missing_unicodes: + raise self.MissingUnicodesSubsettingError(missing) + del missing + + if self.options.notdef_glyph: + if 'glyf' in font: + self.glyphs.add(font.getGlyphName(0)) + self.log("Added gid0 to subset") + else: + self.glyphs.add('.notdef') 
+ self.log("Added .notdef to subset") + if self.options.recommended_glyphs: + if 'glyf' in font: + for i in range(min(4, len(font.getGlyphOrder()))): + self.glyphs.add(font.getGlyphName(i)) + self.log("Added first four glyphs to subset") + + if 'GSUB' in font: + self.log("Closing glyph list over 'GSUB': %d glyphs before" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + font['GSUB'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.log("Closed glyph list over 'GSUB': %d glyphs after" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + self.log.lapse("close glyph list over 'GSUB'") + self.glyphs_gsubed = frozenset(self.glyphs) + + if 'glyf' in font: + self.log("Closing glyph list over 'glyf': %d glyphs before" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + font['glyf'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.log("Closed glyph list over 'glyf': %d glyphs after" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + self.log.lapse("close glyph list over 'glyf'") + self.glyphs_glyfed = frozenset(self.glyphs) + + self.glyphs_all = frozenset(self.glyphs) + + self.log("Retaining %d glyphs: " % len(self.glyphs_all)) + + del self.glyphs + + def _subset_glyphs(self, font): + for tag in font.keys(): + if tag == 'GlyphOrder': continue + clazz = ttLib.getTableClass(tag) + + if tag in self.options.no_subset_tables: + self.log(tag, "subsetting not needed") + elif hasattr(clazz, 'subset_glyphs'): + table = font[tag] + self.glyphs = self.glyphs_all + retain = table.subset_glyphs(self) + del self.glyphs + self.log.lapse("subset '%s'" % tag) + if not retain: + self.log(tag, "subsetted to empty; dropped") + del font[tag] + else: + self.log(tag, "subsetted") + else: + self.log(tag, "NOT subset; don't know how to subset; dropped") + del font[tag] + + glyphOrder = font.getGlyphOrder() + glyphOrder = [g for g in glyphOrder if g in self.glyphs_all] + 
font.setGlyphOrder(glyphOrder) + font._buildReverseGlyphOrderDict() + self.log.lapse("subset GlyphOrder") + + def _prune_post_subset(self, font): + for tag in font.keys(): + if tag == 'GlyphOrder': continue + clazz = ttLib.getTableClass(tag) + if hasattr(clazz, 'prune_post_subset'): + table = font[tag] + retain = table.prune_post_subset(self.options) + self.log.lapse("prune '%s'" % tag) + if not retain: + self.log(tag, "pruned to empty; dropped") + del font[tag] + else: + self.log(tag, "pruned") + + def subset(self, font): + + self._prune_pre_subset(font) + self._closure_glyphs(font) + self._subset_glyphs(font) + self._prune_post_subset(font) + + +class Logger(object): + + def __init__(self, verbose=False, xml=False, timing=False): + self.verbose = verbose + self.xml = xml + self.timing = timing + self.last_time = self.start_time = time.time() + + def parse_opts(self, argv): + argv = argv[:] + for v in ['verbose', 'xml', 'timing']: + if "--"+v in argv: + setattr(self, v, True) + argv.remove("--"+v) + return argv + + def __call__(self, *things): + if not self.verbose: + return + print(' '.join(str(x) for x in things)) + + def lapse(self, *things): + if not self.timing: + return + new_time = time.time() + print("Took %0.3fs to %s" %(new_time - self.last_time, + ' '.join(str(x) for x in things))) + self.last_time = new_time + + def glyphs(self, glyphs, font=None): + if not self.verbose: + return + self("Glyph names:", sorted(glyphs)) + if font: + reverseGlyphMap = font.getReverseGlyphMap() + self("Glyph IDs: ", sorted(reverseGlyphMap[g] for g in glyphs)) + + def font(self, font, file=sys.stdout): + if not self.xml: + return + from fontTools.misc import xmlWriter + writer = xmlWriter.XMLWriter(file) + for tag in font.keys(): + writer.begintag(tag) + writer.newline() + font[tag].toXML(writer, font) + writer.endtag(tag) + writer.newline() + + +def load_font(fontFile, + options, + allowVID=False, + checkChecksums=False, + dontLoadGlyphNames=False, + lazy=True): + + font = 
ttLib.TTFont(fontFile, + allowVID=allowVID, + checkChecksums=checkChecksums, + recalcBBoxes=options.recalc_bounds, + recalcTimestamp=options.recalc_timestamp, + lazy=lazy) + + # Hack: + # + # If we don't need glyph names, change 'post' class to not try to + # load them. It avoid lots of headache with broken fonts as well + # as loading time. + # + # Ideally ttLib should provide a way to ask it to skip loading + # glyph names. But it currently doesn't provide such a thing. + # + if dontLoadGlyphNames: + post = ttLib.getTableClass('post') + saved = post.decode_format_2_0 + post.decode_format_2_0 = post.decode_format_3_0 + f = font['post'] + if f.formatType == 2.0: + f.formatType = 3.0 + post.decode_format_2_0 = saved + + return font + +def save_font(font, outfile, options): + if options.flavor and not hasattr(font, 'flavor'): + raise Exception("fonttools version does not support flavors.") + font.flavor = options.flavor + font.save(outfile, reorderTables=options.canonical_order) + +def parse_unicodes(s): + import re + s = re.sub (r"0[xX]", " ", s) + s = re.sub (r"[<+>,;&#\\xXuU\n ]", " ", s) + l = [] + for item in s.split(): + fields = item.split('-') + if len(fields) == 1: + l.append(int(item, 16)) + else: + start,end = fields + l.extend(range(int(start, 16), int(end, 16)+1)) + return l + +def parse_gids(s): + l = [] + for item in s.replace(',', ' ').split(): + fields = item.split('-') + if len(fields) == 1: + l.append(int(fields[0])) + else: + l.extend(range(int(fields[0]), int(fields[1])+1)) + return l + +def parse_glyphs(s): + return s.replace(',', ' ').split() + +def main(args=None): + + if args is None: + args = sys.argv[1:] + + if '--help' in args: + print(__doc__) + sys.exit(0) + + log = Logger() + args = log.parse_opts(args) + + options = Options() + args = options.parse_opts(args, + ignore_unknown=['gids', 'gids-file', + 'glyphs', 'glyphs-file', + 'text', 'text-file', + 'unicodes', 'unicodes-file', + 'output-file']) + + if len(args) < 2: + print("usage:", 
__usage__, file=sys.stderr) + print("Try pyftsubset --help for more information.", file=sys.stderr) + sys.exit(1) + + fontfile = args[0] + args = args[1:] + + subsetter = Subsetter(options=options, log=log) + outfile = fontfile + '.subset' + glyphs = [] + gids = [] + unicodes = [] + wildcard_glyphs = False + wildcard_unicodes = False + text = "" + for g in args: + if g == '*': + wildcard_glyphs = True + continue + if g.startswith('--output-file='): + outfile = g[14:] + continue + if g.startswith('--text='): + text += g[7:] + continue + if g.startswith('--text-file='): + text += open(g[12:]).read().replace('\n', '') + continue + if g.startswith('--unicodes='): + if g[11:] == '*': + wildcard_unicodes = True + else: + unicodes.extend(parse_unicodes(g[11:])) + continue + if g.startswith('--unicodes-file='): + for line in open(g[16:]).readlines(): + unicodes.extend(parse_unicodes(line.split('#')[0])) + continue + if g.startswith('--gids='): + gids.extend(parse_gids(g[7:])) + continue + if g.startswith('--gids-file='): + for line in open(g[12:]).readlines(): + gids.extend(parse_gids(line.split('#')[0])) + continue + if g.startswith('--glyphs='): + if g[9:] == '*': + wildcard_glyphs = True + else: + glyphs.extend(parse_glyphs(g[9:])) + continue + if g.startswith('--glyphs-file='): + for line in open(g[14:]).readlines(): + glyphs.extend(parse_glyphs(line.split('#')[0])) + continue + glyphs.append(g) + + dontLoadGlyphNames = not options.glyph_names and not glyphs + font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames) + log.lapse("load font") + if wildcard_glyphs: + glyphs.extend(font.getGlyphOrder()) + if wildcard_unicodes: + for t in font['cmap'].tables: + if t.isUnicode(): + unicodes.extend(t.cmap.keys()) + assert '' not in glyphs + + log.lapse("compile glyph list") + log("Text: '%s'" % text) + log("Unicodes:", unicodes) + log("Glyphs:", glyphs) + log("Gids:", gids) + + subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text) + 
subsetter.subset(font) + + save_font (font, outfile, options) + log.lapse("compile and save font") + + log.last_time = log.start_time + log.lapse("make one with everything(TOTAL TIME)") + + if log.verbose: + import os + log("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile)) + log("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile)) + + log.font(font) + + font.close() + + +__all__ = [ + 'Options', + 'Subsetter', + 'Logger', + 'load_font', + 'save_font', + 'parse_gids', + 'parse_glyphs', + 'parse_unicodes', + 'main' +] + +if __name__ == '__main__': + main() diff -Nru fonttools-2.4/Lib/fontTools/t1Lib.py fonttools-3.0/Lib/fontTools/t1Lib.py --- fonttools-2.4/Lib/fontTools/t1Lib.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/t1Lib.py 2015-08-31 17:57:15.000000000 +0000 @@ -3,29 +3,29 @@ Functions for reading and writing raw Type 1 data: read(path) - reads any Type 1 font file, returns the raw data and a type indicator: - 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed - to by 'path'. + reads any Type 1 font file, returns the raw data and a type indicator: + 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed + to by 'path'. Raises an error when the file does not contain valid Type 1 data. -write(path, data, kind='OTHER', dohex=0) - writes raw Type 1 data to the file pointed to by 'path'. +write(path, data, kind='OTHER', dohex=False) + writes raw Type 1 data to the file pointed to by 'path'. 'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'. 'dohex' is a flag which determines whether the eexec encrypted part should be written as hexadecimal or binary, but only if kind is 'LWFN' or 'PFB'. 
""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import eexec +from fontTools.misc.macCreatorType import getMacCreatorAndType +import os +import re __author__ = "jvr" __version__ = "1.0b2" DEBUG = 0 -from fontTools.misc import eexec -from fontTools.misc.macCreatorType import getMacCreatorAndType -import string -import re -import os - try: try: @@ -37,49 +37,49 @@ else: haveMacSupport = 1 import MacOS - + class T1Error(Exception): pass -class T1Font: - +class T1Font(object): + """Type 1 font class. - + Uses a minimal interpeter that supports just about enough PS to parse Type 1 fonts. """ - + def __init__(self, path=None): if path is not None: self.data, type = read(path) else: pass # XXX - + def saveAs(self, path, type): write(path, self.getData(), type) - + def getData(self): # XXX Todo: if the data has been converted to Python object, # recreate the PS stream return self.data - + def getGlyphSet(self): """Return a generic GlyphSet, which is a dict-like object mapping glyph names to glyph objects. The returned glyph objects have a .draw() method that supports the Pen protocol, and will have an attribute named 'width', but only *after* the .draw() method has been called. - + In the case of Type 1, the GlyphSet is simply the CharStrings dict. 
""" return self["CharStrings"] - + def __getitem__(self, key): if not hasattr(self, "font"): self.parse() return self.font[key] - + def parse(self): from fontTools.misc import psLib from fontTools.misc import psCharStrings @@ -100,20 +100,20 @@ # low level T1 data read and write functions -def read(path, onlyHeader=0): +def read(path, onlyHeader=False): """reads any Type 1 font file, returns raw data""" - normpath = string.lower(path) - creator, type = getMacCreatorAndType(path) - if type == 'LWFN': + normpath = path.lower() + creator, typ = getMacCreatorAndType(path) + if typ == 'LWFN': return readLWFN(path, onlyHeader), 'LWFN' if normpath[-4:] == '.pfb': return readPFB(path, onlyHeader), 'PFB' else: return readOther(path), 'OTHER' -def write(path, data, kind='OTHER', dohex=0): +def write(path, data, kind='OTHER', dohex=False): assertType1(data) - kind = string.upper(kind) + kind = kind.upper() try: os.remove(path) except os.error: @@ -135,13 +135,13 @@ pass -# -- internal -- +# -- internal -- LWFNCHUNKSIZE = 2000 HEXLINELENGTH = 80 -def readLWFN(path, onlyHeader=0): +def readLWFN(path, onlyHeader=False): """reads an LWFN font file, returns raw data""" resRef = Res.FSOpenResFile(path, 1) # read-only try: @@ -150,9 +150,9 @@ data = [] for i in range(501, 501 + n): res = Res.Get1Resource('POST', i) - code = ord(res.data[0]) - if ord(res.data[1]) <> 0: - raise T1Error, 'corrupt LWFN file' + code = byteord(res.data[0]) + if byteord(res.data[1]) != 0: + raise T1Error('corrupt LWFN file') if code in [1, 2]: if onlyHeader and code == 2: break @@ -166,21 +166,21 @@ elif code == 0: pass # comment, ignore else: - raise T1Error, 'bad chunk code: ' + `code` + raise T1Error('bad chunk code: ' + repr(code)) finally: Res.CloseResFile(resRef) - data = string.join(data, '') + data = bytesjoin(data) assertType1(data) return data -def readPFB(path, onlyHeader=0): +def readPFB(path, onlyHeader=False): """reads a PFB font file, returns raw data""" f = open(path, "rb") data = [] - 
while 1: - if f.read(1) <> chr(128): - raise T1Error, 'corrupt PFB file' - code = ord(f.read(1)) + while True: + if f.read(1) != bytechr(128): + raise T1Error('corrupt PFB file') + code = byteord(f.read(1)) if code in [1, 2]: chunklen = stringToLong(f.read(4)) chunk = f.read(chunklen) @@ -189,11 +189,11 @@ elif code == 3: break else: - raise T1Error, 'bad chunk code: ' + `code` + raise T1Error('bad chunk code: ' + repr(code)) if onlyHeader: break f.close() - data = string.join(data, '') + data = bytesjoin(data) assertType1(data) return data @@ -203,7 +203,7 @@ data = f.read() f.close() assertType1(data) - + chunks = findEncryptedChunks(data) data = [] for isEncrypted, chunk in chunks: @@ -211,7 +211,7 @@ data.append(deHexString(chunk)) else: data.append(chunk) - return string.join(data, '') + return bytesjoin(data) # file writing tools @@ -228,11 +228,11 @@ else: code = 1 while chunk: - res = Res.Resource(chr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2]) + res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2]) res.AddResource('POST', resID, '') chunk = chunk[LWFNCHUNKSIZE - 2:] resID = resID + 1 - res = Res.Resource(chr(5) + '\0') + res = Res.Resource(bytechr(5) + '\0') res.AddResource('POST', resID, '') finally: Res.CloseResFile(resRef) @@ -246,18 +246,18 @@ code = 2 else: code = 1 - f.write(chr(128) + chr(code)) + f.write(bytechr(128) + bytechr(code)) f.write(longToString(len(chunk))) f.write(chunk) - f.write(chr(128) + chr(3)) + f.write(bytechr(128) + bytechr(3)) finally: f.close() -def writeOther(path, data, dohex = 0): +def writeOther(path, data, dohex=False): chunks = findEncryptedChunks(data) f = open(path, "wb") try: - hexlinelen = HEXLINELENGTH / 2 + hexlinelen = HEXLINELENGTH // 2 for isEncrypted, chunk in chunks: if isEncrypted: code = 2 @@ -297,9 +297,9 @@ chunk = deHexString(chunk) decrypted, R = eexec.decrypt(chunk, 55665) decrypted = decrypted[4:] - if decrypted[-len(EEXECINTERNALEND)-1:-1] <> EEXECINTERNALEND \ - and 
decrypted[-len(EEXECINTERNALEND)-2:-2] <> EEXECINTERNALEND: - raise T1Error, "invalid end of eexec part" + if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \ + and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND: + raise T1Error("invalid end of eexec part") decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + '\r' data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER) else: @@ -307,25 +307,25 @@ data.append(chunk[:-len(EEXECBEGIN)-1]) else: data.append(chunk) - return string.join(data, '') + return bytesjoin(data) def findEncryptedChunks(data): chunks = [] - while 1: - eBegin = string.find(data, EEXECBEGIN) + while True: + eBegin = data.find(EEXECBEGIN) if eBegin < 0: break eBegin = eBegin + len(EEXECBEGIN) + 1 - eEnd = string.find(data, EEXECEND, eBegin) + eEnd = data.find(EEXECEND, eBegin) if eEnd < 0: - raise T1Error, "can't find end of eexec part" + raise T1Error("can't find end of eexec part") cypherText = data[eBegin:eEnd + 2] if isHex(cypherText[:4]): cypherText = deHexString(cypherText) plainText, R = eexec.decrypt(cypherText, 55665) - eEndLocal = string.find(plainText, EEXECINTERNALEND) + eEndLocal = plainText.find(EEXECINTERNALEND) if eEndLocal < 0: - raise T1Error, "can't find end of eexec part" + raise T1Error("can't find end of eexec part") chunks.append((0, data[:eBegin])) chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1])) data = data[eEnd:] @@ -333,23 +333,23 @@ return chunks def deHexString(hexstring): - return eexec.deHexString(string.join(string.split(hexstring), "")) + return eexec.deHexString(strjoin(hexstring.split())) # Type 1 assertion -_fontType1RE = re.compile(r"/FontType\s+1\s+def") +_fontType1RE = re.compile(br"/FontType\s+1\s+def") def assertType1(data): - for head in ['%!PS-AdobeFont', '%!FontType1']: + for head in [b'%!PS-AdobeFont', b'%!FontType1']: if data[:len(head)] == head: break else: - raise T1Error, "not a PostScript font" + raise T1Error("not a PostScript font") if not 
_fontType1RE.search(data): - raise T1Error, "not a Type 1 font" - if string.find(data, "currentfile eexec") < 0: - raise T1Error, "not an encrypted Type 1 font" + raise T1Error("not a Type 1 font") + if data.find(b"currentfile eexec") < 0: + raise T1Error("not an encrypted Type 1 font") # XXX what else? return data @@ -357,16 +357,15 @@ # pfb helpers def longToString(long): - str = "" + s = "" for i in range(4): - str = str + chr((long & (0xff << (i * 8))) >> i * 8) - return str + s += bytechr((long & (0xff << (i * 8))) >> i * 8) + return s -def stringToLong(str): - if len(str) <> 4: - raise ValueError, 'string must be 4 bytes long' - long = 0 +def stringToLong(s): + if len(s) != 4: + raise ValueError('string must be 4 bytes long') + l = 0 for i in range(4): - long = long + (ord(str[i]) << (i * 8)) - return long - + l += byteord(s[i]) << (i * 8) + return l diff -Nru fonttools-2.4/Lib/fontTools/ttLib/__init__.py fonttools-3.0/Lib/fontTools/ttLib/__init__.py --- fonttools-2.4/Lib/fontTools/ttLib/__init__.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,6 +1,6 @@ """fontTools.ttLib -- a package for dealing with TrueType fonts. -This package offers translators to convert TrueType fonts to Python +This package offers translators to convert TrueType fonts to Python objects and vice versa, and additionally from Python to TTX (an XML-based text format) and vice versa. @@ -37,62 +37,62 @@ >>> tt2.importXML("afont.ttx") >>> tt2['maxp'].numGlyphs 242 ->>> +>>> """ -# -# $Id: __init__.py,v 1.51 2009-02-22 08:55:00 pabs3 Exp $ -# - -import sys +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * import os -import string +import sys haveMacSupport = 0 if sys.platform == "mac": haveMacSupport = 1 -elif sys.platform == "darwin" and sys.version_info[:3] != (2, 2, 0): - # Python 2.2's Mac support is broken, so don't enable it there. 
- haveMacSupport = 1 +elif sys.platform == "darwin": + if sys.version_info[:3] != (2, 2, 0) and sys.version_info[:1] < (3,): + # Python 2.2's Mac support is broken, so don't enable it there. + # Python 3 does not have Res used by macUtils + haveMacSupport = 1 class TTLibError(Exception): pass -class TTFont: - +class TTFont(object): + """The main font object. It manages file input and output, and offers - a convenient way of accessing tables. + a convenient way of accessing tables. Tables will be only decompiled when necessary, ie. when they're actually accessed. This means that simple operations can be extremely fast. """ - + def __init__(self, file=None, res_name_or_index=None, - sfntVersion="\000\001\000\000", checkChecksums=0, - verbose=0, recalcBBoxes=1, allowVID=0, ignoreDecompileErrors=False, - fontNumber=-1): - + sfntVersion="\000\001\000\000", flavor=None, checkChecksums=False, + verbose=False, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False, + recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=False): + """The constructor can be called with a few different arguments. When reading a font from disk, 'file' should be either a pathname - pointing to a file, or a readable file object. - - It we're running on a Macintosh, 'res_name_or_index' maybe an sfnt - resource name or an sfnt resource index number or zero. The latter - case will cause TTLib to autodetect whether the file is a flat file + pointing to a file, or a readable file object. + + It we're running on a Macintosh, 'res_name_or_index' maybe an sfnt + resource name or an sfnt resource index number or zero. The latter + case will cause TTLib to autodetect whether the file is a flat file or a suitcase. (If it's a suitcase, only the first 'sfnt' resource will be read!) 
- + The 'checkChecksums' argument is used to specify how sfnt checksums are treated upon reading a file from disk: 0: don't check (default) 1: check, print warnings if a wrong checksum is found 2: check, raise an exception if a wrong checksum is found. - - The TTFont constructor can also be called without a 'file' - argument: this is the way to create a new empty font. - In this case you can optionally supply the 'sfntVersion' argument. - + + The TTFont constructor can also be called without a 'file' + argument: this is the way to create a new empty font. + In this case you can optionally supply the 'sfntVersion' argument, + and a 'flavor' which can be None, or 'woff'. + If the recalcBBoxes argument is false, a number of things will *not* be recalculated upon save/compile: 1) glyph bounding boxes @@ -100,15 +100,18 @@ 3) hhea min/max values (1) is needed for certain kinds of CJK fonts (ask Werner Lemberg ;-). Additionally, upon importing an TTX file, this option cause glyphs - to be compiled right away. This should reduce memory consumption - greatly, and therefore should have some impact on the time needed + to be compiled right away. This should reduce memory consumption + greatly, and therefore should have some impact on the time needed to parse/compile large fonts. + If the recalcTimestamp argument is false, the modified timestamp in the + 'head' table will *not* be recalculated upon save/compile. + If the allowVID argument is set to true, then virtual GID's are supported. Asking for a glyph ID with a glyph name or GID that is not in the font will return a virtual GID. This is valid for GSUB and cmap tables. For SING glyphlets, the cmap table is used to specify Unicode - values for virtual GI's used in GSUB/GPOS rules. If the gid Nis requested + values for virtual GI's used in GSUB/GPOS rules. If the gid N is requested and does not exist in the font, or the glyphname has the form glyphN and does not exist in the font, then N is used as the virtual GID. 
Else, the first virtual GID is assigned as 0x1000 -1; for subsequent new @@ -118,11 +121,18 @@ individual tables during decompilation will be ignored, falling back to the DefaultTable implementation, which simply keeps the binary data. + + If lazy is set to True, many data structures are loaded lazily, upon + access only. If it is set to False, many data structures are loaded + immediately. The default is lazy=None which is somewhere in between. """ - - import sfnt + + from fontTools.ttLib import sfnt self.verbose = verbose + self.quiet = quiet + self.lazy = lazy self.recalcBBoxes = recalcBBoxes + self.recalcTimestamp = recalcTimestamp self.tables = {} self.reader = None @@ -135,12 +145,15 @@ if not file: self.sfntVersion = sfntVersion + self.flavor = flavor + self.flavorData = None return if not hasattr(file, "read"): + closeStream = True # assume file is a string if haveMacSupport and res_name_or_index is not None: # on the mac, we deal with sfnt resources as well as flat files - import macUtils + from . import macUtils if res_name_or_index == 0: if macUtils.getSFNTResIndices(file): # get the first available sfnt font. 
@@ -151,29 +164,40 @@ file = macUtils.SFNTResourceReader(file, res_name_or_index) else: file = open(file, "rb") + else: - pass # assume "file" is a readable file object - self.reader = sfnt.SFNTReader(file, checkChecksums, fontNumber=fontNumber) + # assume "file" is a readable file object + closeStream = False + # read input file in memory and wrap a stream around it to allow overwriting + tmp = BytesIO(file.read()) + if hasattr(file, 'name'): + # save reference to input file name + tmp.name = file.name + if closeStream: + file.close() + self.reader = sfnt.SFNTReader(tmp, checkChecksums, fontNumber=fontNumber) self.sfntVersion = self.reader.sfntVersion - + self.flavor = self.reader.flavor + self.flavorData = self.reader.flavorData + def close(self): """If we still have a reader object, close it.""" if self.reader is not None: self.reader.close() - - def save(self, file, makeSuitcase=0, reorderTables=1): - """Save the font to disk. Similarly to the constructor, + + def save(self, file, makeSuitcase=False, reorderTables=True): + """Save the font to disk. Similarly to the constructor, the 'file' argument can be either a pathname or a writable file object. - + On the Mac, if makeSuitcase is true, a suitcase (resource fork) - file will we made instead of a flat .ttf file. + file will we made instead of a flat .ttf file. """ from fontTools.ttLib import sfnt if not hasattr(file, "write"): closeStream = 1 if os.name == "mac" and makeSuitcase: - import macUtils + from . 
import macUtils file = macUtils.SFNTResourceWriter(file, self) else: file = open(file, "wb") @@ -183,35 +207,47 @@ else: # assume "file" is a writable file object closeStream = 0 - - tags = self.keys() + + tags = list(self.keys()) if "GlyphOrder" in tags: tags.remove("GlyphOrder") numTables = len(tags) - if reorderTables: - import tempfile - tmp = tempfile.TemporaryFile(prefix="ttx-fonttools") - else: - tmp = file - writer = sfnt.SFNTWriter(tmp, numTables, self.sfntVersion) - + # write to a temporary stream to allow saving to unseekable streams + tmp = BytesIO() + writer = sfnt.SFNTWriter(tmp, numTables, self.sfntVersion, self.flavor, self.flavorData) + done = [] for tag in tags: self._writeTable(tag, writer, done) - + writer.close() - if reorderTables: + if (reorderTables is None or writer.reordersTables() or + (reorderTables is False and self.reader is None)): + # don't reorder tables and save as is + file.write(tmp.getvalue()) + tmp.close() + else: + if reorderTables is False: + # sort tables using the original font's order + tableOrder = list(self.reader.keys()) + else: + # use the recommended order from the OpenType specification + tableOrder = None tmp.flush() tmp.seek(0) - reorderFontTables(tmp, file) + tmp2 = BytesIO() + reorderFontTables(tmp, tmp2, tableOrder) + file.write(tmp2.getvalue()) tmp.close() + tmp2.close() if closeStream: file.close() - - def saveXML(self, fileOrPath, progress=None, - tables=None, skipTables=None, splitTables=0, disassembleInstructions=1): + + def saveXML(self, fileOrPath, progress=None, quiet=False, + tables=None, skipTables=None, splitTables=False, disassembleInstructions=True, + bitmapGlyphDataFormat='raw'): """Export the font as TTX (an XML-based text file), or as a series of text files when splitTables is true. In the latter case, the 'fileOrPath' argument should be a path to a directory. @@ -220,11 +256,12 @@ to skip, but only when the 'tables' argument is false. 
""" from fontTools import version - import xmlWriter - + from fontTools.misc import xmlWriter + self.disassembleInstructions = disassembleInstructions + self.bitmapGlyphDataFormat = bitmapGlyphDataFormat if not tables: - tables = self.keys() + tables = list(self.keys()) if "GlyphOrder" not in tables: tables = ["GlyphOrder"] + tables if skipTables: @@ -237,19 +274,19 @@ idlefunc = getattr(progress, "idle", None) else: idlefunc = None - + writer = xmlWriter.XMLWriter(fileOrPath, idlefunc=idlefunc) - writer.begintag("ttFont", sfntVersion=`self.sfntVersion`[1:-1], + writer.begintag("ttFont", sfntVersion=repr(self.sfntVersion)[1:-1], ttLibVersion=version) writer.newline() - + if not splitTables: writer.newline() else: # 'fileOrPath' must now be a path path, ext = os.path.splitext(fileOrPath) fileNameTemplate = path + ".%s" + ext - + for i in range(numTables): if progress: progress.set(i) @@ -264,7 +301,7 @@ writer.newline() else: tableWriter = writer - self._tableToXML(tableWriter, tag, progress) + self._tableToXML(tableWriter, tag, progress, quiet) if splitTables: tableWriter.endtag("ttFont") tableWriter.newline() @@ -276,9 +313,9 @@ writer.close() if self.verbose: debugmsg("Done dumping TTX") - - def _tableToXML(self, writer, tag, progress): - if self.has_key(tag): + + def _tableToXML(self, writer, tag, progress, quiet): + if tag in self: table = self[tag] report = "Dumping '%s' table..." 
% tag else: @@ -288,14 +325,18 @@ elif self.verbose: debugmsg(report) else: - print report - if not self.has_key(tag): + if not quiet: + print(report) + if tag not in self: return xmlTag = tagToXML(tag) + attrs = dict() if hasattr(table, "ERROR"): - writer.begintag(xmlTag, ERROR="decompilation error") - else: - writer.begintag(xmlTag) + attrs['ERROR'] = "decompilation error" + from .tables.DefaultTable import DefaultTable + if table.__class__ == DefaultTable: + attrs['raw'] = True + writer.begintag(xmlTag, **attrs) writer.newline() if tag in ("glyf", "CFF "): table.toXML(writer, self, progress) @@ -304,41 +345,44 @@ writer.endtag(xmlTag) writer.newline() writer.newline() - - def importXML(self, file, progress=None): + + def importXML(self, file, progress=None, quiet=False): """Import a TTX file (an XML-based text format), so as to recreate a font object. """ - if self.has_key("maxp") and self.has_key("post"): + if "maxp" in self and "post" in self: # Make sure the glyph order is loaded, as it otherwise gets # lost if the XML doesn't contain the glyph order, yet does # contain the table which was originally used to extract the # glyph names from (ie. 'post', 'cmap' or 'CFF '). 
self.getGlyphOrder() - import xmlImport - xmlImport.importXML(self, file, progress) - + + from fontTools.misc import xmlReader + + reader = xmlReader.XMLReader(file, self, progress, quiet) + reader.read() + def isLoaded(self, tag): - """Return true if the table identified by 'tag' has been + """Return true if the table identified by 'tag' has been decompiled and loaded into memory.""" - return self.tables.has_key(tag) - + return tag in self.tables + def has_key(self, tag): if self.isLoaded(tag): - return 1 - elif self.reader and self.reader.has_key(tag): - return 1 + return True + elif self.reader and tag in self.reader: + return True elif tag == "GlyphOrder": - return 1 + return True else: - return 0 - + return False + __contains__ = has_key - + def keys(self): - keys = self.tables.keys() + keys = list(self.tables.keys()) if self.reader: - for key in self.reader.keys(): + for key in list(self.reader.keys()): if key not in keys: keys.append(key) @@ -346,11 +390,12 @@ keys.remove("GlyphOrder") keys = sortedTagList(keys) return ["GlyphOrder"] + keys - + def __len__(self): - return len(self.keys()) - + return len(list(self.keys())) + def __getitem__(self, tag): + tag = Tag(tag) try: return self.tables[tag] except KeyError: @@ -374,10 +419,9 @@ if not self.ignoreDecompileErrors: raise # fall back to DefaultTable, retaining the binary table data - print "An exception occurred during the decompilation of the '%s' table" % tag - from tables.DefaultTable import DefaultTable - import StringIO - file = StringIO.StringIO() + print("An exception occurred during the decompilation of the '%s' table" % tag) + from .tables.DefaultTable import DefaultTable + file = StringIO() traceback.print_exc(file=file) table = DefaultTable(tag) table.ERROR = file.getvalue() @@ -385,37 +429,43 @@ table.decompile(data, self) return table else: - raise KeyError, "'%s' table not found" % tag - + raise KeyError("'%s' table not found" % tag) + def __setitem__(self, tag, table): - self.tables[tag] = 
table - + self.tables[Tag(tag)] = table + def __delitem__(self, tag): - if not self.has_key(tag): - raise KeyError, "'%s' table not found" % tag - if self.tables.has_key(tag): + if tag not in self: + raise KeyError("'%s' table not found" % tag) + if tag in self.tables: del self.tables[tag] - if self.reader and self.reader.has_key(tag): + if self.reader and tag in self.reader: del self.reader[tag] - + + def get(self, tag, default=None): + try: + return self[tag] + except KeyError: + return default + def setGlyphOrder(self, glyphOrder): self.glyphOrder = glyphOrder - + def getGlyphOrder(self): try: return self.glyphOrder except AttributeError: pass - if self.has_key('CFF '): + if 'CFF ' in self: cff = self['CFF '] self.glyphOrder = cff.getGlyphOrder() - elif self.has_key('post'): + elif 'post' in self: # TrueType font glyphOrder = self['post'].getGlyphOrder() if glyphOrder is None: # # No names found in the 'post' table. - # Try to create glyph names from the unicode cmap (if available) + # Try to create glyph names from the unicode cmap (if available) # in combination with the Adobe Glyph List (AGL). # self._getGlyphNamesFromCmap() @@ -424,7 +474,7 @@ else: self._getGlyphNamesFromCmap() return self.glyphOrder - + def _getGlyphNamesFromCmap(self): # # This is rather convoluted, but then again, it's an interesting problem: @@ -462,34 +512,35 @@ # to work with (so we don't get called recursively). 
self.glyphOrder = glyphOrder # Get a (new) temporary cmap (based on the just invented names) - tempcmap = self['cmap'].getcmap(3, 1) + try: + tempcmap = self['cmap'].getcmap(3, 1) + except KeyError: + tempcmap = None if tempcmap is not None: # we have a unicode cmap from fontTools import agl cmap = tempcmap.cmap # create a reverse cmap dict reversecmap = {} - for unicode, name in cmap.items(): + for unicode, name in list(cmap.items()): reversecmap[name] = unicode allNames = {} for i in range(numGlyphs): tempName = glyphOrder[i] - if reversecmap.has_key(tempName): + if tempName in reversecmap: unicode = reversecmap[tempName] - if agl.UV2AGL.has_key(unicode): + if unicode in agl.UV2AGL: # get name from the Adobe Glyph List glyphName = agl.UV2AGL[unicode] else: # create uni name - glyphName = "uni" + string.upper(string.zfill( - hex(unicode)[2:], 4)) + glyphName = "uni%04X" % unicode tempName = glyphName - n = 1 - while allNames.has_key(tempName): - tempName = glyphName + "#" + `n` - n = n + 1 + n = allNames.get(tempName, 0) + if n: + tempName = glyphName + "#" + str(n) glyphOrder[i] = tempName - allNames[tempName] = 1 + allNames[tempName] = n + 1 # Delete the temporary cmap table from the cache, so it can # be parsed again with the right names. del self.tables['cmap'] @@ -500,21 +551,20 @@ # restore partially loaded cmap, so it can continue loading # using the proper names. self.tables['cmap'] = cmapLoading - + def getGlyphNames(self): """Get a list of glyph names, sorted alphabetically.""" - glyphNames = self.getGlyphOrder()[:] - glyphNames.sort() + glyphNames = sorted(self.getGlyphOrder()[:]) return glyphNames - + def getGlyphNames2(self): - """Get a list of glyph names, sorted alphabetically, + """Get a list of glyph names, sorted alphabetically, but not case sensitive. 
""" from fontTools.misc import textTools return textTools.caselessSort(self.getGlyphOrder()) - - def getGlyphName(self, glyphID, requireReal=0): + + def getGlyphName(self, glyphID, requireReal=False): try: return self.getGlyphOrder()[glyphID] except IndexError: @@ -523,7 +573,7 @@ # the cmap table than there are glyphs. I don't think it's legal... return "glyph%.5d" % glyphID else: - # user intends virtual GID support + # user intends virtual GID support try: glyphName = self.VIDDict[glyphID] except KeyError: @@ -533,20 +583,27 @@ self.VIDDict[glyphID] = glyphName return glyphName - def getGlyphID(self, glyphName, requireReal = 0): + def getGlyphID(self, glyphName, requireReal=False): if not hasattr(self, "_reverseGlyphOrderDict"): self._buildReverseGlyphOrderDict() glyphOrder = self.getGlyphOrder() d = self._reverseGlyphOrderDict - if not d.has_key(glyphName): + if glyphName not in d: if glyphName in glyphOrder: self._buildReverseGlyphOrderDict() return self.getGlyphID(glyphName) else: - if requireReal or not self.allowVID: - raise KeyError, glyphName + if requireReal: + raise KeyError(glyphName) + elif not self.allowVID: + # Handle glyphXXX only + if glyphName[:5] == "glyph": + try: + return int(glyphName[5:]) + except (NameError, ValueError): + raise KeyError(glyphName) else: - # user intends virtual GID support + # user intends virtual GID support try: glyphID = self.reverseVIDDict[glyphName] except KeyError: @@ -556,7 +613,7 @@ glyphID = int(glyphName[5:]) except (NameError, ValueError): glyphID = None - if glyphID == None: + if glyphID is None: glyphID = self.last_vid -1 self.last_vid = glyphID self.reverseVIDDict[glyphName] = glyphID @@ -564,12 +621,12 @@ return glyphID glyphID = d[glyphName] - if glyphName <> glyphOrder[glyphID]: + if glyphName != glyphOrder[glyphID]: self._buildReverseGlyphOrderDict() return self.getGlyphID(glyphName) return glyphID - def getReverseGlyphMap(self, rebuild=0): + def getReverseGlyphMap(self, rebuild=False): if rebuild or not 
hasattr(self, "_reverseGlyphOrderDict"): self._buildReverseGlyphOrderDict() return self._reverseGlyphOrderDict @@ -579,9 +636,9 @@ glyphOrder = self.getGlyphOrder() for glyphID in range(len(glyphOrder)): d[glyphOrder[glyphID]] = glyphID - + def _writeTable(self, tag, writer, done): - """Internal helper function for self.save(). Keeps track of + """Internal helper function for self.save(). Keeps track of inter-table dependencies. """ if tag in done: @@ -589,7 +646,7 @@ tableClass = getTableClass(tag) for masterTable in tableClass.dependencies: if masterTable not in done: - if self.has_key(masterTable): + if masterTable in self: self._writeTable(masterTable, writer, done) else: done.append(masterTable) @@ -598,65 +655,67 @@ debugmsg("writing '%s' table to disk" % tag) writer[tag] = tabledata done.append(tag) - + def getTableData(self, tag): """Returns raw table data, whether compiled or directly read from disk. """ + tag = Tag(tag) if self.isLoaded(tag): if self.verbose: debugmsg("compiling '%s' table" % tag) return self.tables[tag].compile(self) - elif self.reader and self.reader.has_key(tag): + elif self.reader and tag in self.reader: if self.verbose: debugmsg("Reading '%s' table from disk" % tag) return self.reader[tag] else: - raise KeyError, tag - - def getGlyphSet(self, preferCFF=1): + raise KeyError(tag) + + def getGlyphSet(self, preferCFF=True): """Return a generic GlyphSet, which is a dict-like object mapping glyph names to glyph objects. The returned glyph objects have a .draw() method that supports the Pen protocol, and will - have an attribute named 'width', but only *after* the .draw() method - has been called. - + have an attribute named 'width'. + If the font is CFF-based, the outlines will be taken from the 'CFF ' table. Otherwise the outlines will be taken from the 'glyf' table. If the font contains both a 'CFF ' and a 'glyf' table, you can use the 'preferCFF' argument to specify which one should be taken. 
""" - if preferCFF and self.has_key("CFF "): - return self["CFF "].cff.values()[0].CharStrings - if self.has_key("glyf"): - return _TTGlyphSet(self) - if self.has_key("CFF "): - return self["CFF "].cff.values()[0].CharStrings - raise TTLibError, "Font contains no outlines" - - -class _TTGlyphSet: - - """Generic dict-like GlyphSet class, meant as a TrueType counterpart - to CFF's CharString dict. See TTFont.getGlyphSet(). - """ - - # This class is distinct from the 'glyf' table itself because we need - # access to the 'hmtx' table, which could cause a dependency problem - # there when reading from XML. - - def __init__(self, ttFont): - self._ttFont = ttFont - + glyphs = None + if (preferCFF and "CFF " in self) or "glyf" not in self: + glyphs = _TTGlyphSet(self, list(self["CFF "].cff.values())[0].CharStrings, _TTGlyphCFF) + + if glyphs is None and "glyf" in self: + glyphs = _TTGlyphSet(self, self["glyf"], _TTGlyphGlyf) + + if glyphs is None: + raise TTLibError("Font contains no outlines") + + return glyphs + + +class _TTGlyphSet(object): + + """Generic dict-like GlyphSet class that pulls metrics from hmtx and + glyph shape from TrueType or CFF. + """ + + def __init__(self, ttFont, glyphs, glyphType): + self._glyphs = glyphs + self._hmtx = ttFont['hmtx'] + self._glyphType = glyphType + def keys(self): - return self._ttFont["glyf"].keys() - + return list(self._glyphs.keys()) + def has_key(self, glyphName): - return self._ttFont["glyf"].has_key(glyphName) - + return glyphName in self._glyphs + __contains__ = has_key def __getitem__(self, glyphName): - return _TTGlyph(glyphName, self._ttFont) + return self._glyphType(self, self._glyphs[glyphName], self._hmtx[glyphName]) def get(self, glyphName, default=None): try: @@ -664,77 +723,48 @@ except KeyError: return default +class _TTGlyph(object): -class _TTGlyph: - """Wrapper for a TrueType glyph that supports the Pen protocol, meaning that it has a .draw() method that takes a pen object as its only argument. 
Additionally there is a 'width' attribute. """ - - def __init__(self, glyphName, ttFont): - self._glyphName = glyphName - self._ttFont = ttFont - self.width, self.lsb = self._ttFont['hmtx'][self._glyphName] - + + def __init__(self, glyphset, glyph, metrics): + self._glyphset = glyphset + self._glyph = glyph + self.width, self.lsb = metrics + def draw(self, pen): """Draw the glyph onto Pen. See fontTools.pens.basePen for details how that works. """ - glyfTable = self._ttFont['glyf'] - glyph = glyfTable[self._glyphName] - if hasattr(glyph, "xMin"): - offset = self.lsb - glyph.xMin - else: - offset = 0 - if glyph.isComposite(): - for component in glyph: - glyphName, transform = component.getComponentInfo() - pen.addComponent(glyphName, transform) - else: - coordinates, endPts, flags = glyph.getCoordinates(glyfTable) - if offset: - coordinates = coordinates + (offset, 0) - start = 0 - for end in endPts: - end = end + 1 - contour = coordinates[start:end].tolist() - cFlags = flags[start:end].tolist() - start = end - if 1 not in cFlags: - # There is not a single on-curve point on the curve, - # use pen.qCurveTo's special case by specifying None - # as the on-curve point. - contour.append(None) - pen.qCurveTo(*contour) - else: - # Shuffle the points so that contour the is guaranteed - # to *end* in an on-curve point, which we'll use for - # the moveTo. - firstOnCurve = cFlags.index(1) + 1 - contour = contour[firstOnCurve:] + contour[:firstOnCurve] - cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve] - pen.moveTo(contour[-1]) - while contour: - nextOnCurve = cFlags.index(1) + 1 - if nextOnCurve == 1: - pen.lineTo(contour[0]) - else: - pen.qCurveTo(*contour[:nextOnCurve]) - contour = contour[nextOnCurve:] - cFlags = cFlags[nextOnCurve:] - pen.closePath() + self._glyph.draw(pen) + +class _TTGlyphCFF(_TTGlyph): + pass + +class _TTGlyphGlyf(_TTGlyph): + + def draw(self, pen): + """Draw the glyph onto Pen. See fontTools.pens.basePen for details + how that works. 
+ """ + glyfTable = self._glyphset._glyphs + glyph = self._glyph + offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0 + glyph.draw(pen, glyfTable, offset) + +class GlyphOrder(object): -class GlyphOrder: - """A pseudo table. The glyph order isn't in the font as a separate table, but it's nice to present it as such in the TTX format. """ - - def __init__(self, tag): + + def __init__(self, tag=None): pass - + def toXML(self, writer, ttFont): glyphOrder = ttFont.getGlyphOrder() writer.comment("The 'id' attribute is only for humans; " @@ -744,8 +774,8 @@ glyphName = glyphOrder[i] writer.simpletag("GlyphID", id=i, name=glyphName) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "glyphOrder"): self.glyphOrder = [] ttFont.setGlyphOrder(self.glyphOrder) @@ -754,32 +784,47 @@ def getTableModule(tag): - """Fetch the packer/unpacker module for a table. + """Fetch the packer/unpacker module for a table. Return None when no module is found. """ - import tables + from . import tables pyTag = tagToIdentifier(tag) try: __import__("fontTools.ttLib.tables." + pyTag) - except ImportError: - return None + except ImportError as err: + # If pyTag is found in the ImportError message, + # means table is not implemented. If it's not + # there, then some other module is missing, don't + # suppress the error. + if str(err).find(pyTag) >= 0: + return None + else: + raise err else: return getattr(tables, pyTag) def getTableClass(tag): - """Fetch the packer/unpacker class for a table. + """Fetch the packer/unpacker class for a table. Return None when no class is found. 
""" module = getTableModule(tag) if module is None: - from tables.DefaultTable import DefaultTable + from .tables.DefaultTable import DefaultTable return DefaultTable pyTag = tagToIdentifier(tag) tableClass = getattr(module, "table_" + pyTag) return tableClass +def getClassTag(klass): + """Fetch the table tag for a class object.""" + name = klass.__name__ + assert name[:6] == 'table_' + name = name[6:] # Chop 'table_' + return identifierToTag(name) + + def newTable(tag): """Return a new instance of a table.""" tableClass = getTableClass(tag) @@ -794,23 +839,24 @@ elif re.match("[A-Z]", c): return c + "_" else: - return hex(ord(c))[2:] + return hex(byteord(c))[2:] def tagToIdentifier(tag): - """Convert a table tag to a valid (but UGLY) python identifier, - as well as a filename that's guaranteed to be unique even on a + """Convert a table tag to a valid (but UGLY) python identifier, + as well as a filename that's guaranteed to be unique even on a caseless file system. Each character is mapped to two characters. Lowercase letters get an underscore before the letter, uppercase letters get an underscore after the letter. Trailing spaces are trimmed. Illegal characters are escaped as two hex bytes. If the result starts with a number (as the result of a hex escape), an - extra underscore is prepended. Examples: + extra underscore is prepended. Examples: 'glyf' -> '_g_l_y_f' 'cvt ' -> '_c_v_t' 'OS/2' -> 'O_S_2f_2' """ import re + tag = Tag(tag) if tag == "GlyphOrder": return tag assert len(tag) == 4, "tag should be 4 characters long" @@ -842,7 +888,7 @@ tag = tag + chr(int(ident[i:i+2], 16)) # append trailing spaces tag = tag + (4 - len(tag)) * ' ' - return tag + return Tag(tag) def tagToXML(tag): @@ -851,12 +897,13 @@ case sensitive, this is a fairly simple/readable translation. 
""" import re + tag = Tag(tag) if tag == "OS/2": return "OS_2" elif tag == "GlyphOrder": return tag if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag): - return string.strip(tag) + return tag.strip() else: return tagToIdentifier(tag) @@ -864,34 +911,32 @@ def xmlToTag(tag): """The opposite of tagToXML()""" if tag == "OS_2": - return "OS/2" + return Tag("OS/2") if len(tag) == 8: return identifierToTag(tag) else: - return tag + " " * (4 - len(tag)) - return tag + return Tag(tag + " " * (4 - len(tag))) def debugmsg(msg): import time - print msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time())) + print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time()))) # Table order as recommended in the OpenType specification 1.4 TTFTableOrder = ["head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX", - "hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf", - "kern", "name", "post", "gasp", "PCLT"] + "hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf", + "kern", "name", "post", "gasp", "PCLT"] OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post", - "CFF "] + "CFF "] def sortedTagList(tagList, tableOrder=None): """Return a sorted copy of tagList, sorted according to the OpenType specification, or according to a custom tableOrder. If given and not None, tableOrder needs to be a list of tag names. """ - tagList = list(tagList) - tagList.sort() + tagList = sorted(tagList) if tableOrder is None: if "DSIG" in tagList: # DSIG should be last (XXX spec reference?) @@ -910,14 +955,37 @@ return orderedTables -def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=0): +def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False): """Rewrite a font file, ordering the tables as recommended by the OpenType specification 1.4. 
""" from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter reader = SFNTReader(inFile, checkChecksums=checkChecksums) - writer = SFNTWriter(outFile, reader.numTables, reader.sfntVersion) - tables = reader.keys() + writer = SFNTWriter(outFile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) + tables = list(reader.keys()) for tag in sortedTagList(tables, tableOrder): writer[tag] = reader[tag] writer.close() + + +def maxPowerOfTwo(x): + """Return the highest exponent of two, so that + (2 ** exponent) <= x. Return 0 if x is 0. + """ + exponent = 0 + while x: + x = x >> 1 + exponent = exponent + 1 + return max(exponent - 1, 0) + + +def getSearchRange(n, itemSize=16): + """Calculate searchRange, entrySelector, rangeShift. + """ + # itemSize defaults to 16, for backward compatibility + # with upstream fonttools. + exponent = maxPowerOfTwo(n) + searchRange = (2 ** exponent) * itemSize + entrySelector = exponent + rangeShift = max(0, n * itemSize - searchRange) + return searchRange, entrySelector, rangeShift diff -Nru fonttools-2.4/Lib/fontTools/ttLib/macUtils.py fonttools-3.0/Lib/fontTools/ttLib/macUtils.py --- fonttools-2.4/Lib/fontTools/ttLib/macUtils.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/macUtils.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,11 +1,11 @@ """ttLib.macUtils.py -- Various Mac-specific stuff.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * import sys import os if sys.platform not in ("mac", "darwin"): - raise ImportError, "This module is Mac-only!" 
- -import cStringIO + raise ImportError("This module is Mac-only!") try: from Carbon import Res except ImportError: @@ -18,7 +18,7 @@ resref = Res.FSOpenResFile(path, mode) except Res.Error: # try data fork - resref = Res.FSOpenResourceFile(path, u'', mode) + resref = Res.FSOpenResourceFile(path, unicode(), mode) return resref @@ -31,11 +31,11 @@ Res.UseResFile(resref) numSFNTs = Res.Count1Resources('sfnt') Res.CloseResFile(resref) - return range(1, numSFNTs + 1) + return list(range(1, numSFNTs + 1)) def openTTFonts(path): - """Given a pathname, return a list of TTFont objects. In the case + """Given a pathname, return a list of TTFont objects. In the case of a flat TTF/OTF file, the list will contain just one font object; but in the case of a Mac font suitcase it will contain as many font objects as there are sfnt resources in the file. @@ -49,153 +49,25 @@ for index in sfnts: fonts.append(ttLib.TTFont(path, index)) if not fonts: - raise ttLib.TTLibError, "no fonts found in file '%s'" % path + raise ttLib.TTLibError("no fonts found in file '%s'" % path) return fonts -class SFNTResourceReader: - +class SFNTResourceReader(object): + """Simple (Mac-only) read-only file wrapper for 'sfnt' resources.""" - + def __init__(self, path, res_name_or_index): resref = MyOpenResFile(path) Res.UseResFile(resref) - if type(res_name_or_index) == type(""): + if isinstance(res_name_or_index, basestring): res = Res.Get1NamedResource('sfnt', res_name_or_index) else: res = Res.Get1IndResource('sfnt', res_name_or_index) - self.file = cStringIO.StringIO(res.data) + self.file = BytesIO(res.data) Res.CloseResFile(resref) self.name = path - - def __getattr__(self, attr): - # cheap inheritance - return getattr(self.file, attr) - -class SFNTResourceWriter: - - """Simple (Mac-only) file wrapper for 'sfnt' resources.""" - - def __init__(self, path, ttFont, res_id=None): - self.file = cStringIO.StringIO() - self.name = path - self.closed = 0 - fullname = ttFont['name'].getName(4, 1, 0) # Full 
name, mac, default encoding - familyname = ttFont['name'].getName(1, 1, 0) # Fam. name, mac, default encoding - psname = ttFont['name'].getName(6, 1, 0) # PostScript name, etc. - if fullname is None or fullname is None or psname is None: - from fontTools import ttLib - raise ttLib.TTLibError, "can't make 'sfnt' resource, no Macintosh 'name' table found" - self.fullname = fullname.string - self.familyname = familyname.string - self.psname = psname.string - if self.familyname <> self.psname[:len(self.familyname)]: - # ugh. force fam name to be the same as first part of ps name, - # fondLib otherwise barfs. - for i in range(min(len(self.psname), len(self.familyname))): - if self.familyname[i] <> self.psname[i]: - break - self.familyname = self.psname[:i] - - self.ttFont = ttFont - self.res_id = res_id - if os.path.exists(self.name): - os.remove(self.name) - # XXX datafork support - Res.FSpCreateResFile(self.name, 'DMOV', 'FFIL', 0) - self.resref = Res.FSOpenResFile(self.name, 3) # exclusive read/write permission - - def close(self): - if self.closed: - return - Res.UseResFile(self.resref) - try: - res = Res.Get1NamedResource('sfnt', self.fullname) - except Res.Error: - pass - else: - res.RemoveResource() - res = Res.Resource(self.file.getvalue()) - if self.res_id is None: - self.res_id = Res.Unique1ID('sfnt') - res.AddResource('sfnt', self.res_id, self.fullname) - res.ChangedResource() - - self.createFond() - del self.ttFont - Res.CloseResFile(self.resref) - self.file.close() - self.closed = 1 - - def createFond(self): - fond_res = Res.Resource("") - fond_res.AddResource('FOND', self.res_id, self.fullname) - - from fontTools import fondLib - fond = fondLib.FontFamily(fond_res, "w") - - fond.ffFirstChar = 0 - fond.ffLastChar = 255 - fond.fondClass = 0 - fond.fontAssoc = [(0, 0, self.res_id)] - fond.ffFlags = 20480 # XXX ??? 
- fond.ffIntl = (0, 0) - fond.ffLeading = 0 - fond.ffProperty = (0, 0, 0, 0, 0, 0, 0, 0, 0) - fond.ffVersion = 0 - fond.glyphEncoding = {} - if self.familyname == self.psname: - fond.styleIndices = (1,) * 48 # uh-oh, fondLib is too dumb. - else: - fond.styleIndices = (2,) * 48 - fond.styleStrings = [] - fond.boundingBoxes = None - fond.ffFamID = self.res_id - fond.changed = 1 - fond.glyphTableOffset = 0 - fond.styleMappingReserved = 0 - - # calc: - scale = 4096.0 / self.ttFont['head'].unitsPerEm - fond.ffAscent = scale * self.ttFont['hhea'].ascent - fond.ffDescent = scale * self.ttFont['hhea'].descent - fond.ffWidMax = scale * self.ttFont['hhea'].advanceWidthMax - - fond.ffFamilyName = self.familyname - fond.psNames = {0: self.psname} - - fond.widthTables = {} - fond.kernTables = {} - cmap = self.ttFont['cmap'].getcmap(1, 0) - if cmap: - names = {} - for code, name in cmap.cmap.items(): - names[name] = code - if self.ttFont.has_key('kern'): - kern = self.ttFont['kern'].getkern(0) - if kern: - fondkerning = [] - for (left, right), value in kern.kernTable.items(): - if names.has_key(left) and names.has_key(right): - fondkerning.append((names[left], names[right], scale * value)) - fondkerning.sort() - fond.kernTables = {0: fondkerning} - if self.ttFont.has_key('hmtx'): - hmtx = self.ttFont['hmtx'] - fondwidths = [2048] * 256 + [0, 0] # default width, + plus two zeros. 
- for name, (width, lsb) in hmtx.metrics.items(): - if names.has_key(name): - fondwidths[names[name]] = scale * width - fond.widthTables = {0: fondwidths} - fond.save() - - def __del__(self): - if not self.closed: - self.close() - def __getattr__(self, attr): # cheap inheritance return getattr(self.file, attr) - - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/sfnt.py fonttools-3.0/Lib/fontTools/ttLib/sfnt.py --- fonttools-2.4/Lib/fontTools/ttLib/sfnt.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/sfnt.py 2015-08-31 17:57:15.000000000 +0000 @@ -4,147 +4,270 @@ SFNTReader SFNTWriter -(Normally you don't have to use these classes explicitly; they are +(Normally you don't have to use these classes explicitly; they are used automatically by ttLib.TTFont.) -The reading and writing of sfnt files is separated in two distinct +The reading and writing of sfnt files is separated in two distinct classes, since whenever to number of tables changes or whenever a table's length chages you need to rewrite the whole file anyway. """ -import sys -import struct, sstruct -import os +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.ttLib import getSearchRange +import struct +from collections import OrderedDict -class SFNTReader: - +class SFNTReader(object): + + def __new__(cls, *args, **kwargs): + """ Return an instance of the SFNTReader sub-class which is compatible + with the input file type. 
+ """ + if args and cls is SFNTReader: + infile = args[0] + sfntVersion = Tag(infile.read(4)) + infile.seek(0) + if sfntVersion == "wOF2": + # return new WOFF2Reader object + from fontTools.ttLib.woff2 import WOFF2Reader + return object.__new__(WOFF2Reader) + # return default object + return object.__new__(cls) + def __init__(self, file, checkChecksums=1, fontNumber=-1): self.file = file self.checkChecksums = checkChecksums - data = self.file.read(sfntDirectorySize) - if len(data) <> sfntDirectorySize: - from fontTools import ttLib - raise ttLib.TTLibError, "Not a TrueType or OpenType font (not enough data)" - sstruct.unpack(sfntDirectoryFormat, data, self) - if self.sfntVersion == "ttcf": - assert ttcHeaderSize == sfntDirectorySize + + self.flavor = None + self.flavorData = None + self.DirectoryEntry = SFNTDirectoryEntry + self.sfntVersion = self.file.read(4) + self.file.seek(0) + if self.sfntVersion == b"ttcf": + data = self.file.read(ttcHeaderSize) + if len(data) != ttcHeaderSize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a Font Collection (not enough data)") sstruct.unpack(ttcHeaderFormat, data, self) assert self.Version == 0x00010000 or self.Version == 0x00020000, "unrecognized TTC version 0x%08x" % self.Version if not 0 <= fontNumber < self.numFonts: from fontTools import ttLib - raise ttLib.TTLibError, "specify a font number between 0 and %d (inclusive)" % (self.numFonts - 1) + raise ttLib.TTLibError("specify a font number between 0 and %d (inclusive)" % (self.numFonts - 1)) offsetTable = struct.unpack(">%dL" % self.numFonts, self.file.read(self.numFonts * 4)) if self.Version == 0x00020000: pass # ignoring version 2.0 signatures self.file.seek(offsetTable[fontNumber]) data = self.file.read(sfntDirectorySize) + if len(data) != sfntDirectorySize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a Font Collection (not enough data)") + sstruct.unpack(sfntDirectoryFormat, data, self) + elif self.sfntVersion == b"wOFF": + self.flavor = 
"woff" + self.DirectoryEntry = WOFFDirectoryEntry + data = self.file.read(woffDirectorySize) + if len(data) != woffDirectorySize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a WOFF font (not enough data)") + sstruct.unpack(woffDirectoryFormat, data, self) + else: + data = self.file.read(sfntDirectorySize) + if len(data) != sfntDirectorySize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a TrueType or OpenType font (not enough data)") sstruct.unpack(sfntDirectoryFormat, data, self) - if self.sfntVersion not in ("\000\001\000\000", "OTTO", "true"): + self.sfntVersion = Tag(self.sfntVersion) + + if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"): from fontTools import ttLib - raise ttLib.TTLibError, "Not a TrueType or OpenType font (bad sfntVersion)" - self.tables = {} + raise ttLib.TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") + self.tables = OrderedDict() for i in range(self.numTables): - entry = SFNTDirectoryEntry() + entry = self.DirectoryEntry() entry.fromFile(self.file) - if entry.length > 0: - self.tables[entry.tag] = entry - else: - # Ignore zero-length tables. This doesn't seem to be documented, - # yet it's apparently how the Windows TT rasterizer behaves. - # Besides, at least one font has been sighted which actually - # *has* a zero-length table. - pass - + tag = Tag(entry.tag) + self.tables[tag] = entry + + # Load flavor data if any + if self.flavor == "woff": + self.flavorData = WOFFFlavorData(self) + def has_key(self, tag): - return self.tables.has_key(tag) - + return tag in self.tables + + __contains__ = has_key + def keys(self): return self.tables.keys() - + def __getitem__(self, tag): """Fetch the raw table data.""" - entry = self.tables[tag] - self.file.seek(entry.offset) - data = self.file.read(entry.length) + entry = self.tables[Tag(tag)] + data = entry.loadData (self.file) if self.checkChecksums: if tag == 'head': # Beh: we have to special-case the 'head' table. 
- checksum = calcChecksum(data[:8] + '\0\0\0\0' + data[12:]) + checksum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) else: checksum = calcChecksum(data) if self.checkChecksums > 1: # Be obnoxious, and barf when it's wrong - assert checksum == entry.checksum, "bad checksum for '%s' table" % tag - elif checksum <> entry.checkSum: + assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag + elif checksum != entry.checkSum: # Be friendly, and just print a warning. - print "bad checksum for '%s' table" % tag + print("bad checksum for '%s' table" % tag) return data - + def __delitem__(self, tag): - del self.tables[tag] - + del self.tables[Tag(tag)] + def close(self): self.file.close() -class SFNTWriter: - - def __init__(self, file, numTables, sfntVersion="\000\001\000\000"): +class SFNTWriter(object): + + def __new__(cls, *args, **kwargs): + """ Return an instance of the SFNTWriter sub-class which is compatible + with the specified 'flavor'. + """ + flavor = None + if kwargs and 'flavor' in kwargs: + flavor = kwargs['flavor'] + elif args and len(args) > 3: + flavor = args[3] + if cls is SFNTWriter: + if flavor == "woff2": + # return new WOFF2Writer object + from fontTools.ttLib.woff2 import WOFF2Writer + return object.__new__(WOFF2Writer) + # return default object + return object.__new__(cls) + + def __init__(self, file, numTables, sfntVersion="\000\001\000\000", + flavor=None, flavorData=None): self.file = file self.numTables = numTables - self.sfntVersion = sfntVersion - self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(numTables) - self.nextTableOffset = sfntDirectorySize + numTables * sfntDirectoryEntrySize + self.sfntVersion = Tag(sfntVersion) + self.flavor = flavor + self.flavorData = flavorData + + if self.flavor == "woff": + self.directoryFormat = woffDirectoryFormat + self.directorySize = woffDirectorySize + self.DirectoryEntry = WOFFDirectoryEntry + + self.signature = "wOFF" + + # to calculate WOFF checksum adjustment, 
we also need the original SFNT offsets + self.origNextTableOffset = sfntDirectorySize + numTables * sfntDirectoryEntrySize + else: + assert not self.flavor, "Unknown flavor '%s'" % self.flavor + self.directoryFormat = sfntDirectoryFormat + self.directorySize = sfntDirectorySize + self.DirectoryEntry = SFNTDirectoryEntry + + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(numTables, 16) + + self.nextTableOffset = self.directorySize + numTables * self.DirectoryEntry.formatSize # clear out directory area self.file.seek(self.nextTableOffset) - # make sure we're actually where we want to be. (XXX old cStringIO bug) - self.file.write('\0' * (self.nextTableOffset - self.file.tell())) - self.tables = {} - + # make sure we're actually where we want to be. (old cStringIO bug) + self.file.write(b'\0' * (self.nextTableOffset - self.file.tell())) + self.tables = OrderedDict() + def __setitem__(self, tag, data): """Write raw table data to disk.""" - if self.tables.has_key(tag): - # We've written this table to file before. If the length - # of the data is still the same, we allow overwriting it. 
- entry = self.tables[tag] - if len(data) <> entry.length: - from fontTools import ttLib - raise ttLib.TTLibError, "cannot rewrite '%s' table: length does not match directory entry" % tag + if tag in self.tables: + from fontTools import ttLib + raise ttLib.TTLibError("cannot rewrite '%s' table" % tag) + + entry = self.DirectoryEntry() + entry.tag = tag + entry.offset = self.nextTableOffset + if tag == 'head': + entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) + self.headTable = data + entry.uncompressed = True else: - entry = SFNTDirectoryEntry() - entry.tag = tag - entry.offset = self.nextTableOffset - entry.length = len(data) - self.nextTableOffset = self.nextTableOffset + ((len(data) + 3) & ~3) - self.file.seek(entry.offset) - self.file.write(data) + entry.checkSum = calcChecksum(data) + entry.saveData(self.file, data) + + if self.flavor == "woff": + entry.origOffset = self.origNextTableOffset + self.origNextTableOffset += (entry.origLength + 3) & ~3 + + self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3) # Add NUL bytes to pad the table data to a 4-byte boundary. # Don't depend on f.seek() as we need to add the padding even if no # subsequent write follows (seek is lazy), ie. after the final table # in the font. - self.file.write('\0' * (self.nextTableOffset - self.file.tell())) + self.file.write(b'\0' * (self.nextTableOffset - self.file.tell())) assert self.nextTableOffset == self.file.tell() - - if tag == 'head': - entry.checkSum = calcChecksum(data[:8] + '\0\0\0\0' + data[12:]) - else: - entry.checkSum = calcChecksum(data) + self.tables[tag] = entry - + def close(self): """All tables must have been written to disk. Now write the directory. 
""" - tables = self.tables.items() - tables.sort() - if len(tables) <> self.numTables: + tables = sorted(self.tables.items()) + if len(tables) != self.numTables: from fontTools import ttLib - raise ttLib.TTLibError, "wrong number of tables; expected %d, found %d" % (self.numTables, len(tables)) - - directory = sstruct.pack(sfntDirectoryFormat, self) - - self.file.seek(sfntDirectorySize) + raise ttLib.TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(tables))) + + if self.flavor == "woff": + self.signature = b"wOFF" + self.reserved = 0 + + self.totalSfntSize = 12 + self.totalSfntSize += 16 * len(tables) + for tag, entry in tables: + self.totalSfntSize += (entry.origLength + 3) & ~3 + + data = self.flavorData if self.flavorData else WOFFFlavorData() + if data.majorVersion is not None and data.minorVersion is not None: + self.majorVersion = data.majorVersion + self.minorVersion = data.minorVersion + else: + if hasattr(self, 'headTable'): + self.majorVersion, self.minorVersion = struct.unpack(">HH", self.headTable[4:8]) + else: + self.majorVersion = self.minorVersion = 0 + if data.metaData: + self.metaOrigLength = len(data.metaData) + self.file.seek(0,2) + self.metaOffset = self.file.tell() + import zlib + compressedMetaData = zlib.compress(data.metaData) + self.metaLength = len(compressedMetaData) + self.file.write(compressedMetaData) + else: + self.metaOffset = self.metaLength = self.metaOrigLength = 0 + if data.privData: + self.file.seek(0,2) + off = self.file.tell() + paddedOff = (off + 3) & ~3 + self.file.write('\0' * (paddedOff - off)) + self.privOffset = self.file.tell() + self.privLength = len(data.privData) + self.file.write(data.privData) + else: + self.privOffset = self.privLength = 0 + + self.file.seek(0,2) + self.length = self.file.tell() + + else: + assert not self.flavor, "Unknown flavor '%s'" % self.flavor + pass + + directory = sstruct.pack(self.directoryFormat, self) + + self.file.seek(self.directorySize) seenHead = 0 
for tag, entry in tables: if tag == "head": @@ -157,11 +280,24 @@ def _calcMasterChecksum(self, directory): # calculate checkSumAdjustment - tags = self.tables.keys() + tags = list(self.tables.keys()) checksums = [] for i in range(len(tags)): checksums.append(self.tables[tags[i]].checkSum) + if self.DirectoryEntry != SFNTDirectoryEntry: + # Create a SFNT directory for checksum calculation purposes + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16) + directory = sstruct.pack(sfntDirectoryFormat, self) + tables = sorted(self.tables.items()) + for tag, entry in tables: + sfntEntry = SFNTDirectoryEntry() + sfntEntry.tag = entry.tag + sfntEntry.checkSum = entry.checkSum + sfntEntry.offset = entry.origOffset + sfntEntry.length = entry.origLength + directory = directory + sfntEntry.toString() + directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize assert directory_end == len(directory) @@ -177,6 +313,9 @@ self.file.seek(self.tables['head'].offset + 8) self.file.write(struct.pack(">L", checksumadjustment)) + def reordersTables(self): + return False + # -- sfnt directory helpers and cruft @@ -214,23 +353,138 @@ sfntDirectoryEntrySize = sstruct.calcsize(sfntDirectoryEntryFormat) -class SFNTDirectoryEntry: - +woffDirectoryFormat = """ + > # big endian + signature: 4s # "wOFF" + sfntVersion: 4s + length: L # total woff file size + numTables: H # number of tables + reserved: H # set to 0 + totalSfntSize: L # uncompressed size + majorVersion: H # major version of WOFF file + minorVersion: H # minor version of WOFF file + metaOffset: L # offset to metadata block + metaLength: L # length of compressed metadata + metaOrigLength: L # length of uncompressed metadata + privOffset: L # offset to private data block + privLength: L # length of private data block +""" + +woffDirectorySize = sstruct.calcsize(woffDirectoryFormat) + +woffDirectoryEntryFormat = """ + > # big endian + tag: 4s + offset: L + length: L # compressed 
length + origLength: L # original length + checkSum: L # original checksum +""" + +woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat) + + +class DirectoryEntry(object): + + def __init__(self): + self.uncompressed = False # if True, always embed entry raw + def fromFile(self, file): - sstruct.unpack(sfntDirectoryEntryFormat, - file.read(sfntDirectoryEntrySize), self) - + sstruct.unpack(self.format, file.read(self.formatSize), self) + def fromString(self, str): - sstruct.unpack(sfntDirectoryEntryFormat, str, self) - + sstruct.unpack(self.format, str, self) + def toString(self): - return sstruct.pack(sfntDirectoryEntryFormat, self) - + return sstruct.pack(self.format, self) + def __repr__(self): if hasattr(self, "tag"): - return "" % (self.tag, id(self)) + return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self)) else: - return "" % id(self) + return "<%s at %x>" % (self.__class__.__name__, id(self)) + + def loadData(self, file): + file.seek(self.offset) + data = file.read(self.length) + assert len(data) == self.length + if hasattr(self.__class__, 'decodeData'): + data = self.decodeData(data) + return data + + def saveData(self, file, data): + if hasattr(self.__class__, 'encodeData'): + data = self.encodeData(data) + self.length = len(data) + file.seek(self.offset) + file.write(data) + + def decodeData(self, rawData): + return rawData + + def encodeData(self, data): + return data + +class SFNTDirectoryEntry(DirectoryEntry): + + format = sfntDirectoryEntryFormat + formatSize = sfntDirectoryEntrySize + +class WOFFDirectoryEntry(DirectoryEntry): + + format = woffDirectoryEntryFormat + formatSize = woffDirectoryEntrySize + zlibCompressionLevel = 6 + + def decodeData(self, rawData): + import zlib + if self.length == self.origLength: + data = rawData + else: + assert self.length < self.origLength + data = zlib.decompress(rawData) + assert len (data) == self.origLength + return data + + def encodeData(self, data): + import zlib + 
self.origLength = len(data) + if not self.uncompressed: + compressedData = zlib.compress(data, self.zlibCompressionLevel) + if self.uncompressed or len(compressedData) >= self.origLength: + # Encode uncompressed + rawData = data + self.length = self.origLength + else: + rawData = compressedData + self.length = len(rawData) + return rawData + +class WOFFFlavorData(): + + Flavor = 'woff' + + def __init__(self, reader=None): + self.majorVersion = None + self.minorVersion = None + self.metaData = None + self.privData = None + if reader: + self.majorVersion = reader.majorVersion + self.minorVersion = reader.minorVersion + if reader.metaLength: + reader.file.seek(reader.metaOffset) + rawData = reader.file.read(reader.metaLength) + assert len(rawData) == reader.metaLength + import zlib + data = zlib.decompress(rawData) + assert len(data) == reader.metaOrigLength + self.metaData = data + if reader.privLength: + reader.file.seek(reader.privOffset) + data = reader.file.read(reader.privLength) + assert len(data) == reader.privLength + self.privData = data def calcChecksum(data): @@ -238,52 +492,29 @@ Optionally takes a 'start' argument, which allows you to calculate a checksum in chunks by feeding it a previous result. - + If the data length is not a multiple of four, it assumes - it is to be padded with null byte. + it is to be padded with null byte. 
- >>> print calcChecksum("abcd") + >>> print(calcChecksum(b"abcd")) 1633837924 - >>> print calcChecksum("abcdxyz") + >>> print(calcChecksum(b"abcdxyz")) 3655064932 """ remainder = len(data) % 4 if remainder: - data += "\0" * (4 - remainder) + data += b"\0" * (4 - remainder) value = 0 blockSize = 4096 assert blockSize % 4 == 0 - for i in xrange(0, len(data), blockSize): + for i in range(0, len(data), blockSize): block = data[i:i+blockSize] longs = struct.unpack(">%dL" % (len(block) // 4), block) value = (value + sum(longs)) & 0xffffffff return value -def maxPowerOfTwo(x): - """Return the highest exponent of two, so that - (2 ** exponent) <= x - """ - exponent = 0 - while x: - x = x >> 1 - exponent = exponent + 1 - return max(exponent - 1, 0) - - -def getSearchRange(n): - """Calculate searchRange, entrySelector, rangeShift for the - sfnt directory. 'n' is the number of tables. - """ - # This stuff needs to be stored in the file, because? - import math - exponent = maxPowerOfTwo(n) - searchRange = (2 ** exponent) * 16 - entrySelector = exponent - rangeShift = n * 16 - searchRange - return searchRange, entrySelector, rangeShift - - if __name__ == "__main__": - import doctest - doctest.testmod() + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/standardGlyphOrder.py fonttools-3.0/Lib/fontTools/ttLib/standardGlyphOrder.py --- fonttools-2.4/Lib/fontTools/ttLib/standardGlyphOrder.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/standardGlyphOrder.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,3 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + # # 'post' table formats 1.0 and 2.0 rely on this list of "standard" # glyphs. 
@@ -10,262 +13,262 @@ # standardGlyphOrder = [ - ".notdef", # 0 - ".null", # 1 - "nonmarkingreturn", # 2 - "space", # 3 - "exclam", # 4 - "quotedbl", # 5 - "numbersign", # 6 - "dollar", # 7 - "percent", # 8 - "ampersand", # 9 - "quotesingle", # 10 - "parenleft", # 11 - "parenright", # 12 - "asterisk", # 13 - "plus", # 14 - "comma", # 15 - "hyphen", # 16 - "period", # 17 - "slash", # 18 - "zero", # 19 - "one", # 20 - "two", # 21 - "three", # 22 - "four", # 23 - "five", # 24 - "six", # 25 - "seven", # 26 - "eight", # 27 - "nine", # 28 - "colon", # 29 - "semicolon", # 30 - "less", # 31 - "equal", # 32 - "greater", # 33 - "question", # 34 - "at", # 35 - "A", # 36 - "B", # 37 - "C", # 38 - "D", # 39 - "E", # 40 - "F", # 41 - "G", # 42 - "H", # 43 - "I", # 44 - "J", # 45 - "K", # 46 - "L", # 47 - "M", # 48 - "N", # 49 - "O", # 50 - "P", # 51 - "Q", # 52 - "R", # 53 - "S", # 54 - "T", # 55 - "U", # 56 - "V", # 57 - "W", # 58 - "X", # 59 - "Y", # 60 - "Z", # 61 - "bracketleft", # 62 - "backslash", # 63 - "bracketright", # 64 - "asciicircum", # 65 - "underscore", # 66 - "grave", # 67 - "a", # 68 - "b", # 69 - "c", # 70 - "d", # 71 - "e", # 72 - "f", # 73 - "g", # 74 - "h", # 75 - "i", # 76 - "j", # 77 - "k", # 78 - "l", # 79 - "m", # 80 - "n", # 81 - "o", # 82 - "p", # 83 - "q", # 84 - "r", # 85 - "s", # 86 - "t", # 87 - "u", # 88 - "v", # 89 - "w", # 90 - "x", # 91 - "y", # 92 - "z", # 93 - "braceleft", # 94 - "bar", # 95 - "braceright", # 96 - "asciitilde", # 97 - "Adieresis", # 98 - "Aring", # 99 - "Ccedilla", # 100 - "Eacute", # 101 - "Ntilde", # 102 - "Odieresis", # 103 - "Udieresis", # 104 - "aacute", # 105 - "agrave", # 106 - "acircumflex", # 107 - "adieresis", # 108 - "atilde", # 109 - "aring", # 110 - "ccedilla", # 111 - "eacute", # 112 - "egrave", # 113 - "ecircumflex", # 114 - "edieresis", # 115 - "iacute", # 116 - "igrave", # 117 - "icircumflex", # 118 - "idieresis", # 119 - "ntilde", # 120 - "oacute", # 121 - "ograve", # 122 - "ocircumflex", # 123 - 
"odieresis", # 124 - "otilde", # 125 - "uacute", # 126 - "ugrave", # 127 - "ucircumflex", # 128 - "udieresis", # 129 - "dagger", # 130 - "degree", # 131 - "cent", # 132 - "sterling", # 133 - "section", # 134 - "bullet", # 135 - "paragraph", # 136 - "germandbls", # 137 - "registered", # 138 - "copyright", # 139 - "trademark", # 140 - "acute", # 141 - "dieresis", # 142 - "notequal", # 143 - "AE", # 144 - "Oslash", # 145 - "infinity", # 146 - "plusminus", # 147 - "lessequal", # 148 - "greaterequal", # 149 - "yen", # 150 - "mu", # 151 - "partialdiff", # 152 - "summation", # 153 - "product", # 154 - "pi", # 155 - "integral", # 156 - "ordfeminine", # 157 - "ordmasculine", # 158 - "Omega", # 159 - "ae", # 160 - "oslash", # 161 - "questiondown", # 162 - "exclamdown", # 163 - "logicalnot", # 164 - "radical", # 165 - "florin", # 166 - "approxequal", # 167 - "Delta", # 168 - "guillemotleft", # 169 - "guillemotright", # 170 - "ellipsis", # 171 - "nonbreakingspace", # 172 - "Agrave", # 173 - "Atilde", # 174 - "Otilde", # 175 - "OE", # 176 - "oe", # 177 - "endash", # 178 - "emdash", # 179 - "quotedblleft", # 180 - "quotedblright", # 181 - "quoteleft", # 182 - "quoteright", # 183 - "divide", # 184 - "lozenge", # 185 - "ydieresis", # 186 - "Ydieresis", # 187 + ".notdef", # 0 + ".null", # 1 + "nonmarkingreturn", # 2 + "space", # 3 + "exclam", # 4 + "quotedbl", # 5 + "numbersign", # 6 + "dollar", # 7 + "percent", # 8 + "ampersand", # 9 + "quotesingle", # 10 + "parenleft", # 11 + "parenright", # 12 + "asterisk", # 13 + "plus", # 14 + "comma", # 15 + "hyphen", # 16 + "period", # 17 + "slash", # 18 + "zero", # 19 + "one", # 20 + "two", # 21 + "three", # 22 + "four", # 23 + "five", # 24 + "six", # 25 + "seven", # 26 + "eight", # 27 + "nine", # 28 + "colon", # 29 + "semicolon", # 30 + "less", # 31 + "equal", # 32 + "greater", # 33 + "question", # 34 + "at", # 35 + "A", # 36 + "B", # 37 + "C", # 38 + "D", # 39 + "E", # 40 + "F", # 41 + "G", # 42 + "H", # 43 + "I", # 44 + "J", # 45 + "K", 
# 46 + "L", # 47 + "M", # 48 + "N", # 49 + "O", # 50 + "P", # 51 + "Q", # 52 + "R", # 53 + "S", # 54 + "T", # 55 + "U", # 56 + "V", # 57 + "W", # 58 + "X", # 59 + "Y", # 60 + "Z", # 61 + "bracketleft", # 62 + "backslash", # 63 + "bracketright", # 64 + "asciicircum", # 65 + "underscore", # 66 + "grave", # 67 + "a", # 68 + "b", # 69 + "c", # 70 + "d", # 71 + "e", # 72 + "f", # 73 + "g", # 74 + "h", # 75 + "i", # 76 + "j", # 77 + "k", # 78 + "l", # 79 + "m", # 80 + "n", # 81 + "o", # 82 + "p", # 83 + "q", # 84 + "r", # 85 + "s", # 86 + "t", # 87 + "u", # 88 + "v", # 89 + "w", # 90 + "x", # 91 + "y", # 92 + "z", # 93 + "braceleft", # 94 + "bar", # 95 + "braceright", # 96 + "asciitilde", # 97 + "Adieresis", # 98 + "Aring", # 99 + "Ccedilla", # 100 + "Eacute", # 101 + "Ntilde", # 102 + "Odieresis", # 103 + "Udieresis", # 104 + "aacute", # 105 + "agrave", # 106 + "acircumflex", # 107 + "adieresis", # 108 + "atilde", # 109 + "aring", # 110 + "ccedilla", # 111 + "eacute", # 112 + "egrave", # 113 + "ecircumflex", # 114 + "edieresis", # 115 + "iacute", # 116 + "igrave", # 117 + "icircumflex", # 118 + "idieresis", # 119 + "ntilde", # 120 + "oacute", # 121 + "ograve", # 122 + "ocircumflex", # 123 + "odieresis", # 124 + "otilde", # 125 + "uacute", # 126 + "ugrave", # 127 + "ucircumflex", # 128 + "udieresis", # 129 + "dagger", # 130 + "degree", # 131 + "cent", # 132 + "sterling", # 133 + "section", # 134 + "bullet", # 135 + "paragraph", # 136 + "germandbls", # 137 + "registered", # 138 + "copyright", # 139 + "trademark", # 140 + "acute", # 141 + "dieresis", # 142 + "notequal", # 143 + "AE", # 144 + "Oslash", # 145 + "infinity", # 146 + "plusminus", # 147 + "lessequal", # 148 + "greaterequal", # 149 + "yen", # 150 + "mu", # 151 + "partialdiff", # 152 + "summation", # 153 + "product", # 154 + "pi", # 155 + "integral", # 156 + "ordfeminine", # 157 + "ordmasculine", # 158 + "Omega", # 159 + "ae", # 160 + "oslash", # 161 + "questiondown", # 162 + "exclamdown", # 163 + "logicalnot", # 
164 + "radical", # 165 + "florin", # 166 + "approxequal", # 167 + "Delta", # 168 + "guillemotleft", # 169 + "guillemotright", # 170 + "ellipsis", # 171 + "nonbreakingspace", # 172 + "Agrave", # 173 + "Atilde", # 174 + "Otilde", # 175 + "OE", # 176 + "oe", # 177 + "endash", # 178 + "emdash", # 179 + "quotedblleft", # 180 + "quotedblright", # 181 + "quoteleft", # 182 + "quoteright", # 183 + "divide", # 184 + "lozenge", # 185 + "ydieresis", # 186 + "Ydieresis", # 187 "fraction", # 188 "currency", # 189 - "guilsinglleft", # 190 - "guilsinglright", # 191 - "fi", # 192 - "fl", # 193 - "daggerdbl", # 194 - "periodcentered", # 195 - "quotesinglbase", # 196 - "quotedblbase", # 197 - "perthousand", # 198 - "Acircumflex", # 199 - "Ecircumflex", # 200 - "Aacute", # 201 - "Edieresis", # 202 - "Egrave", # 203 - "Iacute", # 204 - "Icircumflex", # 205 - "Idieresis", # 206 - "Igrave", # 207 - "Oacute", # 208 - "Ocircumflex", # 209 - "apple", # 210 - "Ograve", # 211 - "Uacute", # 212 - "Ucircumflex", # 213 - "Ugrave", # 214 - "dotlessi", # 215 - "circumflex", # 216 - "tilde", # 217 - "macron", # 218 - "breve", # 219 - "dotaccent", # 220 - "ring", # 221 - "cedilla", # 222 - "hungarumlaut", # 223 - "ogonek", # 224 - "caron", # 225 - "Lslash", # 226 - "lslash", # 227 - "Scaron", # 228 - "scaron", # 229 - "Zcaron", # 230 - "zcaron", # 231 - "brokenbar", # 232 - "Eth", # 233 - "eth", # 234 - "Yacute", # 235 - "yacute", # 236 - "Thorn", # 237 - "thorn", # 238 - "minus", # 239 - "multiply", # 240 - "onesuperior", # 241 - "twosuperior", # 242 - "threesuperior", # 243 - "onehalf", # 244 - "onequarter", # 245 - "threequarters", # 246 - "franc", # 247 - "Gbreve", # 248 - "gbreve", # 249 - "Idotaccent", # 250 - "Scedilla", # 251 - "scedilla", # 252 - "Cacute", # 253 - "cacute", # 254 - "Ccaron", # 255 - "ccaron", # 256 - "dcroat" # 257 + "guilsinglleft", # 190 + "guilsinglright", # 191 + "fi", # 192 + "fl", # 193 + "daggerdbl", # 194 + "periodcentered", # 195 + "quotesinglbase", # 196 + 
"quotedblbase", # 197 + "perthousand", # 198 + "Acircumflex", # 199 + "Ecircumflex", # 200 + "Aacute", # 201 + "Edieresis", # 202 + "Egrave", # 203 + "Iacute", # 204 + "Icircumflex", # 205 + "Idieresis", # 206 + "Igrave", # 207 + "Oacute", # 208 + "Ocircumflex", # 209 + "apple", # 210 + "Ograve", # 211 + "Uacute", # 212 + "Ucircumflex", # 213 + "Ugrave", # 214 + "dotlessi", # 215 + "circumflex", # 216 + "tilde", # 217 + "macron", # 218 + "breve", # 219 + "dotaccent", # 220 + "ring", # 221 + "cedilla", # 222 + "hungarumlaut", # 223 + "ogonek", # 224 + "caron", # 225 + "Lslash", # 226 + "lslash", # 227 + "Scaron", # 228 + "scaron", # 229 + "Zcaron", # 230 + "zcaron", # 231 + "brokenbar", # 232 + "Eth", # 233 + "eth", # 234 + "Yacute", # 235 + "yacute", # 236 + "Thorn", # 237 + "thorn", # 238 + "minus", # 239 + "multiply", # 240 + "onesuperior", # 241 + "twosuperior", # 242 + "threesuperior", # 243 + "onehalf", # 244 + "onequarter", # 245 + "threequarters", # 246 + "franc", # 247 + "Gbreve", # 248 + "gbreve", # 249 + "Idotaccent", # 250 + "Scedilla", # 251 + "scedilla", # 252 + "Cacute", # 253 + "cacute", # 254 + "Ccaron", # 255 + "ccaron", # 256 + "dcroat" # 257 ] diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/asciiTable.py fonttools-3.0/Lib/fontTools/ttLib/tables/asciiTable.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/asciiTable.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/asciiTable.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,22 +1,22 @@ -import string -import DefaultTable +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable class asciiTable(DefaultTable.DefaultTable): - + def toXML(self, writer, ttFont): - data = self.data + data = tostr(self.data) # removing null bytes. XXX needed?? 
- data = string.split(data, '\0') - data = string.join(data, '') + data = data.split('\0') + data = strjoin(data) writer.begintag("source") writer.newline() - writer.write_noindent(string.replace(data, "\r", "\n")) + writer.write_noindent(data.replace("\r", "\n")) writer.newline() writer.endtag("source") writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): - lines = string.split(string.replace(string.join(content, ""), "\r", "\n"), "\n") - self.data = string.join(lines[1:-1], "\r") + def fromXML(self, name, attrs, content, ttFont): + lines = strjoin(content).replace("\r", "\n").split("\n") + self.data = tobytes("\r".join(lines[1:-1])) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_a_v_a_r.py fonttools-3.0/Lib/fontTools/ttLib/tables/_a_v_a_r.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_a_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_a_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,94 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import array +import struct +import warnings + + +# Apple's documentation of 'avar': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6avar.html + +AVAR_HEADER_FORMAT = """ + > # big endian + version: L + axisCount: L +""" + + +class table__a_v_a_r(DefaultTable.DefaultTable): + dependencies = ["fvar"] + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.segments = {} + + def compile(self, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + header = {"version": 0x00010000, "axisCount": len(axisTags)} + result = [sstruct.pack(AVAR_HEADER_FORMAT, header)] + for axis in axisTags: + mappings = sorted(self.segments[axis].items()) + result.append(struct.pack(">H", len(mappings))) + for key, value in mappings: + fixedKey = floatToFixed(key, 14) + fixedValue = floatToFixed(value, 14) + result.append(struct.pack(">hh", fixedKey, fixedValue)) + return bytesjoin(result) + + def decompile(self, data, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + header = {} + headerSize = sstruct.calcsize(AVAR_HEADER_FORMAT) + header = sstruct.unpack(AVAR_HEADER_FORMAT, data[0:headerSize]) + if header["version"] != 0x00010000: + raise TTLibError("unsupported 'avar' version %04x" % header["version"]) + pos = headerSize + for axis in axisTags: + segments = self.segments[axis] = {} + numPairs = struct.unpack(">H", data[pos:pos+2])[0] + pos = pos + 2 + for _ in range(numPairs): + fromValue, toValue = struct.unpack(">hh", data[pos:pos+4]) + segments[fixedToFloat(fromValue, 14)] = fixedToFloat(toValue, 14) + pos = pos + 4 + self.fixupSegments_(warn=warnings.warn) + + def toXML(self, writer, ttFont, progress=None): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + for axis in axisTags: + writer.begintag("segment", axis=axis) + writer.newline() + for key, value in sorted(self.segments[axis].items()): + writer.simpletag("mapping", **{"from": key, "to": value}) + 
writer.newline() + writer.endtag("segment") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "segment": + axis = attrs["axis"] + segment = self.segments[axis] = {} + for element in content: + if isinstance(element, tuple): + elementName, elementAttrs, _ = element + if elementName == "mapping": + fromValue = safeEval(elementAttrs["from"]) + toValue = safeEval(elementAttrs["to"]) + if fromValue in segment: + warnings.warn("duplicate entry for %s in axis '%s'" % + (fromValue, axis)) + segment[fromValue] = toValue + self.fixupSegments_(warn=warnings.warn) + + def fixupSegments_(self, warn): + for axis, mappings in self.segments.items(): + for k in [-1.0, 0.0, 1.0]: + if mappings.get(k) != k: + warn("avar axis '%s' should map %s to %s" % (axis, k, k)) + mappings[k] = k diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_a_v_a_r_test.py fonttools-3.0/Lib/fontTools/ttLib/tables/_a_v_a_r_test.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_a_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_a_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,91 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._a_v_a_r import table__a_v_a_r +from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis +import collections +import unittest + + +TEST_DATA = deHexStr( + "00 01 00 00 00 00 00 02 " + "00 04 C0 00 C0 00 00 00 00 00 13 33 33 33 40 00 40 00 " + "00 03 C0 00 C0 00 00 00 00 00 40 00 40 00") + + +class AxisVariationTableTest(unittest.TestCase): + def test_compile(self): + avar = table__a_v_a_r() + avar.segments["wdth"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} + avar.segments["wght"] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} + self.assertEqual(TEST_DATA, 
avar.compile(self.makeFont(["wdth", "wght"]))) + + def test_decompile(self): + avar = table__a_v_a_r() + avar.decompile(TEST_DATA, self.makeFont(["wdth", "wght"])) + self.assertEqual({ + "wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}, + "wght": {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} + }, avar.segments) + + def test_decompile_unsupportedVersion(self): + avar = table__a_v_a_r() + font = self.makeFont(["wdth", "wght"]) + self.assertRaises(TTLibError, avar.decompile, deHexStr("02 01 03 06 00 00 00 00"), font) + + def test_toXML(self): + avar = table__a_v_a_r() + avar.segments["opsz"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} + writer = XMLWriter(BytesIO()) + avar.toXML(writer, self.makeFont(["opsz"])) + self.assertEqual([ + '', + '', + '', + '', + '', + '' + ], self.xml_lines(writer)) + + def test_fromXML(self): + avar = table__a_v_a_r() + avar.fromXML("segment", {"axis":"wdth"}, [ + ("mapping", {"from": "-1.0", "to": "-1.0"}, []), + ("mapping", {"from": "0.0", "to": "0.0"}, []), + ("mapping", {"from": "0.7", "to": "0.2"}, []), + ("mapping", {"from": "1.0", "to": "1.0"}, []) + ], ttFont=None) + self.assertEqual({"wdth": {-1: -1, 0: 0, 0.7: 0.2, 1.0: 1.0}}, avar.segments) + + def test_fixupSegments(self): + avar = table__a_v_a_r() + avar.segments = {"wdth": {0.3: 0.8, 1.0: 0.7}} + warnings = [] + avar.fixupSegments_(lambda w: warnings.append(w)) + self.assertEqual({"wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}}, avar.segments) + self.assertEqual([ + "avar axis 'wdth' should map -1.0 to -1.0", + "avar axis 'wdth' should map 0.0 to 0.0", + "avar axis 'wdth' should map 1.0 to 1.0" + ], warnings) + + @staticmethod + def makeFont(axisTags): + """['opsz', 'wdth'] --> ttFont""" + fvar = table__f_v_a_r() + for tag in axisTags: + axis = Axis() + axis.axisTag = tag + fvar.axes.append(axis) + return {"fvar": fvar} + + @staticmethod + def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in content.splitlines()][1:] + + +if 
__name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/B_A_S_E_.py fonttools-3.0/Lib/fontTools/ttLib/tables/B_A_S_E_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/B_A_S_E_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/B_A_S_E_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,4 +1,6 @@ -from otBase import BaseTTXConverter +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter class table_B_A_S_E_(BaseTTXConverter): diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py fonttools-3.0/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,58 @@ +# Since bitmap glyph metrics are shared between EBLC and EBDT +# this class gets its own python file. 
+from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval + + +bigGlyphMetricsFormat = """ + > # big endian + height: B + width: B + horiBearingX: b + horiBearingY: b + horiAdvance: B + vertBearingX: b + vertBearingY: b + vertAdvance: B +""" + +smallGlyphMetricsFormat = """ + > # big endian + height: B + width: B + BearingX: b + BearingY: b + Advance: B +""" + +class BitmapGlyphMetrics(object): + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__) + writer.newline() + for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]: + writer.simpletag(metricName, value=getattr(self, metricName)) + writer.newline() + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1]) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + # Make sure this is a metric that is needed by GlyphMetrics. + if name in metricNames: + vars(self)[name] = safeEval(attrs['value']) + else: + print("Warning: unknown name '%s' being ignored in %s." % name, self.__class__.__name__) + + +class BigGlyphMetrics(BitmapGlyphMetrics): + binaryFormat = bigGlyphMetricsFormat + +class SmallGlyphMetrics(BitmapGlyphMetrics): + binaryFormat = smallGlyphMetricsFormat diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/C_B_D_T_.py fonttools-3.0/Lib/fontTools/ttLib/tables/C_B_D_T_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/C_B_D_T_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/C_B_D_T_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,93 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Matt Fontaine + + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . import E_B_D_T_ +from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat +from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin +import struct + +class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_): + + # Change the data locator table being referenced. + locatorName = 'CBLC' + + # Modify the format class accessor for color bitmap use. + def getImageFormatClass(self, imageFormat): + try: + return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat) + except KeyError: + return cbdt_bitmap_classes[imageFormat] + +# Helper method for removing export features not supported by color bitmaps. +# Write data in the parent class will default to raw if an option is unsupported. +def _removeUnsupportedForColor(dataFunctions): + dataFunctions = dict(dataFunctions) + del dataFunctions['row'] + return dataFunctions + +class ColorBitmapGlyph(BitmapGlyph): + + fileExtension = '.png' + xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions) + +class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + (dataLen,) = struct.unpack(">L", data[:4]) + data = data[4:] + + # For the image data cut it to the size specified by dataLen. 
+ assert dataLen <= len(data), "Data overun in format 17" + self.imageData = data[:dataLen] + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) + dataList.append(struct.pack(">L", len(self.imageData))) + dataList.append(self.imageData) + return bytesjoin(dataList) + +class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + (dataLen,) = struct.unpack(">L", data[:4]) + data = data[4:] + + # For the image data cut it to the size specified by dataLen. + assert dataLen <= len(data), "Data overun in format 18" + self.imageData = data[:dataLen] + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + dataList.append(struct.pack(">L", len(self.imageData))) + dataList.append(self.imageData) + return bytesjoin(dataList) + +class cbdt_bitmap_format_19(ColorBitmapGlyph): + + def decompile(self): + (dataLen,) = struct.unpack(">L", self.data[:4]) + data = self.data[4:] + + assert dataLen <= len(data), "Data overun in format 19" + self.imageData = data[:dataLen] + + def compile(self, ttFont): + return struct.pack(">L", len(self.imageData)) + self.imageData + +# Dict for CBDT extended formats. +cbdt_bitmap_classes = { + 17: cbdt_bitmap_format_17, + 18: cbdt_bitmap_format_18, + 19: cbdt_bitmap_format_19, +} diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/C_B_L_C_.py fonttools-3.0/Lib/fontTools/ttLib/tables/C_B_L_C_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/C_B_L_C_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/C_B_L_C_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,11 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Matt Fontaine + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import E_B_L_C_ + +class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_): + + dependencies = ['CBDT'] diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/C_F_F_.py fonttools-3.0/Lib/fontTools/ttLib/tables/C_F_F_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/C_F_F_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/C_F_F_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,48 +1,47 @@ -import DefaultTable +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools import cffLib +from . import DefaultTable class table_C_F_F_(DefaultTable.DefaultTable): - + def __init__(self, tag): DefaultTable.DefaultTable.__init__(self, tag) self.cff = cffLib.CFFFontSet() - self._gaveGlyphOrder = 0 - + self._gaveGlyphOrder = False + def decompile(self, data, otFont): - from cStringIO import StringIO - self.cff.decompile(StringIO(data), otFont) + self.cff.decompile(BytesIO(data), otFont) assert len(self.cff) == 1, "can't deal with multi-font CFF tables." 
- + def compile(self, otFont): - from cStringIO import StringIO - f = StringIO() + f = BytesIO() self.cff.compile(f, otFont) return f.getvalue() - + def haveGlyphNames(self): if hasattr(self.cff[self.cff.fontNames[0]], "ROS"): - return 0 # CID-keyed font + return False # CID-keyed font else: - return 1 - + return True + def getGlyphOrder(self): if self._gaveGlyphOrder: from fontTools import ttLib - raise ttLib.TTLibError, "illegal use of getGlyphOrder()" - self._gaveGlyphOrder = 1 + raise ttLib.TTLibError("illegal use of getGlyphOrder()") + self._gaveGlyphOrder = True return self.cff[self.cff.fontNames[0]].getGlyphOrder() - + def setGlyphOrder(self, glyphOrder): pass # XXX #self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder) - + def toXML(self, writer, otFont, progress=None): self.cff.toXML(writer, progress) - - def fromXML(self, (name, attrs, content), otFont): + + def fromXML(self, name, attrs, content, otFont): if not hasattr(self, "cff"): self.cff = cffLib.CFFFontSet() - self.cff.fromXML((name, attrs, content)) - + self.cff.fromXML(name, attrs, content) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_c_m_a_p.py fonttools-3.0/Lib/fontTools/ttLib/tables/_c_m_a_p.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_c_m_a_p.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_c_m_a_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,23 +1,25 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval, readHex +from fontTools.misc.encodingTools import getEncoding +from fontTools.ttLib import getSearchRange +from fontTools.unicode import Unicode +from . 
import DefaultTable import sys -import DefaultTable import struct import array -import numpy import operator -from fontTools import ttLib -from fontTools.misc.textTools import safeEval, readHex -from types import TupleType class table__c_m_a_p(DefaultTable.DefaultTable): - + def getcmap(self, platformID, platEncID): for subtable in self.tables: - if (subtable.platformID == platformID and + if (subtable.platformID == platformID and subtable.platEncID == platEncID): return subtable return None # not found - + def decompile(self, data, ttFont): tableVersion, numSubTables = struct.unpack(">HH", data[:4]) self.tableVersion = int(tableVersion) @@ -28,36 +30,34 @@ ">HHl", data[4+i*8:4+(i+1)*8]) platformID, platEncID = int(platformID), int(platEncID) format, length = struct.unpack(">HH", data[offset:offset+4]) - if format in [8,10,12]: + if format in [8,10,12,13]: format, reserved, length = struct.unpack(">HHL", data[offset:offset+8]) elif format in [14]: format, length = struct.unpack(">HL", data[offset:offset+6]) - + if not length: - print "Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset) + print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset)) continue - if not cmap_classes.has_key(format): - table = cmap_format_unknown(format) - else: - table = cmap_classes[format](format) + table = CmapSubtable.newSubtable(format) table.platformID = platformID table.platEncID = platEncID # Note that by default we decompile only the subtable header info; # any other data gets decompiled only when an attribute of the # subtable is referenced. 
table.decompileHeader(data[offset:offset+int(length)], ttFont) - if seenOffsets.has_key(offset): + if offset in seenOffsets: + table.data = None # Mark as decompiled table.cmap = tables[seenOffsets[offset]].cmap else: seenOffsets[offset] = i tables.append(table) - + def compile(self, ttFont): - self.tables.sort() # sort according to the spec; see CmapSubtable.__cmp__() + self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__() numSubTables = len(self.tables) totalOffset = 4 + 8 * numSubTables data = struct.pack(">HH", self.tableVersion, numSubTables) - tableData = "" + tableData = b"" seen = {} # Some tables are the same object reference. Don't compile them twice. done = {} # Some tables are different objects, but compile to the same data chunk for table in self.tables: @@ -65,41 +65,49 @@ offset = seen[id(table.cmap)] except KeyError: chunk = table.compile(ttFont) - if done.has_key(chunk): + if chunk in done: offset = done[chunk] else: offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData) tableData = tableData + chunk data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset) return data + tableData - + def toXML(self, writer, ttFont): writer.simpletag("tableVersion", version=self.tableVersion) writer.newline() for table in self.tables: table.toXML(writer, ttFont) - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if name == "tableVersion": self.tableVersion = safeEval(attrs["version"]) return - if name[:12] <> "cmap_format_": + if name[:12] != "cmap_format_": return if not hasattr(self, "tables"): self.tables = [] format = safeEval(name[12:]) - if not cmap_classes.has_key(format): - table = cmap_format_unknown(format) - else: - table = cmap_classes[format](format) + table = CmapSubtable.newSubtable(format) table.platformID = safeEval(attrs["platformID"]) table.platEncID = safeEval(attrs["platEncID"]) - table.fromXML((name, attrs, content), ttFont) + 
table.fromXML(name, attrs, content, ttFont) self.tables.append(table) -class CmapSubtable: - +class CmapSubtable(object): + + @staticmethod + def getSubtableClass(format): + """Return the subtable class for a format.""" + return cmap_classes.get(format, cmap_format_unknown) + + @staticmethod + def newSubtable(format): + """Return a new instance of a subtable for format.""" + subtableClass = CmapSubtable.getSubtableClass(format) + return subtableClass(format) + def __init__(self, format): self.format = format self.data = None @@ -108,15 +116,15 @@ def __getattr__(self, attr): # allow lazy decompilation of subtables. if attr[:2] == '__': # don't handle requests for member functions like '__lt__' - raise AttributeError, attr - if self.data == None: - raise AttributeError, attr + raise AttributeError(attr) + if self.data is None: + raise AttributeError(attr) self.decompile(None, None) # use saved data. - self.data = None # Once this table has been decompiled, make sure we don't - # just return the original data. Also avoids recursion when - # called with an attribute that the cmap subtable doesn't have. + self.data = None # Once this table has been decompiled, make sure we don't + # just return the original data. Also avoids recursion when + # called with an attribute that the cmap subtable doesn't have. return getattr(self, attr) - + def decompileHeader(self, data, ttFont): format, length, language = struct.unpack(">HHH", data[:6]) assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length) @@ -133,100 +141,115 @@ ("language", self.language), ]) writer.newline() - codes = self.cmap.items() - codes.sort() + codes = sorted(self.cmap.items()) self._writeCodes(codes, writer) writer.endtag(self.__class__.__name__) writer.newline() + def getEncoding(self, default=None): + """Returns the Python encoding name for this cmap subtable based on its platformID, + platEncID, and language. 
If encoding for these values is not known, by default + None is returned. That can be overriden by passing a value to the default + argument. + + Note that if you want to choose a "preferred" cmap subtable, most of the time + self.isUnicode() is what you want as that one only returns true for the modern, + commonly used, Unicode-compatible triplets, not the legacy ones. + """ + return getEncoding(self.platformID, self.platEncID, self.language, default) + + def isUnicode(self): + return (self.platformID == 0 or + (self.platformID == 3 and self.platEncID in [0, 1, 10])) + + def isSymbol(self): + return self.platformID == 3 and self.platEncID == 0 + def _writeCodes(self, codes, writer): - if (self.platformID, self.platEncID) == (3, 1) or (self.platformID, self.platEncID) == (3, 10) or self.platformID == 0: - from fontTools.unicode import Unicode - isUnicode = 1 - else: - isUnicode = 0 + isUnicode = self.isUnicode() for code, name in codes: writer.simpletag("map", code=hex(code), name=name) if isUnicode: writer.comment(Unicode[code]) writer.newline() - - def __cmp__(self, other): - # implemented so that list.sort() sorts according to the cmap spec. + + def __lt__(self, other): + if not isinstance(other, CmapSubtable): + return NotImplemented + + # implemented so that list.sort() sorts according to the spec. 
selfTuple = ( - self.platformID, - self.platEncID, - self.language, - self.__dict__) + getattr(self, "platformID", None), + getattr(self, "platEncID", None), + getattr(self, "language", None), + self.__dict__) otherTuple = ( - other.platformID, - other.platEncID, - other.language, - other.__dict__) - return cmp(selfTuple, otherTuple) + getattr(other, "platformID", None), + getattr(other, "platEncID", None), + getattr(other, "language", None), + other.__dict__) + return selfTuple < otherTuple class cmap_format_0(CmapSubtable): - + def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. - if data != None and ttFont != None: - self.decompileHeader(data[offset:offset+int(length)], ttFont) + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) else: - assert (data == None and ttFont == None), "Need both data and ttFont arguments" + assert (data is None and ttFont is None), "Need both data and ttFont arguments" data = self.data # decompileHeader assigns the data after the header to self.data assert 262 == self.length, "Format 0 cmap subtable not 262 bytes" glyphIdArray = array.array("B") glyphIdArray.fromstring(self.data) self.cmap = cmap = {} lenArray = len(glyphIdArray) - charCodes = range(lenArray) + charCodes = list(range(lenArray)) names = map(self.ttFont.getGlyphName, glyphIdArray) - map(operator.setitem, [cmap]*lenArray, charCodes, names) + list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) - def compile(self, ttFont): if self.data: return struct.pack(">HHH", 0, 262, self.language) + self.data - charCodeList = self.cmap.items() - charCodeList.sort() + charCodeList = sorted(self.cmap.items()) charCodes = [entry[0] for entry in charCodeList] valueList = [entry[1] for entry in charCodeList] - assert charCodes == range(256) + assert charCodes == 
list(range(256)) valueList = map(ttFont.getGlyphID, valueList) - glyphIdArray = numpy.array(valueList, numpy.int8) + glyphIdArray = array.array("B", valueList) data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring() assert len(data) == 262 return data - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): self.language = safeEval(attrs["language"]) if not hasattr(self, "cmap"): self.cmap = {} cmap = self.cmap for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element - if name <> "map": + if name != "map": continue cmap[safeEval(attrs["code"])] = attrs["name"] subHeaderFormat = ">HHhH" -class SubHeader: +class SubHeader(object): def __init__(self): self.firstCode = None self.entryCount = None self.idDelta = None self.idRangeOffset = None self.glyphIndexArray = [] - + class cmap_format_2(CmapSubtable): - + def setIDDelta(self, subHeader): subHeader.idDelta = 0 # find the minGI which is not zero. @@ -236,13 +259,13 @@ minGI = gid # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1. # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K. - # We would like to pick an idDelta such that the first glyphArray GID is 1, + # We would like to pick an idDelta such that the first glyphArray GID is 1, # so that we are more likely to be able to combine glypharray GID subranges. # This means that we have a problem when minGI is > 32K # Since the final gi is reconstructed from the glyphArray GID by: # (short)finalGID = (gid + idDelta) % 0x10000), # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the - # negative number to an unsigned short. + # negative number to an unsigned short. 
if (minGI > 1): if minGI > 0x7FFF: @@ -252,17 +275,16 @@ idDelta = subHeader.idDelta for i in range(subHeader.entryCount): gid = subHeader.glyphIndexArray[i] - if gid > 0: - subHeader.glyphIndexArray[i] = gid - idDelta - + if gid > 0: + subHeader.glyphIndexArray[i] = gid - idDelta def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. - if data != None and ttFont != None: - self.decompileHeader(data[offset:offset+int(length)], ttFont) + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) else: - assert (data == None and ttFont == None), "Need both data and ttFont arguments" + assert (data is None and ttFont is None), "Need both data and ttFont arguments" data = self.data # decompileHeader assigns the data after the header to self.data subHeaderKeys = [] @@ -271,11 +293,11 @@ allKeys = array.array("H") allKeys.fromstring(data[:512]) data = data[512:] - if sys.byteorder <> "big": + if sys.byteorder != "big": allKeys.byteswap() - subHeaderKeys = [ key/8 for key in allKeys] + subHeaderKeys = [ key//8 for key in allKeys] maxSubHeaderindex = max(subHeaderKeys) - + #Load subHeaders subHeaderList = [] pos = 0 @@ -287,18 +309,18 @@ giDataPos = pos + subHeader.idRangeOffset-2 giList = array.array("H") giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2]) - if sys.byteorder <> "big": + if sys.byteorder != "big": giList.byteswap() subHeader.glyphIndexArray = giList subHeaderList.append(subHeader) - # How this gets processed. + # How this gets processed. # Charcodes may be one or two bytes. # The first byte of a charcode is mapped through the subHeaderKeys, to select # a subHeader. For any subheader but 0, the next byte is then mapped through the - # selected subheader. If subheader Index 0 is selected, then the byte itself is + # selected subheader. 
If subheader Index 0 is selected, then the byte itself is # mapped through the subheader, and there is no second byte. # Then assume that the subsequent byte is the first byte of the next charcode,and repeat. - # + # # Each subheader references a range in the glyphIndexArray whose length is entryCount. # The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray # referenced by another subheader. @@ -310,7 +332,7 @@ # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero # (e.g. glyph not in font). # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar). - # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by + # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex. # Example for Logocut-Medium @@ -324,11 +346,11 @@ # [257], [1]=2 from charcode [129, 65] # [258], [2]=3 from charcode [129, 66] # [259], [3]=4 from charcode [129, 67] - # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero, + # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero, # add it to the glyphID to get the final glyphIndex # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew! - - self.data = "" + + self.data = b"" self.cmap = cmap = {} notdefGI = 0 for firstByte in range(256): @@ -358,39 +380,37 @@ continue cmap[charCode] = gi # If not subHeader.entryCount, then all char codes with this first byte are - # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the + # mapped to .notdef. 
We can skip this subtable, and leave the glyphs un-encoded, which is the # same as mapping it to .notdef. # cmap values are GID's. glyphOrder = self.ttFont.getGlyphOrder() - gids = cmap.values() - charCodes = cmap.keys() + gids = list(cmap.values()) + charCodes = list(cmap.keys()) lenCmap = len(gids) try: - names = map(operator.getitem, [glyphOrder]*lenCmap, gids ) + names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) except IndexError: getGlyphName = self.ttFont.getGlyphName - names = map(getGlyphName, gids ) - map(operator.setitem, [cmap]*lenCmap, charCodes, names) - - + names = list(map(getGlyphName, gids )) + list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + def compile(self, ttFont): if self.data: return struct.pack(">HHH", self.format, self.length, self.language) + self.data kEmptyTwoCharCodeRange = -1 notdefGI = 0 - items = self.cmap.items() - items.sort() + items = sorted(self.cmap.items()) charCodes = [item[0] for item in items] names = [item[1] for item in items] nameMap = ttFont.getReverseGlyphMap() - lenCharCodes = len(charCodes) + lenCharCodes = len(charCodes) try: - gids = map(operator.getitem, [nameMap]*lenCharCodes, names) + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) except KeyError: - nameMap = ttFont.getReverseGlyphMap(rebuild=1) + nameMap = ttFont.getReverseGlyphMap(rebuild=True) try: - gids = map(operator.getitem, [nameMap]*lenCharCodes, names) + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) except KeyError: # allow virtual GIDs in format 2 tables gids = [] @@ -409,8 +429,8 @@ gids.append(gid) # Process the (char code to gid) item list in char code order. - # By definition, all one byte char codes map to subheader 0. - # For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0, + # By definition, all one byte char codes map to subheader 0. 
+ # For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0, # which defines all char codes in its range to map to notdef) unless proven otherwise. # Note that since the char code items are processed in char code order, all the char codes with the # same first byte are in sequential order. @@ -429,8 +449,7 @@ subHeader.idDelta = 0 subHeader.idRangeOffset = 0 subHeaderList.append(subHeader) - - + lastFirstByte = -1 items = zip(charCodes, gids) for charCode, gid in items: @@ -467,7 +486,7 @@ subHeader.glyphIndexArray.append(notdefGI) subHeader.glyphIndexArray.append(gid) subHeader.entryCount = subHeader.entryCount + codeDiff + 1 - + # fix GI's and iDelta of last subheader that we we added to the subheader array. self.setIDDelta(subHeader) @@ -484,12 +503,12 @@ subHeaderKeys[index] = emptySubheadIndex # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray, - # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with + # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with # charcode 0 and GID 0. - - idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset. + + idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset. subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2. 
- for index in range(subheadRangeLen): + for index in range(subheadRangeLen): subHeader = subHeaderList[index] subHeader.idRangeOffset = 0 for j in range(index): @@ -498,7 +517,7 @@ subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8 subHeader.glyphIndexArray = [] break - if subHeader.idRangeOffset == 0: # didn't find one. + if subHeader.idRangeOffset == 0: # didn't find one. subHeader.idRangeOffset = idRangeOffset idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray. else: @@ -516,22 +535,21 @@ for subhead in subHeaderList[:-1]: for gi in subhead.glyphIndexArray: dataList.append(struct.pack(">H", gi)) - data = "".join(dataList) + data = bytesjoin(dataList) assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length) return data - - def fromXML(self, (name, attrs, content), ttFont): + def fromXML(self, name, attrs, content, ttFont): self.language = safeEval(attrs["language"]) if not hasattr(self, "cmap"): self.cmap = {} cmap = self.cmap for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element - if name <> "map": + if name != "map": continue cmap[safeEval(attrs["code"])] = attrs["name"] @@ -552,17 +570,17 @@ # to do well with the fonts I tested: none became bigger, many became smaller. if startCode == endCode: return [], [endCode] - + lastID = cmap[startCode] lastCode = startCode inOrder = None orderedBegin = None subRanges = [] - + # Gather subranges in which the glyph IDs are consecutive. 
for code in range(startCode + 1, endCode + 1): glyphID = cmap[code] - + if glyphID - 1 == lastID: if inOrder is None or not inOrder: inOrder = 1 @@ -572,14 +590,14 @@ inOrder = 0 subRanges.append((orderedBegin, lastCode)) orderedBegin = None - + lastID = glyphID lastCode = code - + if inOrder: subRanges.append((orderedBegin, lastCode)) assert lastCode == endCode - + # Now filter out those new subranges that would only make the data bigger. # A new segment cost 8 bytes, not using a new segment costs 2 bytes per # character. @@ -594,15 +612,15 @@ if (e - b + 1) > threshold: newRanges.append((b, e)) subRanges = newRanges - + if not subRanges: return [], [endCode] - + if subRanges[0][0] != startCode: subRanges.insert(0, (startCode, subRanges[0][0] - 1)) if subRanges[-1][1] != endCode: subRanges.append((subRanges[-1][1] + 1, endCode)) - + # Fill the "holes" in the segments list -- those are the segments in which # the glyph IDs are _not_ consecutive. i = 1 @@ -611,7 +629,7 @@ subRanges.insert(i, (subRanges[i-1][1] + 1, subRanges[i][0] - 1)) i = i + 1 i = i + 1 - + # Transform the ranges into startCode/endCode lists. start = [] end = [] @@ -619,34 +637,34 @@ start.append(b) end.append(e) start.pop(0) - + assert len(start) + 1 == len(end) return start, end class cmap_format_4(CmapSubtable): - + def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. 
- if data != None and ttFont != None: - self.decompileHeader(self.data[offset:offset+int(length)], ttFont) + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) else: - assert (data == None and ttFont == None), "Need both data and ttFont arguments" + assert (data is None and ttFont is None), "Need both data and ttFont arguments" data = self.data # decompileHeader assigns the data after the header to self.data (segCountX2, searchRange, entrySelector, rangeShift) = \ struct.unpack(">4H", data[:8]) data = data[8:] - segCount = segCountX2 / 2 - + segCount = segCountX2 // 2 + allCodes = array.array("H") allCodes.fromstring(data) self.data = data = None - if sys.byteorder <> "big": + if sys.byteorder != "big": allCodes.byteswap() - + # divide the data endCode = allCodes[:segCount] allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field @@ -662,76 +680,55 @@ charCodes = [] gids = [] for i in range(len(startCode) - 1): # don't do 0xffff! - rangeCharCodes = range(startCode[i], endCode[i] + 1) - charCodes = charCodes + rangeCharCodes - for charCode in rangeCharCodes: - rangeOffset = idRangeOffset[i] - if rangeOffset == 0: - glyphID = charCode + idDelta[i] - else: - # *someone* needs to get killed. - index = idRangeOffset[i] / 2 + (charCode - startCode[i]) + i - len(idRangeOffset) + start = startCode[i] + delta = idDelta[i] + rangeOffset = idRangeOffset[i] + # *someone* needs to get killed. + partial = rangeOffset // 2 - start + i - len(idRangeOffset) + + rangeCharCodes = list(range(startCode[i], endCode[i] + 1)) + charCodes.extend(rangeCharCodes) + if rangeOffset == 0: + gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes]) + else: + for charCode in rangeCharCodes: + index = charCode + partial assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" 
% (i, index, lenGIArray) - if glyphIndexArray[index] <> 0: # if not missing glyph - glyphID = glyphIndexArray[index] + idDelta[i] + if glyphIndexArray[index] != 0: # if not missing glyph + glyphID = glyphIndexArray[index] + delta else: glyphID = 0 # missing glyph - gids.append(glyphID % 0x10000) + gids.append(glyphID & 0xFFFF) self.cmap = cmap = {} lenCmap = len(gids) glyphOrder = self.ttFont.getGlyphOrder() try: - names = map(operator.getitem, [glyphOrder]*lenCmap, gids ) + names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) except IndexError: getGlyphName = self.ttFont.getGlyphName - names = map(getGlyphName, gids ) - map(operator.setitem, [cmap]*lenCmap, charCodes, names) - - - - def setIDDelta(self, idDelta): - # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1. - # idDelta is a short, and must be between -32K and 32K - # startCode can be between 0 and 64K-1, and the first glyph index can be between 1 and 64K-1 - # This means that we have a problem because we can need to assign to idDelta values - # between -(64K-2) and 64K -1. - # Since the final gi is reconstructed from the glyphArray GID by: - # (short)finalGID = (gid + idDelta) % 0x10000), - # we can get from a startCode of 0 to a final GID of 64 -1K by subtracting 1, and casting the - # negative number to an unsigned short. - # Similarly , we can get from a startCode of 64K-1 to a final GID of 1 by adding 2, because of - # the modulo arithmetic. 
- - if idDelta > 0x7FFF: - idDelta = idDelta - 0x10000 - elif idDelta < -0x7FFF: - idDelta = idDelta + 0x10000 - - return idDelta - + names = list(map(getGlyphName, gids )) + list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) def compile(self, ttFont): if self.data: return struct.pack(">HHH", self.format, self.length, self.language) + self.data - from fontTools.ttLib.sfnt import maxPowerOfTwo - - charCodes = self.cmap.keys() + charCodes = list(self.cmap.keys()) lenCharCodes = len(charCodes) if lenCharCodes == 0: startCode = [0xffff] endCode = [0xffff] else: charCodes.sort() - names = map(operator.getitem, [self.cmap]*lenCharCodes, charCodes) + names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes)) nameMap = ttFont.getReverseGlyphMap() try: - gids = map(operator.getitem, [nameMap]*lenCharCodes, names) + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) except KeyError: - nameMap = ttFont.getReverseGlyphMap(rebuild=1) + nameMap = ttFont.getReverseGlyphMap(rebuild=True) try: - gids = map(operator.getitem, [nameMap]*lenCharCodes, names) + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) except KeyError: # allow virtual GIDs in format 4 tables gids = [] @@ -746,11 +743,11 @@ gid = ttFont.getGlyphID(name) except: raise KeyError(name) - + gids.append(gid) cmap = {} # code:glyphID mapping - map(operator.setitem, [cmap]*len(charCodes), charCodes, gids) - + list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids)) + # Build startCode and endCode lists. # Split the char codes in ranges of consecutive char codes, then split # each range in more ranges of consecutive/not consecutive glyph IDs. 
@@ -767,10 +764,12 @@ endCode.extend(end) startCode.append(charCode) lastCode = charCode - endCode.append(lastCode) + start, end = splitRange(startCode[-1], lastCode, cmap) + startCode.extend(start) + endCode.extend(end) startCode.append(0xffff) endCode.append(0xffff) - + # build up rest of cruft idDelta = [] idRangeOffset = [] @@ -779,9 +778,8 @@ indices = [] for charCode in range(startCode[i], endCode[i] + 1): indices.append(cmap[charCode]) - if (indices == range(indices[0], indices[0] + len(indices))): - idDeltaTemp = self.setIDDelta(indices[0] - startCode[i]) - idDelta.append( idDeltaTemp) + if (indices == list(range(indices[0], indices[0] + len(indices)))): + idDelta.append((indices[0] - startCode[i]) % 0x10000) idRangeOffset.append(0) else: # someone *definitely* needs to get killed. @@ -790,53 +788,50 @@ glyphIndexArray.extend(indices) idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef idRangeOffset.append(0) - - # Insane. + + # Insane. segCount = len(endCode) segCountX2 = segCount * 2 - maxExponent = maxPowerOfTwo(segCount) - searchRange = 2 * (2 ** maxExponent) - entrySelector = maxExponent - rangeShift = 2 * segCount - searchRange - - charCodeArray = numpy.array( endCode + [0] + startCode, numpy.uint16) - idDeltaeArray = numpy.array(idDelta, numpy.int16) - restArray = numpy.array(idRangeOffset + glyphIndexArray, numpy.uint16) - if sys.byteorder <> "big": - charCodeArray = charCodeArray.byteswap() - idDeltaeArray = idDeltaeArray.byteswap() - restArray = restArray.byteswap() - data = charCodeArray.tostring() + idDeltaeArray.tostring() + restArray.tostring() + searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2) + + charCodeArray = array.array("H", endCode + [0] + startCode) + idDeltaArray = array.array("H", idDelta) + restArray = array.array("H", idRangeOffset + glyphIndexArray) + if sys.byteorder != "big": + charCodeArray.byteswap() + idDeltaArray.byteswap() + restArray.byteswap() + data = 
charCodeArray.tostring() + idDeltaArray.tostring() + restArray.tostring() length = struct.calcsize(cmap_format_4_format) + len(data) - header = struct.pack(cmap_format_4_format, self.format, length, self.language, + header = struct.pack(cmap_format_4_format, self.format, length, self.language, segCountX2, searchRange, entrySelector, rangeShift) return header + data - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): self.language = safeEval(attrs["language"]) if not hasattr(self, "cmap"): self.cmap = {} cmap = self.cmap for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue nameMap, attrsMap, dummyContent = element - if nameMap <> "map": + if nameMap != "map": assert 0, "Unrecognized keyword in cmap subtable" cmap[safeEval(attrsMap["code"])] = attrsMap["name"] class cmap_format_6(CmapSubtable): - + def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. - if data != None and ttFont != None: - self.decompileHeader(data[offset:offset+int(length)], ttFont) + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) else: - assert (data == None and ttFont == None), "Need both data and ttFont arguments" + assert (data is None and ttFont is None), "Need both data and ttFont arguments" data = self.data # decompileHeader assigns the data after the header to self.data firstCode, entryCount = struct.unpack(">HH", data[:4]) @@ -845,62 +840,60 @@ #assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!! 
glyphIndexArray = array.array("H") glyphIndexArray.fromstring(data[:2 * int(entryCount)]) - if sys.byteorder <> "big": + if sys.byteorder != "big": glyphIndexArray.byteswap() self.data = data = None self.cmap = cmap = {} lenArray = len(glyphIndexArray) - charCodes = range(firstCode, firstCode + lenArray ) + charCodes = list(range(firstCode, firstCode + lenArray)) glyphOrder = self.ttFont.getGlyphOrder() try: - names = map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray ) + names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray )) except IndexError: getGlyphName = self.ttFont.getGlyphName - names = map(getGlyphName, glyphIndexArray ) - map(operator.setitem, [cmap]*lenArray, charCodes, names) - + names = list(map(getGlyphName, glyphIndexArray )) + list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) + def compile(self, ttFont): if self.data: return struct.pack(">HHH", self.format, self.length, self.language) + self.data cmap = self.cmap - codes = cmap.keys() + codes = sorted(cmap.keys()) if codes: # yes, there are empty cmap tables. 
- codes.sort() - lenCodes = len(codes) - assert codes == range(codes[0], codes[0] + lenCodes) + codes = list(range(codes[0], codes[-1] + 1)) firstCode = codes[0] - valueList = map(operator.getitem, [cmap]*lenCodes, codes) + valueList = [cmap.get(code, ".notdef") for code in codes] valueList = map(ttFont.getGlyphID, valueList) - glyphIndexArray = numpy.array(valueList, numpy.uint16) - if sys.byteorder <> "big": - glyphIndexArray = glyphIndexArray.byteswap() + glyphIndexArray = array.array("H", valueList) + if sys.byteorder != "big": + glyphIndexArray.byteswap() data = glyphIndexArray.tostring() else: - data = "" + data = b"" firstCode = 0 - header = struct.pack(">HHHHH", + header = struct.pack(">HHHHH", 6, len(data) + 10, self.language, firstCode, len(codes)) return header + data - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): self.language = safeEval(attrs["language"]) if not hasattr(self, "cmap"): self.cmap = {} cmap = self.cmap for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element - if name <> "map": + if name != "map": continue cmap[safeEval(attrs["code"])] = attrs["name"] -class cmap_format_12(CmapSubtable): - +class cmap_format_12_or_13(CmapSubtable): + def __init__(self, format): self.format = format self.reserved = 0 @@ -909,7 +902,7 @@ def decompileHeader(self, data, ttFont): format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16]) - assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format 12 (data length: %d, header length: %d)" % (len(data), length) + assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length) self.format = format self.reserved = reserved self.length = length @@ -921,10 +914,10 @@ def decompile(self, data, ttFont): # we usually get here indirectly from the subtable 
__getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. - if data != None and ttFont != None: - self.decompileHeader(data[offset:offset+int(length)], ttFont) + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) else: - assert (data == None and ttFont == None), "Need both data and ttFont arguments" + assert (data is None and ttFont is None), "Need both data and ttFont arguments" data = self.data # decompileHeader assigns the data after the header to self.data charCodes = [] @@ -934,32 +927,32 @@ startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] ) pos += 12 lenGroup = 1 + endCharCode - startCharCode - charCodes += range(startCharCode, endCharCode +1) - gids += range(glyphID, glyphID + lenGroup) + charCodes.extend(list(range(startCharCode, endCharCode +1))) + gids.extend(self._computeGIDs(glyphID, lenGroup)) self.data = data = None self.cmap = cmap = {} lenCmap = len(gids) glyphOrder = self.ttFont.getGlyphOrder() try: - names = map(operator.getitem, [glyphOrder]*lenCmap, gids ) + names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) except IndexError: getGlyphName = self.ttFont.getGlyphName - names = map(getGlyphName, gids ) - map(operator.setitem, [cmap]*lenCmap, charCodes, names) - + names = list(map(getGlyphName, gids )) + list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + def compile(self, ttFont): if self.data: - return struct.pack(">HHLLL", self.format, self.reserved , self.length, self.language, self.nGroups) + self.data - charCodes = self.cmap.keys() - lenCharCodes = len(charCodes) - names = self.cmap.values() + return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data + charCodes = list(self.cmap.keys()) + lenCharCodes = len(charCodes) + names = list(self.cmap.values()) nameMap = ttFont.getReverseGlyphMap() try: - gids = 
map(operator.getitem, [nameMap]*lenCharCodes, names) + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) except KeyError: - nameMap = ttFont.getReverseGlyphMap(rebuild=1) + nameMap = ttFont.getReverseGlyphMap(rebuild=True) try: - gids = map(operator.getitem, [nameMap]*lenCharCodes, names) + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) except KeyError: # allow virtual GIDs in format 12 tables gids = [] @@ -976,15 +969,15 @@ raise KeyError(name) gids.append(gid) - + cmap = {} # code:glyphID mapping - map(operator.setitem, [cmap]*len(charCodes), charCodes, gids) + list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids)) charCodes.sort() index = 0 startCharCode = charCodes[0] startGlyphID = cmap[startCharCode] - lastGlyphID = startGlyphID - 1 + lastGlyphID = startGlyphID - self._format_step lastCharCode = startCharCode - 1 nGroups = 0 dataList = [] @@ -992,7 +985,7 @@ for index in range(maxIndex): charCode = charCodes[index] glyphID = cmap[charCode] - if (glyphID != 1 + lastGlyphID) or (charCode != 1 + lastCharCode): + if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode): dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)) startCharCode = charCode startGlyphID = glyphID @@ -1001,11 +994,11 @@ lastCharCode = charCode dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)) nGroups = nGroups + 1 - data = "".join(dataList) + data = bytesjoin(dataList) lengthSubtable = len(data) +16 - assert len(data) == (nGroups*12) == (lengthSubtable-16) - return struct.pack(">HHLLL", self.format, self.reserved , lengthSubtable, self.language, nGroups) + data - + assert len(data) == (nGroups*12) == (lengthSubtable-16) + return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data + def toXML(self, writer, ttFont): writer.begintag(self.__class__.__name__, [ ("platformID", self.platformID), @@ -1017,13 +1010,12 @@ ("nGroups", 
self.nGroups), ]) writer.newline() - codes = self.cmap.items() - codes.sort() + codes = sorted(self.cmap.items()) self._writeCodes(codes, writer) writer.endtag(self.__class__.__name__) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): self.format = safeEval(attrs["format"]) self.reserved = safeEval(attrs["reserved"]) self.length = safeEval(attrs["length"]) @@ -1034,44 +1026,53 @@ cmap = self.cmap for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element - if name <> "map": + if name != "map": continue cmap[safeEval(attrs["code"])] = attrs["name"] +class cmap_format_12(cmap_format_12_or_13): + + _format_step = 1 + + def __init__(self, format=12): + cmap_format_12_or_13.__init__(self, format) + + def _computeGIDs(self, startingGlyph, numberOfGlyphs): + return list(range(startingGlyph, startingGlyph + numberOfGlyphs)) + + def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): + return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode) + + +class cmap_format_13(cmap_format_12_or_13): + + _format_step = 0 + + def __init__(self, format=13): + cmap_format_12_or_13.__init__(self, format) + + def _computeGIDs(self, startingGlyph, numberOfGlyphs): + return [startingGlyph] * numberOfGlyphs + + def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): + return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode) + + def cvtToUVS(threeByteString): - if sys.byteorder <> "big": - data = "\0" +threeByteString - else: - data = threeByteString + "\0" + data = b"\0" + threeByteString val, = struct.unpack(">L", data) return val def cvtFromUVS(val): - if sys.byteorder <> "big": - threeByteString = struct.pack(">L", val)[1:] - else: - threeByteString = struct.pack(">L", val)[:3] - return threeByteString - -def cmpUVSListEntry(first, second): - uv1, glyphName1 = first - uv2, glyphName2 = 
second - - if (glyphName1 == None) and (glyphName2 != None): - return -1 - elif (glyphName2 == None) and (glyphName1 != None): - return 1 - - ret = cmp(uv1, uv2) - if ret: - return ret - return cmp(glyphName1, glyphName2) - - + assert 0 <= val < 0x1000000 + fourByteString = struct.pack(">L", val) + return fourByteString[1:] + + class cmap_format_14(CmapSubtable): def decompileHeader(self, data, ttFont): @@ -1083,21 +1084,21 @@ self.language = 0xFF # has no language. def decompile(self, data, ttFont): - if data != None and ttFont != None: + if data is not None and ttFont is not None: self.decompileHeader(data, ttFont) else: - assert (data == None and ttFont == None), "Need both data and ttFont arguments" + assert (data is None and ttFont is None), "Need both data and ttFont arguments" data = self.data - + self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail. uvsDict = {} recOffset = 0 for n in range(self.numVarSelectorRecords): - uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11]) + uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11]) recOffset += 11 varUVS = cvtToUVS(uvs) if defOVSOffset: - startOffset = defOVSOffset - 10 + startOffset = defOVSOffset - 10 numValues, = struct.unpack(">L", data[startOffset:startOffset+4]) startOffset +=4 for r in range(numValues): @@ -1105,16 +1106,16 @@ startOffset += 4 firstBaseUV = cvtToUVS(uv) cnt = addtlCnt+1 - baseUVList = range(firstBaseUV, firstBaseUV+cnt) + baseUVList = list(range(firstBaseUV, firstBaseUV+cnt)) glyphList = [None]*cnt localUVList = zip(baseUVList, glyphList) try: uvsDict[varUVS].extend(localUVList) except KeyError: - uvsDict[varUVS] = localUVList - + uvsDict[varUVS] = list(localUVList) + if nonDefUVSOffset: - startOffset = nonDefUVSOffset - 10 + startOffset = nonDefUVSOffset - 10 numRecs, = struct.unpack(">L", data[startOffset:startOffset+4]) startOffset +=4 localUVList = [] @@ -1128,9 +1129,9 @@ 
uvsDict[varUVS].extend(localUVList) except KeyError: uvsDict[varUVS] = localUVList - + self.uvsDict = uvsDict - + def toXML(self, writer, ttFont): writer.begintag(self.__class__.__name__, [ ("platformID", self.platformID), @@ -1141,13 +1142,12 @@ ]) writer.newline() uvsDict = self.uvsDict - uvsList = uvsDict.keys() - uvsList.sort() + uvsList = sorted(uvsDict.keys()) for uvs in uvsList: uvList = uvsDict[uvs] - uvList.sort(cmpUVSListEntry) + uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1])) for uv, gname in uvList: - if gname == None: + if gname is None: gname = "None" # I use the arg rather than th keyword syntax in order to preserve the attribute order. writer.simpletag("map", [ ("uvs",hex(uvs)), ("uv",hex(uv)), ("name", gname)] ) @@ -1155,22 +1155,22 @@ writer.endtag(self.__class__.__name__) writer.newline() - def fromXML(self, (name, attrs, content), ttFont): + def fromXML(self, name, attrs, content, ttFont): self.format = safeEval(attrs["format"]) self.length = safeEval(attrs["length"]) self.numVarSelectorRecords = safeEval(attrs["numVarSelectorRecords"]) - self.language = 0xFF # provide a value so that CmapSubtable.__cmp__() won't fail + self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail if not hasattr(self, "cmap"): self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail. 
if not hasattr(self, "uvsDict"): - self.uvsDict = {} - uvsDict = self.uvsDict + self.uvsDict = {} + uvsDict = self.uvsDict for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element - if name <> "map": + if name != "map": continue uvs = safeEval(attrs["uvs"]) uv = safeEval(attrs["uv"]) @@ -1181,15 +1181,13 @@ uvsDict[uvs].append( [uv, gname]) except KeyError: uvsDict[uvs] = [ [uv, gname] ] - def compile(self, ttFont): if self.data: - return struct.pack(">HLL", self.format, self.length , self.numVarSelectorRecords) + self.data + return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data uvsDict = self.uvsDict - uvsList = uvsDict.keys() - uvsList.sort() + uvsList = sorted(uvsDict.keys()) self.numVarSelectorRecords = len(uvsList) offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block. data = [] @@ -1197,9 +1195,9 @@ for uvs in uvsList: entryList = uvsDict[uvs] - defList = filter(lambda entry: entry[1] == None, entryList) + defList = [entry for entry in entryList if entry[1] is None] if defList: - defList = map(lambda entry: entry[0], defList) + defList = [entry[0] for entry in defList] defOVSOffset = offset defList.sort() @@ -1213,7 +1211,7 @@ lastUV = defEntry defRecs.append(rec) cnt = 0 - + rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt) defRecs.append(rec) @@ -1224,7 +1222,7 @@ else: defOVSOffset = 0 - ndefList = filter(lambda entry: entry[1] != None, entryList) + ndefList = [entry for entry in entryList if entry[1] is not None] if ndefList: nonDefUVSOffset = offset ndefList.sort() @@ -1238,20 +1236,20 @@ data.append(ndrec) else: nonDefUVSOffset = 0 - + vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset) varSelectorRecords.append(vrec) - - data = "".join(varSelectorRecords) + "".join(data) + + data = bytesjoin(varSelectorRecords) + bytesjoin(data) self.length = 10 + len(data) - headerdata = 
struct.pack(">HLL", self.format, self.length , self.numVarSelectorRecords) + headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) self.data = headerdata + data - + return self.data - - + + class cmap_format_unknown(CmapSubtable): - + def toXML(self, writer, ttFont): cmapName = self.__class__.__name__[:12] + str(self.format) writer.begintag(cmapName, [ @@ -1262,22 +1260,22 @@ writer.dumphex(self.data) writer.endtag(cmapName) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): self.data = readHex(content) self.cmap = {} - + def decompileHeader(self, data, ttFont): self.language = 0 # dummy value self.data = data - + def decompile(self, data, ttFont): # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. # If not, someone is calling the subtable decompile() directly, and must provide both args. - if data != None and ttFont != None: - self.decompileHeader(data[offset:offset+int(length)], ttFont) + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) else: - assert (data == None and ttFont == None), "Need both data and ttFont arguments" + assert (data is None and ttFont is None), "Need both data and ttFont arguments" def compile(self, ttFont): if self.data: @@ -1291,5 +1289,6 @@ 4: cmap_format_4, 6: cmap_format_6, 12: cmap_format_12, + 13: cmap_format_13, 14: cmap_format_14, - } +} diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_c_m_a_p_test.py fonttools-3.0/Lib/fontTools/ttLib/tables/_c_m_a_p_test.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_c_m_a_p_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_c_m_a_p_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,53 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools import ttLib +import unittest +from 
._c_m_a_p import CmapSubtable + +class CmapSubtableTest(unittest.TestCase): + + def makeSubtable(self, platformID, platEncID, langID): + subtable = CmapSubtable(None) + subtable.platformID, subtable.platEncID, subtable.language = (platformID, platEncID, langID) + return subtable + + def test_toUnicode_utf16be(self): + subtable = self.makeSubtable(0, 2, 7) + self.assertEqual("utf_16_be", subtable.getEncoding()) + self.assertEqual(True, subtable.isUnicode()) + + def test_toUnicode_macroman(self): + subtable = self.makeSubtable(1, 0, 7) # MacRoman + self.assertEqual("mac_roman", subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_toUnicode_macromanian(self): + subtable = self.makeSubtable(1, 0, 37) # Mac Romanian + self.assertNotEqual(None, subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_extended_mac_encodings(self): + subtable = self.makeSubtable(1, 1, 0) # Mac Japanese + self.assertNotEqual(None, subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_extended_unknown(self): + subtable = self.makeSubtable(10, 11, 12) + self.assertEqual(subtable.getEncoding(), None) + self.assertEqual(subtable.getEncoding("ascii"), "ascii") + self.assertEqual(subtable.getEncoding(default="xyz"), "xyz") + + def test_decompile_4(self): + subtable = CmapSubtable.newSubtable(4) + font = ttLib.TTFont() + font.setGlyphOrder([]) + subtable.decompile(b'\0' * 3 + b'\x10' + b'\0' * 12, font) + + def test_decompile_12(self): + subtable = CmapSubtable.newSubtable(12) + font = ttLib.TTFont() + font.setGlyphOrder([]) + subtable.decompile(b'\0' * 7 + b'\x10' + b'\0' * 8, font) + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/C_O_L_R_.py fonttools-3.0/Lib/fontTools/ttLib/tables/C_O_L_R_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/C_O_L_R_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/C_O_L_R_.py 2015-08-31 
17:57:15.000000000 +0000 @@ -0,0 +1,159 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . import DefaultTable +import operator +import struct + + +class table_C_O_L_R_(DefaultTable.DefaultTable): + + """ This table is structured so that you can treat it like a dictionary keyed by glyph name. + ttFont['COLR'][] will return the color layers for any glyph + ttFont['COLR'][] = will set the color layers for any glyph. + """ + + def decompile(self, data, ttFont): + self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID + self.version, numBaseGlyphRecords, offsetBaseGlyphRecord, offsetLayerRecord, numLayerRecords = struct.unpack(">HHLLH", data[:14]) + assert (self.version == 0), "Version of COLR table is higher than I know how to handle" + glyphOrder = ttFont.getGlyphOrder() + gids = [] + layerLists = [] + glyphPos = offsetBaseGlyphRecord + for i in range(numBaseGlyphRecords): + gid, firstLayerIndex, numLayers = struct.unpack(">HHH", data[glyphPos:glyphPos+6]) + glyphPos += 6 + gids.append(gid) + assert (firstLayerIndex + numLayers <= numLayerRecords) + layerPos = offsetLayerRecord + firstLayerIndex * 4 + layers = [] + for j in range(numLayers): + layerGid, colorID = struct.unpack(">HH", data[layerPos:layerPos+4]) + try: + layerName = glyphOrder[layerGid] + except IndexError: + layerName = self.getGlyphName(layerGid) + layerPos += 4 + layers.append(LayerRecord(layerName, colorID)) + layerLists.append(layers) + + self.ColorLayers = colorLayerLists = {} + try: + names = list(map(operator.getitem, [glyphOrder]*numBaseGlyphRecords, gids)) + except IndexError: + getGlyphName = self.getGlyphName + names = list(map(getGlyphName, gids )) + + list(map(operator.setitem, [colorLayerLists]*numBaseGlyphRecords, names, layerLists)) + + def 
compile(self, ttFont): + ordered = [] + ttFont.getReverseGlyphMap(rebuild=True) + glyphNames = self.ColorLayers.keys() + for glyphName in glyphNames: + try: + gid = ttFont.getGlyphID(glyphName) + except: + assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) + ordered.append([gid, glyphName, self.ColorLayers[glyphName]]) + ordered.sort() + + glyphMap = [] + layerMap = [] + for (gid, glyphName, layers) in ordered: + glyphMap.append(struct.pack(">HHH", gid, len(layerMap), len(layers))) + for layer in layers: + layerMap.append(struct.pack(">HH", ttFont.getGlyphID(layer.name), layer.colorID)) + + dataList = [struct.pack(">HHLLH", self.version, len(glyphMap), 14, 14+6*len(glyphMap), len(layerMap))] + dataList.extend(glyphMap) + dataList.extend(layerMap) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + ordered = [] + glyphNames = self.ColorLayers.keys() + for glyphName in glyphNames: + try: + gid = ttFont.getGlyphID(glyphName) + except: + assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) + ordered.append([gid, glyphName, self.ColorLayers[glyphName]]) + ordered.sort() + for entry in ordered: + writer.begintag("ColorGlyph", name=entry[1]) + writer.newline() + for layer in entry[2]: + layer.toXML(writer, ttFont) + writer.endtag("ColorGlyph") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "ColorLayers"): + self.ColorLayers = {} + self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID + if name == "ColorGlyph": + glyphName = attrs["name"] + for element in content: + if isinstance(element, basestring): + continue + layers = [] + for element in content: + if isinstance(element, basestring): + continue + layer = LayerRecord() + layer.fromXML(element[0], element[1], element[2], ttFont) + layers.append (layer) 
+ operator.setitem(self, glyphName, layers) + elif "value" in attrs: + setattr(self, name, safeEval(attrs["value"])) + + def __getitem__(self, glyphSelector): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = self.getGlyphName(glyphSelector) + + if glyphSelector not in self.ColorLayers: + return None + + return self.ColorLayers[glyphSelector] + + def __setitem__(self, glyphSelector, value): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = self.getGlyphName(glyphSelector) + + if value: + self.ColorLayers[glyphSelector] = value + elif glyphSelector in self.ColorLayers: + del self.ColorLayers[glyphSelector] + + def __delitem__(self, glyphSelector): + del self.ColorLayers[glyphSelector] + +class LayerRecord(object): + + def __init__(self, name=None, colorID=None): + self.name = name + self.colorID = colorID + + def toXML(self, writer, ttFont): + writer.simpletag("layer", name=self.name, colorID=self.colorID) + writer.newline() + + def fromXML(self, eltname, attrs, content, ttFont): + for (name, value) in attrs.items(): + if name == "name": + if isinstance(value, int): + value = ttFont.getGlyphName(value) + setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/C_P_A_L_.py fonttools-3.0/Lib/fontTools/ttLib/tables/C_P_A_L_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/C_P_A_L_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/C_P_A_L_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,100 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import struct + + +class table_C_P_A_L_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12]) + assert (self.version == 0), "Version of COLR table is higher than I know how to handle" + self.palettes = [] + pos = 12 + for i in range(numPalettes): + startIndex = struct.unpack(">H", data[pos:pos+2])[0] + assert (startIndex + self.numPaletteEntries <= numColorRecords) + pos += 2 + palette = [] + ppos = goffsetFirstColorRecord + startIndex * 4 + for j in range(self.numPaletteEntries): + palette.append( Color(*struct.unpack(">BBBB", data[ppos:ppos+4])) ) + ppos += 4 + self.palettes.append(palette) + + def compile(self, ttFont): + dataList = [struct.pack(">HHHHL", self.version, self.numPaletteEntries, len(self.palettes), self.numPaletteEntries * len(self.palettes), 12+2*len(self.palettes))] + for i in range(len(self.palettes)): + dataList.append(struct.pack(">H", i*self.numPaletteEntries)) + for palette in self.palettes: + assert(len(palette) == self.numPaletteEntries) + for color in palette: + dataList.append(struct.pack(">BBBB", color.blue,color.green,color.red,color.alpha)) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("numPaletteEntries", value=self.numPaletteEntries) + writer.newline() + for index, palette in enumerate(self.palettes): + writer.begintag("palette", index=index) + writer.newline() + assert(len(palette) == self.numPaletteEntries) + for cindex, color in enumerate(palette): + color.toXML(writer, ttFont, cindex) + writer.endtag("palette") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "palettes"): + self.palettes = [] + if name == "palette": + palette = [] + for element in content: + if isinstance(element, basestring): + 
continue + palette = [] + for element in content: + if isinstance(element, basestring): + continue + color = Color() + color.fromXML(element[0], element[1], element[2], ttFont) + palette.append (color) + self.palettes.append(palette) + elif "value" in attrs: + value = safeEval(attrs["value"]) + setattr(self, name, value) + +class Color(object): + + def __init__(self, blue=None, green=None, red=None, alpha=None): + self.blue = blue + self.green = green + self.red = red + self.alpha = alpha + + def hex(self): + return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha) + + def __repr__(self): + return self.hex() + + def toXML(self, writer, ttFont, index=None): + writer.simpletag("color", value=self.hex(), index=index) + writer.newline() + + def fromXML(self, eltname, attrs, content, ttFont): + value = attrs["value"] + if value[0] == '#': + value = value[1:] + self.red = int(value[0:2], 16) + self.green = int(value[2:4], 16) + self.blue = int(value[4:6], 16) + self.alpha = int(value[6:8], 16) if len (value) >= 8 else 0xFF diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_c_v_t.py fonttools-3.0/Lib/fontTools/ttLib/tables/_c_v_t.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_c_v_t.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_c_v_t.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,31 +1,32 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable import sys -import DefaultTable import array -from fontTools import ttLib -from fontTools.misc.textTools import safeEval class table__c_v_t(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): values = array.array("h") values.fromstring(data) - if sys.byteorder <> "big": + if sys.byteorder != "big": values.byteswap() self.values = values - + def compile(self, ttFont): values = self.values[:] - if sys.byteorder <> "big": + if sys.byteorder != "big": values.byteswap() return values.tostring() - + def toXML(self, writer, ttFont): for i in range(len(self.values)): value = self.values[i] writer.simpletag("cv", value=value, index=i) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "values"): self.values = array.array("h") if name == "cv": @@ -34,16 +35,15 @@ for i in range(1 + index - len(self.values)): self.values.append(0) self.values[index] = value - + def __len__(self): return len(self.values) - + def __getitem__(self, index): return self.values[index] - + def __setitem__(self, index, value): self.values[index] = value - + def __delitem__(self, index): del self.values[index] - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/DefaultTable.py fonttools-3.0/Lib/fontTools/ttLib/tables/DefaultTable.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/DefaultTable.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/DefaultTable.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,20 +1,23 @@ -import string -import sys +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import getClassTag + +class DefaultTable(object): -class DefaultTable: - dependencies = [] - - def __init__(self, tag): - self.tableTag = tag - + + def __init__(self, tag=None): + if tag is None: + tag = getClassTag(self.__class__) + self.tableTag = Tag(tag) + def decompile(self, data, 
ttFont): self.data = data - + def compile(self, ttFont): return self.data - - def toXML(self, writer, ttFont): + + def toXML(self, writer, ttFont, progress=None): if hasattr(self, "ERROR"): writer.comment("An error occurred during the decompilation of this table") writer.newline() @@ -25,17 +28,20 @@ writer.dumphex(self.compile(ttFont)) writer.endtag("hexdata") writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): from fontTools.misc.textTools import readHex from fontTools import ttLib - if name <> "hexdata": - raise ttLib.TTLibError, "can't handle '%s' element" % name + if name != "hexdata": + raise ttLib.TTLibError("can't handle '%s' element" % name) self.decompile(readHex(content), ttFont) - + def __repr__(self): return "<'%s' table at %x>" % (self.tableTag, id(self)) - - def __cmp__(self, other): - return cmp(self.__dict__, other.__dict__) + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/D_S_I_G_.py fonttools-3.0/Lib/fontTools/ttLib/tables/D_S_I_G_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/D_S_I_G_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/D_S_I_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,6 +1,9 @@ -import DefaultTable +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.misc.textTools import safeEval -import sstruct +from fontTools.misc import sstruct +from . 
import DefaultTable +import base64 DSIG_HeaderFormat = """ > # big endian @@ -37,7 +40,7 @@ # class table_D_S_I_G_(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self) assert self.ulVersion == 1, "DSIG ulVersion must be 1" @@ -52,7 +55,7 @@ assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserverd1 must be 0" % n assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserverd2 must be 0" % n sigrec.pkcs7 = newData[:sigrec.cbSignature] - + def compile(self, ttFont): packed = sstruct.pack(DSIG_HeaderFormat, self) headers = [packed] @@ -69,8 +72,11 @@ sigrec.ulOffset = offset headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec)) offset += sigrec.ulLength - return ''.join(headers+data) - + if offset % 2: + # Pad to even bytes + data.append(b'\0') + return bytesjoin(headers+data) + def toXML(self, xmlWriter, ttFont): xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!") xmlWriter.newline() @@ -79,8 +85,8 @@ xmlWriter.newline() sigrec.toXML(xmlWriter, ttFont) xmlWriter.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if name == "tableHeader": self.signatureRecords = [] self.ulVersion = safeEval(attrs["version"]) @@ -89,27 +95,37 @@ return if name == "SignatureRecord": sigrec = SignatureRecord() - sigrec.fromXML((name, attrs, content), ttFont) + sigrec.fromXML(name, attrs, content, ttFont) self.signatureRecords.append(sigrec) pem_spam = lambda l, spam = { "-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True }: not spam.get(l.strip()) -class SignatureRecord: +def b64encode(b): + s = base64.b64encode(b) + # Line-break at 76 chars. 
+ items = [] + while s: + items.append(tostr(s[:76])) + items.append('\n') + s = s[76:] + return strjoin(items) + +class SignatureRecord(object): def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.__dict__) - + def toXML(self, writer, ttFont): writer.begintag(self.__class__.__name__, format=self.ulFormat) writer.newline() writer.write_noindent("-----BEGIN PKCS7-----\n") - writer.write_noindent(self.pkcs7.encode('base64')) + writer.write_noindent(b64encode(self.pkcs7)) writer.write_noindent("-----END PKCS7-----\n") writer.endtag(self.__class__.__name__) - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): self.ulFormat = safeEval(attrs["format"]) self.usReserved1 = safeEval(attrs.get("reserved1", "0")) self.usReserved2 = safeEval(attrs.get("reserved2", "0")) - self.pkcs7 = "".join(filter(pem_spam, content)).decode('base64') + self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content)))) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/E_B_D_T_.py fonttools-3.0/Lib/fontTools/ttLib/tables/E_B_D_T_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/E_B_D_T_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/E_B_D_T_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,759 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, readHex, hexStr, deHexStr +from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat +from . import DefaultTable +import itertools +import os +import struct + +ebdtTableVersionFormat = """ + > # big endian + version: 16.16F +""" + +ebdtComponentFormat = """ + > # big endian + glyphCode: H + xOffset: b + yOffset: b +""" + +class table_E_B_D_T_(DefaultTable.DefaultTable): + + # Keep a reference to the name of the data locator table. 
+ locatorName = 'EBLC' + + # This method can be overridden in subclasses to support new formats + # without changing the other implementation. Also can be used as a + # convenience method for coverting a font file to an alternative format. + def getImageFormatClass(self, imageFormat): + return ebdt_bitmap_classes[imageFormat] + + def decompile(self, data, ttFont): + # Get the version but don't advance the slice. + # Most of the lookup for this table is done relative + # to the begining so slice by the offsets provided + # in the EBLC table. + sstruct.unpack2(ebdtTableVersionFormat, data, self) + + # Keep a dict of glyphs that have been seen so they aren't remade. + # This dict maps intervals of data to the BitmapGlyph. + glyphDict = {} + + # Pull out the EBLC table and loop through glyphs. + # A strike is a concept that spans both tables. + # The actual bitmap data is stored in the EBDT. + locator = ttFont[self.__class__.locatorName] + self.strikeData = [] + for curStrike in locator.strikes: + bitmapGlyphDict = {} + self.strikeData.append(bitmapGlyphDict) + for indexSubTable in curStrike.indexSubTables: + dataIter = zip(indexSubTable.names, indexSubTable.locations) + for curName, curLoc in dataIter: + # Don't create duplicate data entries for the same glyphs. + # Instead just use the structures that already exist if they exist. + if curLoc in glyphDict: + curGlyph = glyphDict[curLoc] + else: + curGlyphData = data[slice(*curLoc)] + imageFormatClass = self.getImageFormatClass(indexSubTable.imageFormat) + curGlyph = imageFormatClass(curGlyphData, ttFont) + glyphDict[curLoc] = curGlyph + bitmapGlyphDict[curName] = curGlyph + + def compile(self, ttFont): + + dataList = [] + dataList.append(sstruct.pack(ebdtTableVersionFormat, self)) + dataSize = len(dataList[0]) + + # Keep a dict of glyphs that have been seen so they aren't remade. + # This dict maps the id of the BitmapGlyph to the interval + # in the data. + glyphDict = {} + + # Go through the bitmap glyph data. 
Just in case the data for a glyph + # changed the size metrics should be recalculated. There are a variety + # of formats and they get stored in the EBLC table. That is why + # recalculation is defered to the EblcIndexSubTable class and just + # pass what is known about bitmap glyphs from this particular table. + locator = ttFont[self.__class__.locatorName] + for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): + for curIndexSubTable in curStrike.indexSubTables: + dataLocations = [] + for curName in curIndexSubTable.names: + # Handle the data placement based on seeing the glyph or not. + # Just save a reference to the location if the glyph has already + # been saved in compile. This code assumes that glyphs will only + # be referenced multiple times from indexFormat5. By luck the + # code may still work when referencing poorly ordered fonts with + # duplicate references. If there is a font that is unlucky the + # respective compile methods for the indexSubTables will fail + # their assertions. All fonts seem to follow this assumption. + # More complicated packing may be needed if a counter-font exists. + glyph = curGlyphDict[curName] + objectId = id(glyph) + if objectId not in glyphDict: + data = glyph.compile(ttFont) + data = curIndexSubTable.padBitmapData(data) + startByte = dataSize + dataSize += len(data) + endByte = dataSize + dataList.append(data) + dataLoc = (startByte, endByte) + glyphDict[objectId] = dataLoc + else: + dataLoc = glyphDict[objectId] + dataLocations.append(dataLoc) + # Just use the new data locations in the indexSubTable. + # The respective compile implementations will take care + # of any of the problems in the convertion that may arise. + curIndexSubTable.locations = dataLocations + + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + # When exporting to XML if one of the data export formats + # requires metrics then those metrics may be in the locator. 
+ # In this case populate the bitmaps with "export metrics". + if ttFont.bitmapGlyphDataFormat in ('row', 'bitwise'): + locator = ttFont[self.__class__.locatorName] + for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): + for curIndexSubTable in curStrike.indexSubTables: + for curName in curIndexSubTable.names: + glyph = curGlyphDict[curName] + # I'm not sure which metrics have priority here. + # For now if both metrics exist go with glyph metrics. + if hasattr(glyph, 'metrics'): + glyph.exportMetrics = glyph.metrics + else: + glyph.exportMetrics = curIndexSubTable.metrics + glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth + + writer.simpletag("header", [('version', self.version)]) + writer.newline() + locator = ttFont[self.__class__.locatorName] + for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData): + writer.begintag('strikedata', [('index', strikeIndex)]) + writer.newline() + for curName, curBitmap in bitmapGlyphDict.items(): + curBitmap.toXML(strikeIndex, curName, writer, ttFont) + writer.endtag('strikedata') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'header': + self.version = safeEval(attrs['version']) + elif name == 'strikedata': + if not hasattr(self, 'strikeData'): + self.strikeData = [] + strikeIndex = safeEval(attrs['index']) + + bitmapGlyphDict = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]): + imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix):]) + glyphName = attrs['name'] + imageFormatClass = self.getImageFormatClass(imageFormat) + curGlyph = imageFormatClass(None, None) + curGlyph.fromXML(name, attrs, content, ttFont) + assert glyphName not in bitmapGlyphDict, "Duplicate glyphs with the same name '%s' in the same strike." 
% glyphName + bitmapGlyphDict[glyphName] = curGlyph + else: + print("Warning: %s being ignored by %s", name, self.__class__.__name__) + + # Grow the strike data array to the appropriate size. The XML + # format allows the strike index value to be out of order. + if strikeIndex >= len(self.strikeData): + self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData)) + assert self.strikeData[strikeIndex] is None, "Duplicate strike EBDT indices." + self.strikeData[strikeIndex] = bitmapGlyphDict + +class EbdtComponent(object): + + def toXML(self, writer, ttFont): + writer.begintag('ebdtComponent', [('name', self.name)]) + writer.newline() + for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]: + writer.simpletag(componentName, value=getattr(self, componentName)) + writer.newline() + writer.endtag('ebdtComponent') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.name = attrs['name'] + componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:]) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name in componentNames: + vars(self)[name] = safeEval(attrs['value']) + else: + print("Warning: unknown name '%s' being ignored by EbdtComponent." % name) + +# Helper functions for dealing with binary. 
+ +def _data2binary(data, numBits): + binaryList = [] + for curByte in data: + value = byteord(curByte) + numBitsCut = min(8, numBits) + for i in range(numBitsCut): + if value & 0x1: + binaryList.append('1') + else: + binaryList.append('0') + value = value >> 1 + numBits -= numBitsCut + return strjoin(binaryList) + +def _binary2data(binary): + byteList = [] + for bitLoc in range(0, len(binary), 8): + byteString = binary[bitLoc:bitLoc+8] + curByte = 0 + for curBit in reversed(byteString): + curByte = curByte << 1 + if curBit == '1': + curByte |= 1 + byteList.append(bytechr(curByte)) + return bytesjoin(byteList) + +def _memoize(f): + class memodict(dict): + def __missing__(self, key): + ret = f(key) + if len(key) == 1: + self[key] = ret + return ret + return memodict().__getitem__ + +# 00100111 -> 11100100 per byte, not to be confused with little/big endian. +# Bitmap data per byte is in the order that binary is written on the page +# with the least significant bit as far right as possible. This is the +# opposite of what makes sense algorithmically and hence this function. +@_memoize +def _reverseBytes(data): + if len(data) != 1: + return bytesjoin(map(_reverseBytes, data)) + byte = byteord(data) + result = 0 + for i in range(8): + result = result << 1 + result |= byte & 1 + byte = byte >> 1 + return bytechr(result) + +# This section of code is for reading and writing image data to/from XML. 
+ +def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + writer.begintag('rawimagedata') + writer.newline() + writer.dumphex(bitmapObject.imageData) + writer.endtag('rawimagedata') + writer.newline() + +def _readRawImageData(bitmapObject, name, attrs, content, ttFont): + bitmapObject.imageData = readHex(content) + +def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + metrics = bitmapObject.exportMetrics + del bitmapObject.exportMetrics + bitDepth = bitmapObject.exportBitDepth + del bitmapObject.exportBitDepth + + writer.begintag('rowimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height) + writer.newline() + for curRow in range(metrics.height): + rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics) + writer.simpletag('row', value=hexStr(rowData)) + writer.newline() + writer.endtag('rowimagedata') + writer.newline() + +def _readRowImageData(bitmapObject, name, attrs, content, ttFont): + bitDepth = safeEval(attrs['bitDepth']) + metrics = SmallGlyphMetrics() + metrics.width = safeEval(attrs['width']) + metrics.height = safeEval(attrs['height']) + + dataRows = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + # Chop off 'imagedata' from the tag to get just the option. + if name == 'row': + dataRows.append(deHexStr(attr['value'])) + bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics) + +def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + metrics = bitmapObject.exportMetrics + del bitmapObject.exportMetrics + bitDepth = bitmapObject.exportBitDepth + del bitmapObject.exportBitDepth + + # A dict for mapping binary to more readable/artistic ASCII characters. 
+ binaryConv = {'0':'.', '1':'@'} + + writer.begintag('bitwiseimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height) + writer.newline() + for curRow in range(metrics.height): + rowData = bitmapObject.getRow(curRow, bitDepth=1, metrics=metrics, reverseBytes=True) + rowData = _data2binary(rowData, metrics.width) + # Make the output a readable ASCII art form. + rowData = strjoin(map(binaryConv.get, rowData)) + writer.simpletag('row', value=rowData) + writer.newline() + writer.endtag('bitwiseimagedata') + writer.newline() + +def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont): + bitDepth = safeEval(attrs['bitDepth']) + metrics = SmallGlyphMetrics() + metrics.width = safeEval(attrs['width']) + metrics.height = safeEval(attrs['height']) + + # A dict for mapping from ASCII to binary. All characters are considered + # a '1' except space, period and '0' which maps to '0'. + binaryConv = {' ':'0', '.':'0', '0':'0'} + + dataRows = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + if name == 'row': + mapParams = zip(attr['value'], itertools.repeat('1')) + rowData = strjoin(itertools.starmap(binaryConv.get, mapParams)) + dataRows.append(_binary2data(rowData)) + + bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True) + +def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + try: + folder = os.path.dirname(writer.file.name) + except AttributeError: + # fall back to current directory if output file's directory isn't found + folder = '.' 
+ folder = os.path.join(folder, 'bitmaps') + filename = glyphName + bitmapObject.fileExtension + if not os.path.isdir(folder): + os.makedirs(folder) + folder = os.path.join(folder, 'strike%d' % strikeIndex) + if not os.path.isdir(folder): + os.makedirs(folder) + + fullPath = os.path.join(folder, filename) + writer.simpletag('extfileimagedata', value=fullPath) + writer.newline() + + with open(fullPath, "wb") as file: + file.write(bitmapObject.imageData) + +def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont): + fullPath = attrs['value'] + with open(fullPath, "rb") as file: + bitmapObject.imageData = file.read() + +# End of XML writing code. + +# Important information about the naming scheme. Used for identifying formats +# in XML. +_bitmapGlyphSubclassPrefix = 'ebdt_bitmap_format_' + +class BitmapGlyph(object): + + # For the external file format. This can be changed in subclasses. This way + # when the extfile option is turned on files have the form: glyphName.ext + # The default is just a flat binary file with no meaning. + fileExtension = '.bin' + + # Keep track of reading and writing of various forms. + xmlDataFunctions = { + 'raw': (_writeRawImageData, _readRawImageData), + 'row': (_writeRowImageData, _readRowImageData), + 'bitwise': (_writeBitwiseImageData, _readBitwiseImageData), + 'extfile': (_writeExtFileImageData, _readExtFileImageData), + } + + def __init__(self, data, ttFont): + self.data = data + self.ttFont = ttFont + # TODO Currently non-lazy decompilation is untested here... + #if not ttFont.lazy: + # self.decompile() + # del self.data + + def __getattr__(self, attr): + # Allow lazy decompile. + if attr[:2] == '__': + raise AttributeError(attr) + if not hasattr(self, "data"): + raise AttributeError(attr) + self.decompile() + del self.data + return getattr(self, attr) + + # Not a fan of this but it is needed for safer safety checking. 
+ def getFormat(self): + return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix):]) + + def toXML(self, strikeIndex, glyphName, writer, ttFont): + writer.begintag(self.__class__.__name__, [('name', glyphName)]) + writer.newline() + + self.writeMetrics(writer, ttFont) + # Use the internal write method to write using the correct output format. + self.writeData(strikeIndex, glyphName, writer, ttFont) + + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.readMetrics(name, attrs, content, ttFont) + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + if not name.endswith('imagedata'): + continue + # Chop off 'imagedata' from the tag to get just the option. + option = name[:-len('imagedata')] + assert option in self.__class__.xmlDataFunctions + self.readData(name, attr, content, ttFont) + + # Some of the glyphs have the metrics. This allows for metrics to be + # added if the glyph format has them. Default behavior is to do nothing. + def writeMetrics(self, writer, ttFont): + pass + + # The opposite of write metrics. + def readMetrics(self, name, attrs, content, ttFont): + pass + + def writeData(self, strikeIndex, glyphName, writer, ttFont): + try: + writeFunc, readFunc = self.__class__.xmlDataFunctions[ttFont.bitmapGlyphDataFormat] + except KeyError: + writeFunc = _writeRawImageData + writeFunc(strikeIndex, glyphName, self, writer, ttFont) + + def readData(self, name, attrs, content, ttFont): + # Chop off 'imagedata' from the tag to get just the option. + option = name[:-len('imagedata')] + writeFunc, readFunc = self.__class__.xmlDataFunctions[option] + readFunc(self, name, attrs, content, ttFont) + + +# A closure for creating a mixin for the two types of metrics handling. +# Most of the code is very similar so its easier to deal with here. +# Everything works just by passing the class that the mixin is for. 
+def _createBitmapPlusMetricsMixin(metricsClass): + # Both metrics names are listed here to make meaningful error messages. + metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__] + curMetricsName = metricsClass.__name__ + # Find which metrics this is for and determine the opposite name. + metricsId = metricStrings.index(curMetricsName) + oppositeMetricsName = metricStrings[1-metricsId] + + class BitmapPlusMetricsMixin(object): + + def writeMetrics(self, writer, ttFont): + self.metrics.toXML(writer, ttFont) + + def readMetrics(self, name, attrs, content, ttFont): + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == curMetricsName: + self.metrics = metricsClass() + self.metrics.fromXML(name, attrs, content, ttFont) + elif name == oppositeMetricsName: + print("Warning: %s being ignored in format %d." % oppositeMetricsName, self.getFormat()) + + return BitmapPlusMetricsMixin + +# Since there are only two types of mixin's just create them here. +BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics) +BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics) + +# Data that is bit aligned can be tricky to deal with. These classes implement +# helper functionality for dealing with the data and getting a particular row +# of bitwise data. Also helps implement fancy data export/import in XML. +class BitAlignedBitmapMixin(object): + + def _getBitRange(self, row, bitDepth, metrics): + rowBits = (bitDepth * metrics.width) + bitOffset = row * rowBits + return (bitOffset, bitOffset+rowBits) + + def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False): + if metrics is None: + metrics = self.metrics + assert 0 <= row and row < metrics.height, "Illegal row access in bitmap" + + # Loop through each byte. This can cover two bytes in the original data or + # a single byte if things happen to be aligned. 
The very last entry might + # not be aligned so take care to trim the binary data to size and pad with + # zeros in the row data. Bit aligned data is somewhat tricky. + # + # Example of data cut. Data cut represented in x's. + # '|' represents byte boundary. + # data = ...0XX|XXXXXX00|000... => XXXXXXXX + # or + # data = ...0XX|XXXX0000|000... => XXXXXX00 + # or + # data = ...000|XXXXXXXX|000... => XXXXXXXX + # or + # data = ...000|00XXXX00|000... => XXXX0000 + # + dataList = [] + bitRange = self._getBitRange(row, bitDepth, metrics) + stepRange = bitRange + (8,) + for curBit in range(*stepRange): + endBit = min(curBit+8, bitRange[1]) + numBits = endBit - curBit + cutPoint = curBit % 8 + firstByteLoc = curBit // 8 + secondByteLoc = endBit // 8 + if firstByteLoc < secondByteLoc: + numBitsCut = 8 - cutPoint + else: + numBitsCut = endBit - curBit + curByte = _reverseBytes(self.imageData[firstByteLoc]) + firstHalf = byteord(curByte) >> cutPoint + firstHalf = ((1<> numBitsCut) & ((1<<8-numBitsCut)-1) + ordDataList[secondByteLoc] |= secondByte + + # Save the image data with the bits going the correct way. 
+ self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList))) + +class ByteAlignedBitmapMixin(object): + + def _getByteRange(self, row, bitDepth, metrics): + rowBytes = (bitDepth * metrics.width + 7) // 8 + byteOffset = row * rowBytes + return (byteOffset, byteOffset+rowBytes) + + def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False): + if metrics is None: + metrics = self.metrics + assert 0 <= row and row < metrics.height, "Illegal row access in bitmap" + byteRange = self._getByteRange(row, bitDepth, metrics) + data = self.imageData[slice(*byteRange)] + if reverseBytes: + data = _reverseBytes(data) + return data + + def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False): + if metrics is None: + metrics = self.metrics + if reverseBytes: + dataRows = map(_reverseBytes, dataRows) + self.imageData = bytesjoin(dataRows) + +class ebdt_bitmap_format_1(ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(smallGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ebdt_bitmap_format_2(BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(smallGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph): + + def decompile(self): + self.imageData = self.data + + def compile(self, ttFont): + return self.imageData + +class ebdt_bitmap_format_6(ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = 
sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(bigGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ebdt_bitmap_format_7(BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(bigGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ComponentBitmapGlyph(BitmapGlyph): + + def toXML(self, strikeIndex, glyphName, writer, ttFont): + writer.begintag(self.__class__.__name__, [('name', glyphName)]) + writer.newline() + + self.writeMetrics(writer, ttFont) + + writer.begintag('components') + writer.newline() + for curComponent in self.componentArray: + curComponent.toXML(writer, ttFont) + writer.endtag('components') + writer.newline() + + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.readMetrics(name, attrs, content, ttFont) + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + if name == 'components': + self.componentArray = [] + for compElement in content: + if not isinstance(compElement, tuple): + continue + name, attrs, content = compElement + if name == 'ebdtComponent': + curComponent = EbdtComponent() + curComponent.fromXML(name, attrs, content, ttFont) + self.componentArray.append(curComponent) + else: + print("Warning: '%s' being ignored in component array." 
% name) + + +class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + data = data[1:] + + (numComponents,) = struct.unpack(">H", data[:2]) + data = data[2:] + self.componentArray = [] + for i in range(numComponents): + curComponent = EbdtComponent() + dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent) + curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode) + self.componentArray.append(curComponent) + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) + dataList.append(b'\0') + dataList.append(struct.pack(">H", len(self.componentArray))) + for curComponent in self.componentArray: + curComponent.glyphCode = ttFont.getGlyphID(curComponent.name) + dataList.append(sstruct.pack(ebdtComponentFormat, curComponent)) + return bytesjoin(dataList) + + +class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + (numComponents,) = struct.unpack(">H", data[:2]) + data = data[2:] + self.componentArray = [] + for i in range(numComponents): + curComponent = EbdtComponent() + dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent) + curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode) + self.componentArray.append(curComponent) + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + dataList.append(struct.pack(">H", len(self.componentArray))) + for curComponent in self.componentArray: + curComponent.glyphCode = ttFont.getGlyphID(curComponent.name) + dataList.append(sstruct.pack(ebdtComponentFormat, curComponent)) + return bytesjoin(dataList) + + +# Dictionary of bitmap 
formats to the class representing that format +# currently only the ones listed in this map are the ones supported. +ebdt_bitmap_classes = { + 1: ebdt_bitmap_format_1, + 2: ebdt_bitmap_format_2, + 5: ebdt_bitmap_format_5, + 6: ebdt_bitmap_format_6, + 7: ebdt_bitmap_format_7, + 8: ebdt_bitmap_format_8, + 9: ebdt_bitmap_format_9, + } diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/E_B_L_C_.py fonttools-3.0/Lib/fontTools/ttLib/tables/E_B_L_C_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/E_B_L_C_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/E_B_L_C_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,617 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . import DefaultTable +from fontTools.misc.textTools import safeEval +from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat +import struct +import itertools +from collections import deque + +eblcHeaderFormat = """ + > # big endian + version: 16.16F + numSizes: I +""" +# The table format string is split to handle sbitLineMetrics simply. +bitmapSizeTableFormatPart1 = """ + > # big endian + indexSubTableArrayOffset: I + indexTablesSize: I + numberOfIndexSubTables: I + colorRef: I +""" +# The compound type for hori and vert. +sbitLineMetricsFormat = """ + > # big endian + ascender: b + descender: b + widthMax: B + caretSlopeNumerator: b + caretSlopeDenominator: b + caretOffset: b + minOriginSB: b + minAdvanceSB: b + maxBeforeBL: b + minAfterBL: b + pad1: b + pad2: b +""" +# hori and vert go between the two parts. 
+bitmapSizeTableFormatPart2 = """ + > # big endian + startGlyphIndex: H + endGlyphIndex: H + ppemX: B + ppemY: B + bitDepth: B + flags: b +""" + +indexSubTableArrayFormat = ">HHL" +indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat) + +indexSubHeaderFormat = ">HHL" +indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat) + +codeOffsetPairFormat = ">HH" +codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat) + +class table_E_B_L_C_(DefaultTable.DefaultTable): + + dependencies = ['EBDT'] + + # This method can be overridden in subclasses to support new formats + # without changing the other implementation. Also can be used as a + # convenience method for coverting a font file to an alternative format. + def getIndexFormatClass(self, indexFormat): + return eblc_sub_table_classes[indexFormat] + + def decompile(self, data, ttFont): + + # Save the original data because offsets are from the start of the table. + origData = data + + dummy, data = sstruct.unpack2(eblcHeaderFormat, data, self) + + self.strikes = [] + for curStrikeIndex in range(self.numSizes): + curStrike = Strike() + self.strikes.append(curStrike) + curTable = curStrike.bitmapSizeTable + dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart1, data, curTable) + for metric in ('hori', 'vert'): + metricObj = SbitLineMetrics() + vars(curTable)[metric] = metricObj + dummy, data = sstruct.unpack2(sbitLineMetricsFormat, data, metricObj) + dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart2, data, curTable) + + for curStrike in self.strikes: + curTable = curStrike.bitmapSizeTable + for subtableIndex in range(curTable.numberOfIndexSubTables): + lowerBound = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize + upperBound = lowerBound + indexSubTableArraySize + data = origData[lowerBound:upperBound] + + tup = struct.unpack(indexSubTableArrayFormat, data) + (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup + offsetToIndexSubTable = 
curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable + data = origData[offsetToIndexSubTable:] + + tup = struct.unpack(indexSubHeaderFormat, data[:indexSubHeaderSize]) + (indexFormat, imageFormat, imageDataOffset) = tup + + indexFormatClass = self.getIndexFormatClass(indexFormat) + indexSubTable = indexFormatClass(data[indexSubHeaderSize:], ttFont) + indexSubTable.firstGlyphIndex = firstGlyphIndex + indexSubTable.lastGlyphIndex = lastGlyphIndex + indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable + indexSubTable.indexFormat = indexFormat + indexSubTable.imageFormat = imageFormat + indexSubTable.imageDataOffset = imageDataOffset + curStrike.indexSubTables.append(indexSubTable) + + def compile(self, ttFont): + + dataList = [] + self.numSizes = len(self.strikes) + dataList.append(sstruct.pack(eblcHeaderFormat, self)) + + # Data size of the header + bitmapSizeTable needs to be calculated + # in order to form offsets. This value will hold the size of the data + # in dataList after all the data is consolidated in dataList. + dataSize = len(dataList[0]) + + # The table will be structured in the following order: + # (0) header + # (1) Each bitmapSizeTable [1 ... self.numSizes] + # (2) Alternate between indexSubTableArray and indexSubTable + # for each bitmapSizeTable present. + # + # The issue is maintaining the proper offsets when table information + # gets moved around. All offsets and size information must be recalculated + # when building the table to allow editing within ttLib and also allow easy + # import/export to and from XML. All of this offset information is lost + # when exporting to XML so everything must be calculated fresh so importing + # from XML will work cleanly. Only byte offset and size information is + # calculated fresh. Count information like numberOfIndexSubTables is + # checked through assertions. 
If the information in this table was not + # touched or was changed properly then these types of values should match. + # + # The table will be rebuilt the following way: + # (0) Precompute the size of all the bitmapSizeTables. This is needed to + # compute the offsets properly. + # (1) For each bitmapSizeTable compute the indexSubTable and + # indexSubTableArray pair. The indexSubTable must be computed first + # so that the offset information in indexSubTableArray can be + # calculated. Update the data size after each pairing. + # (2) Build each bitmapSizeTable. + # (3) Consolidate all the data into the main dataList in the correct order. + + for curStrike in self.strikes: + dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1) + dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat) + dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2) + + indexSubTablePairDataList = [] + for curStrike in self.strikes: + curTable = curStrike.bitmapSizeTable + curTable.numberOfIndexSubTables = len(curStrike.indexSubTables) + curTable.indexSubTableArrayOffset = dataSize + + # Precompute the size of the indexSubTableArray. This information + # is important for correctly calculating the new value for + # additionalOffsetToIndexSubtable. 
+ sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize + lowerBound = dataSize + dataSize += sizeOfSubTableArray + upperBound = dataSize + + indexSubTableDataList = [] + for indexSubTable in curStrike.indexSubTables: + indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset + glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names)) + indexSubTable.firstGlyphIndex = min(glyphIds) + indexSubTable.lastGlyphIndex = max(glyphIds) + data = indexSubTable.compile(ttFont) + indexSubTableDataList.append(data) + dataSize += len(data) + curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables) + curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables) + + for i in curStrike.indexSubTables: + data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable) + indexSubTablePairDataList.append(data) + indexSubTablePairDataList.extend(indexSubTableDataList) + curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset + + for curStrike in self.strikes: + curTable = curStrike.bitmapSizeTable + data = sstruct.pack(bitmapSizeTableFormatPart1, curTable) + dataList.append(data) + for metric in ('hori', 'vert'): + metricObj = vars(curTable)[metric] + data = sstruct.pack(sbitLineMetricsFormat, metricObj) + dataList.append(data) + data = sstruct.pack(bitmapSizeTableFormatPart2, curTable) + dataList.append(data) + dataList.extend(indexSubTablePairDataList) + + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + writer.simpletag('header', [('version', self.version)]) + writer.newline() + for curIndex, curStrike in enumerate(self.strikes): + curStrike.toXML(curIndex, writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == 'header': + self.version = safeEval(attrs['version']) + elif name == 'strike': + if not hasattr(self, 'strikes'): + self.strikes = [] + strikeIndex = 
safeEval(attrs['index']) + curStrike = Strike() + curStrike.fromXML(name, attrs, content, ttFont, self) + + # Grow the strike array to the appropriate size. The XML format + # allows for the strike index value to be out of order. + if strikeIndex >= len(self.strikes): + self.strikes += [None] * (strikeIndex + 1 - len(self.strikes)) + assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices." + self.strikes[strikeIndex] = curStrike + +class Strike(object): + + def __init__(self): + self.bitmapSizeTable = BitmapSizeTable() + self.indexSubTables = [] + + def toXML(self, strikeIndex, writer, ttFont): + writer.begintag('strike', [('index', strikeIndex)]) + writer.newline() + self.bitmapSizeTable.toXML(writer, ttFont) + writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.') + writer.newline() + for indexSubTable in self.indexSubTables: + indexSubTable.toXML(writer, ttFont) + writer.endtag('strike') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont, locator): + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'bitmapSizeTable': + self.bitmapSizeTable.fromXML(name, attrs, content, ttFont) + elif name.startswith(_indexSubTableSubclassPrefix): + indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):]) + indexFormatClass = locator.getIndexFormatClass(indexFormat) + indexSubTable = indexFormatClass(None, None) + indexSubTable.indexFormat = indexFormat + indexSubTable.fromXML(name, attrs, content, ttFont) + self.indexSubTables.append(indexSubTable) + + +class BitmapSizeTable(object): + + # Returns all the simple metric names that bitmap size table + # cares about in terms of XML creation. 
+ def _getXMLMetricNames(self): + dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1] + dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1] + # Skip the first 3 data names because they are byte offsets and counts. + return dataNames[3:] + + def toXML(self, writer, ttFont): + writer.begintag('bitmapSizeTable') + writer.newline() + for metric in ('hori', 'vert'): + getattr(self, metric).toXML(metric, writer, ttFont) + for metricName in self._getXMLMetricNames(): + writer.simpletag(metricName, value=getattr(self, metricName)) + writer.newline() + writer.endtag('bitmapSizeTable') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + # Create a lookup for all the simple names that make sense to + # bitmap size table. Only read the information from these names. + dataNames = set(self._getXMLMetricNames()) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'sbitLineMetrics': + direction = attrs['direction'] + assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid." + metricObj = SbitLineMetrics() + metricObj.fromXML(name, attrs, content, ttFont) + vars(self)[direction] = metricObj + elif name in dataNames: + vars(self)[name] = safeEval(attrs['value']) + else: + print("Warning: unknown name '%s' being ignored in BitmapSizeTable." 
% name) + + +class SbitLineMetrics(object): + + def toXML(self, name, writer, ttFont): + writer.begintag('sbitLineMetrics', [('direction', name)]) + writer.newline() + for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]: + writer.simpletag(metricName, value=getattr(self, metricName)) + writer.newline() + writer.endtag('sbitLineMetrics') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1]) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name in metricNames: + vars(self)[name] = safeEval(attrs['value']) + +# Important information about the naming scheme. Used for identifying subtables. +_indexSubTableSubclassPrefix = 'eblc_index_sub_table_' + +class EblcIndexSubTable(object): + + def __init__(self, data, ttFont): + self.data = data + self.ttFont = ttFont + # TODO Currently non-lazy decompiling doesn't work for this class... + #if not ttFont.lazy: + # self.decompile() + # del self.data, self.ttFont + + def __getattr__(self, attr): + # Allow lazy decompile. + if attr[:2] == '__': + raise AttributeError(attr) + if not hasattr(self, "data"): + raise AttributeError(attr) + self.decompile() + del self.data, self.ttFont + return getattr(self, attr) + + # This method just takes care of the indexSubHeader. Implementing subclasses + # should call it to compile the indexSubHeader and then continue compiling + # the remainder of their unique format. + def compile(self, ttFont): + return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset) + + # Creates the XML for bitmap glyphs. Each index sub table basically makes + # the same XML except for specific metric information that is written + # out via a method call that a subclass implements optionally. 
+ def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, [ + ('imageFormat', self.imageFormat), + ('firstGlyphIndex', self.firstGlyphIndex), + ('lastGlyphIndex', self.lastGlyphIndex), + ]) + writer.newline() + self.writeMetrics(writer, ttFont) + # Write out the names as thats all thats needed to rebuild etc. + # For font debugging of consecutive formats the ids are also written. + # The ids are not read when moving from the XML format. + glyphIds = map(ttFont.getGlyphID, self.names) + for glyphName, glyphId in zip(self.names, glyphIds): + writer.simpletag('glyphLoc', name=glyphName, id=glyphId) + writer.newline() + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + # Read all the attributes. Even though the glyph indices are + # recalculated, they are still read in case there needs to + # be an immediate export of the data. + self.imageFormat = safeEval(attrs['imageFormat']) + self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex']) + self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex']) + + self.readMetrics(name, attrs, content, ttFont) + + self.names = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'glyphLoc': + self.names.append(attrs['name']) + + # A helper method that writes the metrics for the index sub table. It also + # is responsible for writing the image size for fixed size data since fixed + # size is not recalculated on compile. Default behavior is to do nothing. + def writeMetrics(self, writer, ttFont): + pass + + # A helper method that is the inverse of writeMetrics. + def readMetrics(self, name, attrs, content, ttFont): + pass + + # This method is for fixed glyph data sizes. There are formats where + # the glyph data is fixed but are actually composite glyphs. To handle + # this the font spec in indexSubTable makes the data the size of the + # fixed size by padding the component arrays. 
This function abstracts + # out this padding process. Input is data unpadded. Output is data + # padded only in fixed formats. Default behavior is to return the data. + def padBitmapData(self, data): + return data + + # Remove any of the glyph locations and names that are flagged as skipped. + # This only occurs in formats {1,3}. + def removeSkipGlyphs(self): + # Determines if a name, location pair is a valid data location. + # Skip glyphs are marked when the size is equal to zero. + def isValidLocation(args): + (name, (startByte, endByte)) = args + return startByte < endByte + # Remove all skip glyphs. + dataPairs = list(filter(isValidLocation, zip(self.names, self.locations))) + self.names, self.locations = list(map(list, zip(*dataPairs))) + +# A closure for creating a custom mixin. This is done because formats 1 and 3 +# are very similar. The only difference between them is the size per offset +# value. Code put in here should handle both cases generally. +def _createOffsetArrayIndexSubTableMixin(formatStringForDataType): + + # Prep the data size for the offset array data format. + dataFormat = '>'+formatStringForDataType + offsetDataSize = struct.calcsize(dataFormat) + + class OffsetArrayIndexSubTableMixin(object): + + def decompile(self): + + numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1 + indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)] + indexingLocations = zip(indexingOffsets, indexingOffsets[1:]) + offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations] + + glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) + modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray] + self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:])) + + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + self.removeSkipGlyphs() + + def compile(self, ttFont): + # First make sure that all the data lines up properly. 
Formats 1 and 3 + # must have all its data lined up consecutively. If not this will fail. + for curLoc, nxtLoc in zip(self.locations, self.locations[1:]): + assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats" + + glyphIds = list(map(ttFont.getGlyphID, self.names)) + # Make sure that all ids are sorted strictly increasing. + assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1)) + + # Run a simple algorithm to add skip glyphs to the data locations at + # the places where an id is not present. + idQueue = deque(glyphIds) + locQueue = deque(self.locations) + allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) + allLocations = [] + for curId in allGlyphIds: + if curId != idQueue[0]: + allLocations.append((locQueue[0][0], locQueue[0][0])) + else: + idQueue.popleft() + allLocations.append(locQueue.popleft()) + + # Now that all the locations are collected, pack them appropriately into + # offsets. This is the form where offset[i] is the location and + # offset[i+1]-offset[i] is the size of the data location. + offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]] + # Image data offset must be less than or equal to the minimum of locations. + # This offset may change the value for round tripping but is safer and + # allows imageDataOffset to not be required to be in the XML version. + self.imageDataOffset = min(offsets) + offsetArray = [offset - self.imageDataOffset for offset in offsets] + + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray] + # Take care of any padding issues. Only occurs in format 3. + if offsetDataSize * len(dataList) % 4 != 0: + dataList.append(struct.pack(dataFormat, 0)) + return bytesjoin(dataList) + + return OffsetArrayIndexSubTableMixin + +# A Mixin for functionality shared between the different kinds +# of fixed sized data handling. 
Both kinds have big metrics so +# that kind of special processing is also handled in this mixin. +class FixedSizeIndexSubTableMixin(object): + + def writeMetrics(self, writer, ttFont): + writer.simpletag('imageSize', value=self.imageSize) + writer.newline() + self.metrics.toXML(writer, ttFont) + + def readMetrics(self, name, attrs, content, ttFont): + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'imageSize': + self.imageSize = safeEval(attrs['value']) + elif name == BigGlyphMetrics.__name__: + self.metrics = BigGlyphMetrics() + self.metrics.fromXML(name, attrs, content, ttFont) + elif name == SmallGlyphMetrics.__name__: + print("Warning: SmallGlyphMetrics being ignored in format %d." % self.indexFormat) + + def padBitmapData(self, data): + # Make sure that the data isn't bigger than the fixed size. + assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat + # Pad the data so that it matches the fixed size. + pad = (self.imageSize - len(data)) * b'\0' + return data + pad + +class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable): + pass + +class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable): + + def decompile(self): + (self.imageSize,) = struct.unpack(">L", self.data[:4]) + self.metrics = BigGlyphMetrics() + sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics) + glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) + offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] + self.locations = list(zip(offsets, offsets[1:])) + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + + def compile(self, ttFont): + glyphIds = list(map(ttFont.getGlyphID, self.names)) + # Make sure all the ids are consecutive. This is required by Format 2. 
+ assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive." + self.imageDataOffset = min(zip(*self.locations)[0]) + + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList.append(struct.pack(">L", self.imageSize)) + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + return bytesjoin(dataList) + +class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable): + pass + +class eblc_index_sub_table_4(EblcIndexSubTable): + + def decompile(self): + + (numGlyphs,) = struct.unpack(">L", self.data[:4]) + data = self.data[4:] + indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)] + indexingLocations = zip(indexingOffsets, indexingOffsets[1:]) + glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations] + glyphIds, offsets = list(map(list, zip(*glyphArray))) + # There are one too many glyph ids. Get rid of the last one. + glyphIds.pop() + + offsets = [offset + self.imageDataOffset for offset in offsets] + self.locations = list(zip(offsets, offsets[1:])) + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + + def compile(self, ttFont): + # First make sure that all the data lines up properly. Format 4 + # must have all its data lined up consecutively. If not this will fail. + for curLoc, nxtLoc in zip(self.locations, self.locations[1:]): + assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4" + + offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]] + # Image data offset must be less than or equal to the minimum of locations. + # Resetting this offset may change the value for round tripping but is safer + # and allows imageDataOffset to not be required to be in the XML version. 
+ self.imageDataOffset = min(offsets) + offsets = [offset - self.imageDataOffset for offset in offsets] + glyphIds = list(map(ttFont.getGlyphID, self.names)) + # Create an iterator over the ids plus a padding value. + idsPlusPad = list(itertools.chain(glyphIds, [0])) + + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList.append(struct.pack(">L", len(glyphIds))) + tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)] + dataList += tmp + data = bytesjoin(dataList) + return data + +class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable): + + def decompile(self): + self.origDataLen = 0 + (self.imageSize,) = struct.unpack(">L", self.data[:4]) + data = self.data[4:] + self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics()) + (numGlyphs,) = struct.unpack(">L", data[:4]) + data = data[4:] + glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)] + + offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] + self.locations = list(zip(offsets, offsets[1:])) + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + + def compile(self, ttFont): + self.imageDataOffset = min(zip(*self.locations)[0]) + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList.append(struct.pack(">L", self.imageSize)) + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + glyphIds = list(map(ttFont.getGlyphID, self.names)) + dataList.append(struct.pack(">L", len(glyphIds))) + dataList += [struct.pack(">H", curId) for curId in glyphIds] + if len(glyphIds) % 2 == 1: + dataList.append(struct.pack(">H", 0)) + return bytesjoin(dataList) + +# Dictionary of indexFormat to the class representing that format. 
+eblc_sub_table_classes = { + 1: eblc_index_sub_table_1, + 2: eblc_index_sub_table_2, + 3: eblc_index_sub_table_3, + 4: eblc_index_sub_table_4, + 5: eblc_index_sub_table_5, + } diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_f_e_a_t.py fonttools-3.0/Lib/fontTools/ttLib/tables/_f_e_a_t.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_f_e_a_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_f_e_a_t.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table__f_e_a_t(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/F_F_T_M_.py fonttools-3.0/Lib/fontTools/ttLib/tables/F_F_T_M_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/F_F_T_M_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/F_F_T_M_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,42 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from fontTools.misc.timeTools import timestampFromString, timestampToString +from . 
import DefaultTable + +FFTMFormat = """ + > # big endian + version: I + FFTimeStamp: Q + sourceCreated: Q + sourceModified: Q +""" + +class table_F_F_T_M_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + dummy, rest = sstruct.unpack2(FFTMFormat, data, self) + + def compile(self, ttFont): + data = sstruct.pack(FFTMFormat, self) + return data + + def toXML(self, writer, ttFont): + writer.comment("FontForge's timestamp, font source creation and modification dates") + writer.newline() + formatstring, names, fixes = sstruct.getformat(FFTMFormat) + for name in names: + value = getattr(self, name) + if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): + value = timestampToString(value) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): + value = timestampFromString(value) + else: + value = safeEval(value) + setattr(self, name, value) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_f_p_g_m.py fonttools-3.0/Lib/fontTools/ttLib/tables/_f_p_g_m.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_f_p_g_m.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_f_p_g_m.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,26 +1,51 @@ -import DefaultTable -import array -import ttProgram +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable +from . 
import ttProgram class table__f_p_g_m(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): program = ttProgram.Program() program.fromBytecode(data) self.program = program - + def compile(self, ttFont): return self.program.getBytecode() - + def toXML(self, writer, ttFont): self.program.toXML(writer, ttFont) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): program = ttProgram.Program() - program.fromXML((name, attrs, content), ttFont) + program.fromXML(name, attrs, content, ttFont) self.program = program - - def __len__(self): - return len(self.program) + def __bool__(self): + """ + >>> fpgm = table__f_p_g_m() + >>> bool(fpgm) + False + >>> p = ttProgram.Program() + >>> fpgm.program = p + >>> bool(fpgm) + False + >>> bc = bytearray([0]) + >>> p.fromBytecode(bc) + >>> bool(fpgm) + True + >>> p.bytecode.pop() + 0 + >>> bool(fpgm) + False + """ + return hasattr(self, 'program') and bool(self.program) + + __nonzero__ = __bool__ + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_f_v_a_r.py fonttools-3.0/Lib/fontTools/ttLib/tables/_f_v_a_r.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_f_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_f_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,187 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval, num2binary, binary2num +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import struct + + +# Apple's documentation of 'fvar': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html + +FVAR_HEADER_FORMAT = """ + > # big endian + version: L + offsetToData: H + countSizePairs: H + axisCount: H + axisSize: H + instanceCount: H + instanceSize: H +""" + +FVAR_AXIS_FORMAT = """ + > # big endian + axisTag: 4s + minValue: 16.16F + defaultValue: 16.16F + maxValue: 16.16F + flags: H + nameID: H +""" + +FVAR_INSTANCE_FORMAT = """ + > # big endian + nameID: H + flags: H +""" + +class table__f_v_a_r(DefaultTable.DefaultTable): + dependencies = ["name"] + + def __init__(self, tag="fvar"): + DefaultTable.DefaultTable.__init__(self, tag) + self.axes = [] + self.instances = [] + + def compile(self, ttFont): + header = { + "version": 0x00010000, + "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT), + "countSizePairs": 2, + "axisCount": len(self.axes), + "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT), + "instanceCount": len(self.instances), + "instanceSize": sstruct.calcsize(FVAR_INSTANCE_FORMAT) + len(self.axes) * 4 + } + result = [sstruct.pack(FVAR_HEADER_FORMAT, header)] + result.extend([axis.compile() for axis in self.axes]) + axisTags = [axis.axisTag for axis in self.axes] + result.extend([instance.compile(axisTags) for instance in self.instances]) + return bytesjoin(result) + + def decompile(self, data, ttFont): + header = {} + headerSize = sstruct.calcsize(FVAR_HEADER_FORMAT) + header = sstruct.unpack(FVAR_HEADER_FORMAT, data[0:headerSize]) + if header["version"] != 0x00010000: + raise TTLibError("unsupported 'fvar' version %04x" % header["version"]) + pos = header["offsetToData"] + axisSize = header["axisSize"] + for _ in range(header["axisCount"]): + axis = Axis() + axis.decompile(data[pos:pos+axisSize]) + self.axes.append(axis) + pos += axisSize + instanceSize = header["instanceSize"] + axisTags = [axis.axisTag for axis in self.axes] + for _ in range(header["instanceCount"]): + instance = 
NamedInstance() + instance.decompile(data[pos:pos+instanceSize], axisTags) + self.instances.append(instance) + pos += instanceSize + + def toXML(self, writer, ttFont, progress=None): + for axis in self.axes: + axis.toXML(writer, ttFont) + for instance in self.instances: + instance.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "Axis": + axis = Axis() + axis.fromXML(name, attrs, content, ttFont) + self.axes.append(axis) + elif name == "NamedInstance": + instance = NamedInstance() + instance.fromXML(name, attrs, content, ttFont) + self.instances.append(instance) + +class Axis(object): + def __init__(self): + self.axisTag = None + self.nameID = 0 + self.flags = 0 # not exposed in XML because spec defines no values + self.minValue = -1.0 + self.defaultValue = 0.0 + self.maxValue = 1.0 + + def compile(self): + return sstruct.pack(FVAR_AXIS_FORMAT, self) + + def decompile(self, data): + sstruct.unpack2(FVAR_AXIS_FORMAT, data, self) + + def toXML(self, writer, ttFont): + name = ttFont["name"].getDebugName(self.nameID) + if name is not None: + writer.newline() + writer.comment(name) + writer.newline() + writer.begintag("Axis") + writer.newline() + for tag, value in [("AxisTag", self.axisTag), + ("MinValue", str(self.minValue)), + ("DefaultValue", str(self.defaultValue)), + ("MaxValue", str(self.maxValue)), + ("NameID", str(self.nameID))]: + writer.begintag(tag) + writer.write(value) + writer.endtag(tag) + writer.newline() + writer.endtag("Axis") + writer.newline() + + def fromXML(self, name, _attrs, content, ttFont): + assert(name == "Axis") + for tag, _, value in filter(lambda t: type(t) is tuple, content): + value = ''.join(value) + if tag == "AxisTag": + self.axisTag = value + elif tag in ["MinValue", "DefaultValue", "MaxValue", "NameID"]: + setattr(self, tag[0].lower() + tag[1:], safeEval(value)) + +class NamedInstance(object): + def __init__(self): + self.nameID = 0 + self.flags = 0 # not exposed in XML because spec defines no 
values + self.coordinates = {} + + def compile(self, axisTags): + result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)] + for axis in axisTags: + fixedCoord = floatToFixed(self.coordinates[axis], 16) + result.append(struct.pack(">l", fixedCoord)) + return bytesjoin(result) + + def decompile(self, data, axisTags): + sstruct.unpack2(FVAR_INSTANCE_FORMAT, data, self) + pos = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + for axis in axisTags: + value = struct.unpack(">l", data[pos : pos + 4])[0] + self.coordinates[axis] = fixedToFloat(value, 16) + pos += 4 + + def toXML(self, writer, ttFont): + name = ttFont["name"].getDebugName(self.nameID) + if name is not None: + writer.newline() + writer.comment(name) + writer.newline() + writer.begintag("NamedInstance", nameID=self.nameID) + writer.newline() + for axis in ttFont["fvar"].axes: + writer.simpletag("coord", axis=axis.axisTag, + value=self.coordinates[axis.axisTag]) + writer.newline() + writer.endtag("NamedInstance") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + assert(name == "NamedInstance") + self.nameID = safeEval(attrs["nameID"]) + for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content): + if tag == "coord": + self.coordinates[elementAttrs["axis"]] = safeEval(elementAttrs["value"]) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_f_v_a_r_test.py fonttools-3.0/Lib/fontTools/ttLib/tables/_f_v_a_r_test.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_f_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_f_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,190 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis, NamedInstance +from 
fontTools.ttLib.tables._n_a_m_e import table__n_a_m_e, NameRecord +import unittest + + + +FVAR_DATA = deHexStr( + "00 01 00 00 00 10 00 02 00 02 00 14 00 02 00 0C " + "77 67 68 74 00 64 00 00 01 90 00 00 03 84 00 00 00 00 01 01 " + "77 64 74 68 00 32 00 00 00 64 00 00 00 c8 00 00 00 00 01 02 " + "01 03 00 00 01 2c 00 00 00 64 00 00 " + "01 04 00 00 01 2c 00 00 00 4b 00 00") + +FVAR_AXIS_DATA = deHexStr( + "6F 70 73 7a ff ff 80 00 00 01 4c cd 00 01 80 00 00 00 01 59") + +FVAR_INSTANCE_DATA = deHexStr("01 59 00 00 00 00 b3 33 00 00 80 00") + + +def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in content.splitlines()][1:] + + +def AddName(font, name): + nameTable = font.get("name") + if nameTable is None: + nameTable = font["name"] = table__n_a_m_e() + nameTable.names = [] + namerec = NameRecord() + namerec.nameID = 1 + max([n.nameID for n in nameTable.names] + [256]) + namerec.string = name.encode('mac_roman') + namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0) + nameTable.names.append(namerec) + return namerec + + +def MakeFont(): + axes = [("wght", "Weight", 100, 400, 900), ("wdth", "Width", 50, 100, 200)] + instances = [("Light", 300, 100), ("Light Condensed", 300, 75)] + fvarTable = table__f_v_a_r() + font = {"fvar": fvarTable} + for tag, name, minValue, defaultValue, maxValue in axes: + axis = Axis() + axis.axisTag = tag + axis.defaultValue = defaultValue + axis.minValue, axis.maxValue = minValue, maxValue + axis.nameID = AddName(font, name).nameID + fvarTable.axes.append(axis) + for name, weight, width in instances: + inst = NamedInstance() + inst.nameID = AddName(font, name).nameID + inst.coordinates = {"wght": weight, "wdth": width} + fvarTable.instances.append(inst) + return font + + +class FontVariationTableTest(unittest.TestCase): + def test_compile(self): + font = MakeFont() + h = font["fvar"].compile(font) + self.assertEqual(FVAR_DATA, font["fvar"].compile(font)) + + def 
test_decompile(self): + fvar = table__f_v_a_r() + fvar.decompile(FVAR_DATA, ttFont={"fvar": fvar}) + self.assertEqual(["wght", "wdth"], [a.axisTag for a in fvar.axes]) + self.assertEqual([259, 260], [i.nameID for i in fvar.instances]) + + def test_toXML(self): + font = MakeFont() + writer = XMLWriter(BytesIO()) + font["fvar"].toXML(writer, font) + xml = writer.file.getvalue().decode("utf-8") + self.assertEqual(2, xml.count("")) + self.assertTrue("wght" in xml) + self.assertTrue("wdth" in xml) + self.assertEqual(2, xml.count("" in xml) + self.assertTrue("" in xml) + + def test_fromXML(self): + fvar = table__f_v_a_r() + fvar.fromXML("Axis", {}, [("AxisTag", {}, ["opsz"])], ttFont=None) + fvar.fromXML("Axis", {}, [("AxisTag", {}, ["slnt"])], ttFont=None) + fvar.fromXML("NamedInstance", {"nameID": "765"}, [], ttFont=None) + fvar.fromXML("NamedInstance", {"nameID": "234"}, [], ttFont=None) + self.assertEqual(["opsz", "slnt"], [a.axisTag for a in fvar.axes]) + self.assertEqual([765, 234], [i.nameID for i in fvar.instances]) + + +class AxisTest(unittest.TestCase): + def test_compile(self): + axis = Axis() + axis.axisTag, axis.nameID = ('opsz', 345) + axis.minValue, axis.defaultValue, axis.maxValue = (-0.5, 1.3, 1.5) + self.assertEqual(FVAR_AXIS_DATA, axis.compile()) + + def test_decompile(self): + axis = Axis() + axis.decompile(FVAR_AXIS_DATA) + self.assertEqual("opsz", axis.axisTag) + self.assertEqual(345, axis.nameID) + self.assertEqual(-0.5, axis.minValue) + self.assertEqual(1.3, axis.defaultValue) + self.assertEqual(1.5, axis.maxValue) + + def test_toXML(self): + font = MakeFont() + axis = Axis() + axis.decompile(FVAR_AXIS_DATA) + AddName(font, "Optical Size").nameID = 256 + axis.nameID = 256 + writer = XMLWriter(BytesIO()) + axis.toXML(writer, font) + self.assertEqual([ + '', + '', + '', + 'opsz', + '-0.5', + '1.3', + '1.5', + '256', + '' + ], xml_lines(writer)) + + def test_fromXML(self): + axis = Axis() + axis.fromXML("Axis", {}, [ + ("AxisTag", {}, ["wght"]), + 
("MinValue", {}, ["100"]), + ("DefaultValue", {}, ["400"]), + ("MaxValue", {}, ["900"]), + ("NameID", {}, ["256"]) + ], ttFont=None) + self.assertEqual("wght", axis.axisTag) + self.assertEqual(100, axis.minValue) + self.assertEqual(400, axis.defaultValue) + self.assertEqual(900, axis.maxValue) + self.assertEqual(256, axis.nameID) + + +class NamedInstanceTest(unittest.TestCase): + def test_compile(self): + inst = NamedInstance() + inst.nameID = 345 + inst.coordinates = {"wght": 0.7, "wdth": 0.5} + self.assertEqual(FVAR_INSTANCE_DATA, inst.compile(["wght", "wdth"])) + + def test_decompile(self): + inst = NamedInstance() + inst.decompile(FVAR_INSTANCE_DATA, ["wght", "wdth"]) + self.assertEqual(345, inst.nameID) + self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) + + def test_toXML(self): + font = MakeFont() + inst = NamedInstance() + inst.nameID = AddName(font, "Light Condensed").nameID + inst.coordinates = {"wght": 0.7, "wdth": 0.5} + writer = XMLWriter(BytesIO()) + inst.toXML(writer, font) + self.assertEqual([ + '', + '', + '' % inst.nameID, + '', + '', + '' + ], xml_lines(writer)) + + def test_fromXML(self): + inst = NamedInstance() + attrs = {"nameID": "345"} + inst.fromXML("NamedInstance", attrs, [ + ("coord", {"axis": "wght", "value": "0.7"}, []), + ("coord", {"axis": "wdth", "value": "0.5"}, []), + ], ttFont=MakeFont()) + self.assertEqual(345, inst.nameID) + self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_g_a_s_p.py fonttools-3.0/Lib/fontTools/ttLib/tables/_g_a_s_p.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_g_a_s_p.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_g_a_s_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,6 +1,8 @@ -import DefaultTable -import struct +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.misc.textTools 
import safeEval +from . import DefaultTable +import struct GASP_SYMMETRIC_GRIDFIT = 0x0004 @@ -9,7 +11,7 @@ GASP_GRIDFIT = 0x0001 class table__g_a_s_p(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): self.version, numRanges = struct.unpack(">HH", data[:4]) assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version @@ -20,33 +22,30 @@ self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior) data = data[4:] assert not data, "too much data" - + def compile(self, ttFont): version = 0 # ignore self.version numRanges = len(self.gaspRange) - data = "" - items = self.gaspRange.items() - items.sort() + data = b"" + items = sorted(self.gaspRange.items()) for rangeMaxPPEM, rangeGaspBehavior in items: data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior) if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY): version = 1 data = struct.pack(">HH", version, numRanges) + data return data - + def toXML(self, writer, ttFont): - items = self.gaspRange.items() - items.sort() + items = sorted(self.gaspRange.items()) for rangeMaxPPEM, rangeGaspBehavior in items: writer.simpletag("gaspRange", [ ("rangeMaxPPEM", rangeMaxPPEM), ("rangeGaspBehavior", rangeGaspBehavior)]) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): - if name <> "gaspRange": + + def fromXML(self, name, attrs, content, ttFont): + if name != "gaspRange": return if not hasattr(self, "gaspRange"): self.gaspRange = {} self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(attrs["rangeGaspBehavior"]) - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/G_D_E_F_.py fonttools-3.0/Lib/fontTools/ttLib/tables/G_D_E_F_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/G_D_E_F_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/G_D_E_F_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,4 +1,6 @@ -from otBase import BaseTTXConverter +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from 
.otBase import BaseTTXConverter class table_G_D_E_F_(BaseTTXConverter): diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_g_l_y_f.py fonttools-3.0/Lib/fontTools/ttLib/tables/_g_l_y_f.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_g_l_y_f.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_g_l_y_f.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,33 +1,35 @@ """_g_l_y_f.py -- Converter classes for the 'glyf' table.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools import ttLib +from fontTools.misc.textTools import safeEval, pad +from fontTools.misc.arrayTools import calcBounds, calcIntBounds, pointInRect +from fontTools.misc.bezierTools import calcQuadraticBounds +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from . import DefaultTable +from . import ttProgram +import sys +import struct +import array +import warnings # -# The Apple and MS rasterizers behave differently for +# The Apple and MS rasterizers behave differently for # scaled composite components: one does scale first and then translate # and the other does it vice versa. MS defined some flags to indicate # the difference, but it seems nobody actually _sets_ those flags. # # Funny thing: Apple seems to _only_ do their thing in the -# WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE +# WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE # (eg. Charcoal)... 
# SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple -import sys -import struct, sstruct -import DefaultTable -from fontTools import ttLib -from fontTools.misc.textTools import safeEval, readHex -import ttProgram -import array -import numpy -from types import StringType, TupleType -import warnings - - class table__g_l_y_f(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): loca = ttFont['loca'] last = int(loca[0]) @@ -42,21 +44,24 @@ glyphName = 'ttxautoglyph%s' % i next = int(loca[i+1]) glyphdata = data[last:next] - if len(glyphdata) <> (next - last): - raise ttLib.TTLibError, "not enough 'glyf' table data" + if len(glyphdata) != (next - last): + raise ttLib.TTLibError("not enough 'glyf' table data") glyph = Glyph(glyphdata) self.glyphs[glyphName] = glyph last = next - # this should become a warning: - #if len(data) > next: - # raise ttLib.TTLibError, "too much 'glyf' table data" + if len(data) - next >= 4: + warnings.warn("too much 'glyf' table data: expected %d, received %d bytes" % + (next, len(data))) if noname: - warnings.warn('%s glyphs have no name' % i) - + warnings.warn('%s glyphs have no name' % noname) + if ttFont.lazy is False: # Be lazy for None and True + for glyph in self.glyphs.values(): + glyph.expand(self) + def compile(self, ttFont): if not hasattr(self, "glyphOrder"): self.glyphOrder = ttFont.getGlyphOrder() - import string + padding = self.padding if hasattr(self, 'padding') else None locations = [] currentLocation = 0 dataList = [] @@ -64,15 +69,34 @@ for glyphName in self.glyphOrder: glyph = self.glyphs[glyphName] glyphData = glyph.compile(self, recalcBBoxes) + if padding: + glyphData = pad(glyphData, size=padding) locations.append(currentLocation) currentLocation = currentLocation + len(glyphData) dataList.append(glyphData) locations.append(currentLocation) - data = string.join(dataList, "") - ttFont['loca'].set(locations) - ttFont['maxp'].numGlyphs = len(self.glyphs) + + if padding is None and currentLocation < 
0x20000: + # See if we can pad any odd-lengthed glyphs to allow loca + # table to use the short offsets. + indices = [i for i,glyphData in enumerate(dataList) if len(glyphData) % 2 == 1] + if indices and currentLocation + len(indices) < 0x20000: + # It fits. Do it. + for i in indices: + dataList[i] += b'\0' + currentLocation = 0 + for i,glyphData in enumerate(dataList): + locations[i] = currentLocation + currentLocation += len(glyphData) + locations[len(dataList)] = currentLocation + + data = bytesjoin(dataList) + if 'loca' in ttFont: + ttFont['loca'].set(locations) + if 'maxp' in ttFont: + ttFont['maxp'].numGlyphs = len(self.glyphs) return data - + def toXML(self, writer, ttFont, progress=None): writer.newline() glyphNames = ttFont.getGlyphNames() @@ -85,7 +109,7 @@ for glyphName in glyphNames: if not counter % progressStep and progress is not None: progress.setLabel("Dumping 'glyf' table... (%s)" % glyphName) - progress.increment(progressStep / float(numGlyphs)) + progress.increment(progressStep / numGlyphs) counter = counter + 1 glyph = self[glyphName] if glyph.numberOfContours: @@ -105,9 +129,9 @@ writer.comment("contains no outline data") writer.newline() writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): - if name <> "TTGlyph": + + def fromXML(self, name, attrs, content, ttFont): + if name != "TTGlyph": return if not hasattr(self, "glyphs"): self.glyphs = {} @@ -121,44 +145,45 @@ setattr(glyph, attr, safeEval(attrs.get(attr, '0'))) self.glyphs[glyphName] = glyph for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue - glyph.fromXML(element, ttFont) + name, attrs, content = element + glyph.fromXML(name, attrs, content, ttFont) if not ttFont.recalcBBoxes: glyph.compact(self, 0) - + def setGlyphOrder(self, glyphOrder): self.glyphOrder = glyphOrder - + def getGlyphName(self, glyphID): return self.glyphOrder[glyphID] - + def getGlyphID(self, glyphName): # XXX optimize with reverse dict!!! 
return self.glyphOrder.index(glyphName) - + def keys(self): return self.glyphs.keys() - + def has_key(self, glyphName): - return self.glyphs.has_key(glyphName) - + return glyphName in self.glyphs + __contains__ = has_key - + def __getitem__(self, glyphName): glyph = self.glyphs[glyphName] glyph.expand(self) return glyph - + def __setitem__(self, glyphName, glyph): self.glyphs[glyphName] = glyph if glyphName not in self.glyphOrder: self.glyphOrder.append(glyphName) - + def __delitem__(self, glyphName): del self.glyphs[glyphName] self.glyphOrder.remove(glyphName) - + def __len__(self): assert len(self.glyphOrder) == len(self.glyphs) return len(self.glyphs) @@ -183,36 +208,98 @@ flagReserved1 = 0x40 flagReserved2 = 0x80 +_flagSignBytes = { + 0: 2, + flagXsame: 0, + flagXShort|flagXsame: +1, + flagXShort: -1, + flagYsame: 0, + flagYShort|flagYsame: +1, + flagYShort: -1, +} + +def flagBest(x, y, onCurve): + """For a given x,y delta pair, returns the flag that packs this pair + most efficiently, as well as the number of byte cost of such flag.""" + + flag = flagOnCurve if onCurve else 0 + cost = 0 + # do x + if x == 0: + flag = flag | flagXsame + elif -255 <= x <= 255: + flag = flag | flagXShort + if x > 0: + flag = flag | flagXsame + cost += 1 + else: + cost += 2 + # do y + if y == 0: + flag = flag | flagYsame + elif -255 <= y <= 255: + flag = flag | flagYShort + if y > 0: + flag = flag | flagYsame + cost += 1 + else: + cost += 2 + return flag, cost + +def flagFits(newFlag, oldFlag, mask): + newBytes = _flagSignBytes[newFlag & mask] + oldBytes = _flagSignBytes[oldFlag & mask] + return newBytes == oldBytes or abs(newBytes) > abs(oldBytes) -ARG_1_AND_2_ARE_WORDS = 0x0001 # if set args are words otherwise they are bytes -ARGS_ARE_XY_VALUES = 0x0002 # if set args are xy values, otherwise they are points -ROUND_XY_TO_GRID = 0x0004 # for the xy values if above is true -WE_HAVE_A_SCALE = 0x0008 # Sx = Sy, otherwise scale == 1.0 -NON_OVERLAPPING = 0x0010 # set to same value for 
all components (obsolete!) -MORE_COMPONENTS = 0x0020 # indicates at least one more glyph after this one -WE_HAVE_AN_X_AND_Y_SCALE = 0x0040 # Sx, Sy -WE_HAVE_A_TWO_BY_TWO = 0x0080 # t00, t01, t10, t11 -WE_HAVE_INSTRUCTIONS = 0x0100 # instructions follow -USE_MY_METRICS = 0x0200 # apply these metrics to parent glyph -OVERLAP_COMPOUND = 0x0400 # used by Apple in GX fonts -SCALED_COMPONENT_OFFSET = 0x0800 # composite designed to have the component offset scaled (designed for Apple) -UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS) +def flagSupports(newFlag, oldFlag): + return ((oldFlag & flagOnCurve) == (newFlag & flagOnCurve) and + flagFits(newFlag, oldFlag, flagXsame|flagXShort) and + flagFits(newFlag, oldFlag, flagYsame|flagYShort)) +def flagEncodeCoord(flag, mask, coord, coordBytes): + byteCount = _flagSignBytes[flag & mask] + if byteCount == 1: + coordBytes.append(coord) + elif byteCount == -1: + coordBytes.append(-coord) + elif byteCount == 2: + coordBytes.append((coord >> 8) & 0xFF) + coordBytes.append(coord & 0xFF) + +def flagEncodeCoords(flag, x, y, xBytes, yBytes): + flagEncodeCoord(flag, flagXsame|flagXShort, x, xBytes) + flagEncodeCoord(flag, flagYsame|flagYShort, y, yBytes) + + +ARG_1_AND_2_ARE_WORDS = 0x0001 # if set args are words otherwise they are bytes +ARGS_ARE_XY_VALUES = 0x0002 # if set args are xy values, otherwise they are points +ROUND_XY_TO_GRID = 0x0004 # for the xy values if above is true +WE_HAVE_A_SCALE = 0x0008 # Sx = Sy, otherwise scale == 1.0 +NON_OVERLAPPING = 0x0010 # set to same value for all components (obsolete!) 
+MORE_COMPONENTS = 0x0020 # indicates at least one more glyph after this one +WE_HAVE_AN_X_AND_Y_SCALE = 0x0040 # Sx, Sy +WE_HAVE_A_TWO_BY_TWO = 0x0080 # t00, t01, t10, t11 +WE_HAVE_INSTRUCTIONS = 0x0100 # instructions follow +USE_MY_METRICS = 0x0200 # apply these metrics to parent glyph +OVERLAP_COMPOUND = 0x0400 # used by Apple in GX fonts +SCALED_COMPONENT_OFFSET = 0x0800 # composite designed to have the component offset scaled (designed for Apple) +UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS) + + +class Glyph(object): -class Glyph: - def __init__(self, data=""): if not data: # empty char self.numberOfContours = 0 return self.data = data - - def compact(self, glyfTable, recalcBBoxes=1): + + def compact(self, glyfTable, recalcBBoxes=True): data = self.compile(glyfTable, recalcBBoxes) self.__dict__.clear() self.data = data - + def expand(self, glyfTable): if not hasattr(self, "data"): # already unpacked @@ -227,8 +314,8 @@ self.decompileComponents(data, glyfTable) else: self.decompileCoordinates(data) - - def compile(self, glyfTable, recalcBBoxes=1): + + def compile(self, glyfTable, recalcBBoxes=True): if hasattr(self, "data"): return self.data if self.numberOfContours == 0: @@ -240,15 +327,8 @@ data = data + self.compileComponents(glyfTable) else: data = data + self.compileCoordinates() - # From the spec: "Note that the local offsets should be word-aligned" - # From a later MS spec: "Note that the local offsets should be long-aligned" - # Let's be modern and align on 4-byte boundaries. 
- if len(data) % 4: - # add pad bytes - nPadBytes = 4 - (len(data) % 4) - data = data + "\0" * nPadBytes return data - + def toXML(self, writer, ttFont): if self.isComposite(): for compo in self.components: @@ -265,7 +345,7 @@ writer.newline() for j in range(last, self.endPtsOfContours[i] + 1): writer.simpletag("pt", [ - ("x", self.coordinates[j][0]), + ("x", self.coordinates[j][0]), ("y", self.coordinates[j][1]), ("on", self.flags[j] & flagOnCurve)]) writer.newline() @@ -277,48 +357,48 @@ self.program.toXML(writer, ttFont) writer.endtag("instructions") writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if name == "contour": if self.numberOfContours < 0: - raise ttLib.TTLibError, "can't mix composites and contours in glyph" + raise ttLib.TTLibError("can't mix composites and contours in glyph") self.numberOfContours = self.numberOfContours + 1 - coordinates = [] + coordinates = GlyphCoordinates() flags = [] for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element - if name <> "pt": + if name != "pt": continue # ignore anything but "pt" - coordinates.append([safeEval(attrs["x"]), safeEval(attrs["y"])]) + coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"]))) flags.append(not not safeEval(attrs["on"])) - coordinates = numpy.array(coordinates, numpy.int16) - flags = numpy.array(flags, numpy.int8) + flags = array.array("B", flags) if not hasattr(self, "coordinates"): self.coordinates = coordinates self.flags = flags self.endPtsOfContours = [len(coordinates)-1] else: - self.coordinates = numpy.concatenate((self.coordinates, coordinates)) - self.flags = numpy.concatenate((self.flags, flags)) + self.coordinates.extend (coordinates) + self.flags.extend(flags) self.endPtsOfContours.append(len(self.coordinates)-1) elif name == "component": if self.numberOfContours > 0: - raise ttLib.TTLibError, "can't mix composites 
and contours in glyph" + raise ttLib.TTLibError("can't mix composites and contours in glyph") self.numberOfContours = -1 if not hasattr(self, "components"): self.components = [] component = GlyphComponent() self.components.append(component) - component.fromXML((name, attrs, content), ttFont) + component.fromXML(name, attrs, content, ttFont) elif name == "instructions": self.program = ttProgram.Program() for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue - self.program.fromXML(element, ttFont) - + name, attrs, content = element + self.program.fromXML(name, attrs, content, ttFont) + def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1): assert self.isComposite() nContours = 0 @@ -335,11 +415,11 @@ nPoints = nPoints + nP nContours = nContours + nC return nPoints, nContours, maxComponentDepth - + def getMaxpValues(self): assert self.numberOfContours > 0 return len(self.coordinates), len(self.endPtsOfContours) - + def decompileComponents(self, data, glyfTable): self.components = [] more = 1 @@ -355,17 +435,18 @@ self.program = ttProgram.Program() self.program.fromBytecode(data[:numInstructions]) data = data[numInstructions:] - assert len(data) < 4, "bad composite data" - + if len(data) >= 4: + warnings.warn("too much glyph data at the end of composite glyph: %d excess bytes" % len(data)) + def decompileCoordinates(self, data): endPtsOfContours = array.array("h") endPtsOfContours.fromstring(data[:2*self.numberOfContours]) - if sys.byteorder <> "big": + if sys.byteorder != "big": endPtsOfContours.byteswap() self.endPtsOfContours = endPtsOfContours.tolist() - + data = data[2*self.numberOfContours:] - + instructionLength, = struct.unpack(">h", data[:2]) data = data[2:] self.program = ttProgram.Program() @@ -374,9 +455,9 @@ nCoordinates = self.endPtsOfContours[-1] + 1 flags, xCoordinates, yCoordinates = \ self.decompileCoordinatesRaw(nCoordinates, data) - + # fill in repetitions and apply signs - coordinates = 
numpy.zeros((nCoordinates, 2), numpy.int16) + self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates) xIndex = 0 yIndex = 0 for i in range(nCoordinates): @@ -408,26 +489,25 @@ coordinates[i] = (x, y) assert xIndex == len(xCoordinates) assert yIndex == len(yCoordinates) - # convert relative to absolute coordinates - self.coordinates = numpy.add.accumulate(coordinates) + coordinates.relativeToAbsolute() # discard all flags but for "flagOnCurve" - self.flags = numpy.bitwise_and(flags, flagOnCurve).astype(numpy.int8) + self.flags = array.array("B", (f & flagOnCurve for f in flags)) def decompileCoordinatesRaw(self, nCoordinates, data): # unpack flags and prepare unpacking of coordinates - flags = numpy.array([0] * nCoordinates, numpy.int8) + flags = array.array("B", [0] * nCoordinates) # Warning: deep Python trickery going on. We use the struct module to unpack # the coordinates. We build a format string based on the flags, so we can # unpack the coordinates in one struct.unpack() call. xFormat = ">" # big endian yFormat = ">" # big endian i = j = 0 - while 1: - flag = ord(data[i]) + while True: + flag = byteord(data[i]) i = i + 1 repeat = 1 if flag & flagRepeat: - repeat = ord(data[i]) + 1 + repeat = byteord(data[i]) + 1 i = i + 1 for k in range(repeat): if flag & flagXShort: @@ -447,14 +527,14 @@ # unpack raw coordinates, krrrrrr-tching! 
xDataLen = struct.calcsize(xFormat) yDataLen = struct.calcsize(yFormat) - if not (0 <= (len(data) - (xDataLen + yDataLen)) < 4): - raise ttLib.TTLibError, "bad glyph record (leftover bytes: %s)" % (len(data) - (xDataLen + yDataLen)) + if len(data) - (xDataLen + yDataLen) >= 4: + warnings.warn("too much glyph data: %d excess bytes" % (len(data) - (xDataLen + yDataLen))) xCoordinates = struct.unpack(xFormat, data[:xDataLen]) yCoordinates = struct.unpack(yFormat, data[xDataLen:xDataLen+yDataLen]) return flags, xCoordinates, yCoordinates - + def compileComponents(self, glyfTable): - data = "" + data = b"" lastcomponent = len(self.components) - 1 more = 1 haveInstructions = 0 @@ -468,35 +548,42 @@ instructions = self.program.getBytecode() data = data + struct.pack(">h", len(instructions)) + instructions return data - - + def compileCoordinates(self): assert len(self.coordinates) == len(self.flags) - data = "" + data = [] endPtsOfContours = array.array("h", self.endPtsOfContours) - if sys.byteorder <> "big": + if sys.byteorder != "big": endPtsOfContours.byteswap() - data = data + endPtsOfContours.tostring() + data.append(endPtsOfContours.tostring()) instructions = self.program.getBytecode() - data = data + struct.pack(">h", len(instructions)) + instructions - nCoordinates = len(self.coordinates) - - # make a copy - coordinates = numpy.array(self.coordinates) - # absolute to relative coordinates - coordinates[1:] = numpy.subtract(coordinates[1:], coordinates[:-1]) - flags = self.flags + data.append(struct.pack(">h", len(instructions))) + data.append(instructions) + + deltas = self.coordinates.copy() + if deltas.isFloat(): + # Warn? + xPoints = [int(round(x)) for x in xPoints] + yPoints = [int(round(y)) for y in xPoints] + deltas.absoluteToRelative() + + # TODO(behdad): Add a configuration option for this? 
+ deltas = self.compileDeltasGreedy(self.flags, deltas) + #deltas = self.compileDeltasOptimal(self.flags, deltas) + + data.extend(deltas) + return bytesjoin(data) + + def compileDeltasGreedy(self, flags, deltas): + # Implements greedy algorithm for packing coordinate deltas: + # uses shortest representation one coordinate at a time. compressedflags = [] xPoints = [] yPoints = [] - xFormat = ">" - yFormat = ">" lastflag = None repeat = 0 - for i in range(len(coordinates)): + for flag,(x,y) in zip(flags, deltas): # Oh, the horrors of TrueType - flag = self.flags[i] - x, y = coordinates[i] # do x if x == 0: flag = flag | flagXsame @@ -506,11 +593,9 @@ flag = flag | flagXsame else: x = -x - xPoints.append(x) - xFormat = xFormat + 'B' + xPoints.append(bytechr(x)) else: - xPoints.append(x) - xFormat = xFormat + 'h' + xPoints.append(struct.pack(">h", x)) # do y if y == 0: flag = flag | flagYsame @@ -520,68 +605,174 @@ flag = flag | flagYsame else: y = -y - yPoints.append(y) - yFormat = yFormat + 'B' + yPoints.append(bytechr(y)) else: - yPoints.append(y) - yFormat = yFormat + 'h' + yPoints.append(struct.pack(">h", y)) # handle repeating flags - if flag == lastflag: + if flag == lastflag and repeat != 255: repeat = repeat + 1 if repeat == 1: compressedflags.append(flag) - elif repeat > 1: - compressedflags[-2] = flag | flagRepeat - compressedflags[-1] = repeat else: + compressedflags[-2] = flag | flagRepeat compressedflags[-1] = repeat else: repeat = 0 compressedflags.append(flag) lastflag = flag - data = data + array.array("B", compressedflags).tostring() - xPoints = map(int, xPoints) # work around numpy vs. 
struct >= 2.5 bug - yPoints = map(int, yPoints) - data = data + apply(struct.pack, (xFormat,)+tuple(xPoints)) - data = data + apply(struct.pack, (yFormat,)+tuple(yPoints)) - return data - + compressedFlags = array.array("B", compressedflags).tostring() + compressedXs = bytesjoin(xPoints) + compressedYs = bytesjoin(yPoints) + return (compressedFlags, compressedXs, compressedYs) + + def compileDeltasOptimal(self, flags, deltas): + # Implements optimal, dynaic-programming, algorithm for packing coordinate + # deltas. The savings are negligible :(. + candidates = [] + bestTuple = None + bestCost = 0 + repeat = 0 + for flag,(x,y) in zip(flags, deltas): + # Oh, the horrors of TrueType + flag, coordBytes = flagBest(x, y, flag) + bestCost += 1 + coordBytes + newCandidates = [(bestCost, bestTuple, flag, coordBytes), + (bestCost+1, bestTuple, (flag|flagRepeat), coordBytes)] + for lastCost,lastTuple,lastFlag,coordBytes in candidates: + if lastCost + coordBytes <= bestCost + 1 and (lastFlag & flagRepeat) and (lastFlag < 0xff00) and flagSupports(lastFlag, flag): + if (lastFlag & 0xFF) == (flag|flagRepeat) and lastCost == bestCost + 1: + continue + newCandidates.append((lastCost + coordBytes, lastTuple, lastFlag+256, coordBytes)) + candidates = newCandidates + bestTuple = min(candidates, key=lambda t:t[0]) + bestCost = bestTuple[0] + + flags = [] + while bestTuple: + cost, bestTuple, flag, coordBytes = bestTuple + flags.append(flag) + flags.reverse() + + compressedFlags = array.array("B") + compressedXs = array.array("B") + compressedYs = array.array("B") + coords = iter(deltas) + ff = [] + for flag in flags: + repeatCount, flag = flag >> 8, flag & 0xFF + compressedFlags.append(flag) + if flag & flagRepeat: + assert(repeatCount > 0) + compressedFlags.append(repeatCount) + else: + assert(repeatCount == 0) + for i in range(1 + repeatCount): + x,y = next(coords) + flagEncodeCoords(flag, x, y, compressedXs, compressedYs) + ff.append(flag) + try: + next(coords) + raise 
Exception("internal error") + except StopIteration: + pass + compressedFlags = compressedFlags.tostring() + compressedXs = compressedXs.tostring() + compressedYs = compressedYs.tostring() + + return (compressedFlags, compressedXs, compressedYs) + def recalcBounds(self, glyfTable): - coordinates, endPts, flags = self.getCoordinates(glyfTable) - if len(coordinates) > 0: - self.xMin, self.yMin = numpy.minimum.reduce(coordinates) - self.xMax, self.yMax = numpy.maximum.reduce(coordinates) + coords, endPts, flags = self.getCoordinates(glyfTable) + if len(coords) > 0: + if 0: + # This branch calculates exact glyph outline bounds + # analytically, handling cases without on-curve + # extremas, etc. However, the glyf table header + # simply says that the bounds should be min/max x/y + # "for coordinate data", so I suppose that means no + # fancy thing here, just get extremas of all coord + # points (on and off). As such, this branch is + # disabled. + + # Collect on-curve points + onCurveCoords = [coords[j] for j in range(len(coords)) + if flags[j] & flagOnCurve] + # Add implicit on-curve points + start = 0 + for end in endPts: + last = end + for j in range(start, end + 1): + if not ((flags[j] | flags[last]) & flagOnCurve): + x = (coords[last][0] + coords[j][0]) / 2 + y = (coords[last][1] + coords[j][1]) / 2 + onCurveCoords.append((x,y)) + last = j + start = end + 1 + # Add bounds for curves without an explicit extrema + start = 0 + for end in endPts: + last = end + for j in range(start, end + 1): + if not (flags[j] & flagOnCurve): + next = j + 1 if j < end else start + bbox = calcBounds([coords[last], coords[next]]) + if not pointInRect(coords[j], bbox): + # Ouch! + warnings.warn("Outline has curve with implicit extrema.") + # Ouch! Find analytical curve bounds. 
+ pthis = coords[j] + plast = coords[last] + if not (flags[last] & flagOnCurve): + plast = ((pthis[0]+plast[0])/2, (pthis[1]+plast[1])/2) + pnext = coords[next] + if not (flags[next] & flagOnCurve): + pnext = ((pthis[0]+pnext[0])/2, (pthis[1]+pnext[1])/2) + bbox = calcQuadraticBounds(plast, pthis, pnext) + onCurveCoords.append((bbox[0],bbox[1])) + onCurveCoords.append((bbox[2],bbox[3])) + last = j + start = end + 1 + + self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(onCurveCoords) + else: + self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(coords) else: self.xMin, self.yMin, self.xMax, self.yMax = (0, 0, 0, 0) - + def isComposite(self): - return self.numberOfContours == -1 - + """Can be called on compact or expanded glyph.""" + if hasattr(self, "data") and self.data: + return struct.unpack(">h", self.data[:2])[0] == -1 + else: + return self.numberOfContours == -1 + def __getitem__(self, componentIndex): if not self.isComposite(): - raise ttLib.TTLibError, "can't use glyph as sequence" + raise ttLib.TTLibError("can't use glyph as sequence") return self.components[componentIndex] - + def getCoordinates(self, glyfTable): if self.numberOfContours > 0: return self.coordinates, self.endPtsOfContours, self.flags elif self.isComposite(): # it's a composite - allCoords = None - allFlags = None - allEndPts = None + allCoords = GlyphCoordinates() + allFlags = array.array("B") + allEndPts = [] for compo in self.components: g = glyfTable[compo.glyphName] coordinates, endPts, flags = g.getCoordinates(glyfTable) if hasattr(compo, "firstPt"): # move according to two reference points - move = allCoords[compo.firstPt] - coordinates[compo.secondPt] + x1,y1 = allCoords[compo.firstPt] + x2,y2 = coordinates[compo.secondPt] + move = x1-x2, y1-y2 else: move = compo.x, compo.y - + + coordinates = GlyphCoordinates(coordinates) if not hasattr(compo, "transform"): - if len(coordinates) > 0: - coordinates = coordinates + move # I love NumPy! 
+ coordinates.translate(move) else: apple_way = compo.flags & SCALED_COMPONENT_OFFSET ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET @@ -592,53 +783,197 @@ scale_component_offset = apple_way if scale_component_offset: # the Apple way: first move, then scale (ie. scale the component offset) - coordinates = coordinates + move - coordinates = numpy.dot(coordinates, compo.transform) + coordinates.translate(move) + coordinates.transform(compo.transform) else: # the MS way: first scale, then move - coordinates = numpy.dot(coordinates, compo.transform) - coordinates = coordinates + move - # due to the transformation the coords. are now floats; - # round them off nicely, and cast to short - coordinates = numpy.floor(coordinates + 0.5).astype(numpy.int16) - if allCoords is None or len(allCoords) == 0: - allCoords = coordinates - allEndPts = endPts - allFlags = flags - else: - allEndPts = allEndPts + (numpy.array(endPts) + len(allCoords)).tolist() - if len(coordinates) > 0: - allCoords = numpy.concatenate((allCoords, coordinates)) - allFlags = numpy.concatenate((allFlags, flags)) + coordinates.transform(compo.transform) + coordinates.translate(move) + offset = len(allCoords) + allEndPts.extend(e + offset for e in endPts) + allCoords.extend(coordinates) + allFlags.extend(flags) return allCoords, allEndPts, allFlags else: - return numpy.array([], numpy.int16), [], numpy.array([], numpy.int8) - - def __cmp__(self, other): - if self.numberOfContours <= 0: - return cmp(self.__dict__, other.__dict__) - else: - if cmp(len(self.coordinates), len(other.coordinates)): - return 1 - ctest = numpy.alltrue(numpy.alltrue(numpy.equal(self.coordinates, other.coordinates))) - ftest = numpy.alltrue(numpy.equal(self.flags, other.flags)) - if not ctest or not ftest: - return 1 - return ( - cmp(self.endPtsOfContours, other.endPtsOfContours) or - cmp(self.program, other.instructions) - ) + return GlyphCoordinates(), [], array.array("B") + + def getComponentNames(self, glyfTable): + if not 
hasattr(self, "data"): + if self.isComposite(): + return [c.glyphName for c in self.components] + else: + return [] + + # Extract components without expanding glyph + + if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: + return [] # Not composite + + data = self.data + i = 10 + components = [] + more = 1 + while more: + flags, glyphID = struct.unpack(">HH", data[i:i+4]) + i += 4 + flags = int(flags) + components.append(glyfTable.getGlyphName(int(glyphID))) + + if flags & ARG_1_AND_2_ARE_WORDS: i += 4 + else: i += 2 + if flags & WE_HAVE_A_SCALE: i += 2 + elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4 + elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8 + more = flags & MORE_COMPONENTS + + return components + + def trim(self, remove_hinting=False): + """ Remove padding and, if requested, hinting, from a glyph. + This works on both expanded and compacted glyphs, without + expanding it.""" + if not hasattr(self, "data"): + if remove_hinting: + self.program = ttProgram.Program() + self.program.fromBytecode([]) + # No padding to trim. 
+ return + if not self.data: + return + numContours = struct.unpack(">h", self.data[:2])[0] + data = array.array("B", self.data) + i = 10 + if numContours >= 0: + i += 2 * numContours # endPtsOfContours + nCoordinates = ((data[i-2] << 8) | data[i-1]) + 1 + instructionLen = (data[i] << 8) | data[i+1] + if remove_hinting: + # Zero instruction length + data[i] = data [i+1] = 0 + i += 2 + if instructionLen: + # Splice it out + data = data[:i] + data[i+instructionLen:] + instructionLen = 0 + else: + i += 2 + instructionLen + + coordBytes = 0 + j = 0 + while True: + flag = data[i] + i = i + 1 + repeat = 1 + if flag & flagRepeat: + repeat = data[i] + 1 + i = i + 1 + xBytes = yBytes = 0 + if flag & flagXShort: + xBytes = 1 + elif not (flag & flagXsame): + xBytes = 2 + if flag & flagYShort: + yBytes = 1 + elif not (flag & flagYsame): + yBytes = 2 + coordBytes += (xBytes + yBytes) * repeat + j += repeat + if j >= nCoordinates: + break + assert j == nCoordinates, "bad glyph flags" + i += coordBytes + # Remove padding + data = data[:i] + else: + more = 1 + we_have_instructions = False + while more: + flags =(data[i] << 8) | data[i+1] + if remove_hinting: + flags &= ~WE_HAVE_INSTRUCTIONS + if flags & WE_HAVE_INSTRUCTIONS: + we_have_instructions = True + data[i+0] = flags >> 8 + data[i+1] = flags & 0xFF + i += 4 + flags = int(flags) + if flags & ARG_1_AND_2_ARE_WORDS: i += 4 + else: i += 2 + if flags & WE_HAVE_A_SCALE: i += 2 + elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4 + elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8 + more = flags & MORE_COMPONENTS + if we_have_instructions: + instructionLen = (data[i] << 8) | data[i+1] + i += 2 + instructionLen + # Remove padding + data = data[:i] + + self.data = data.tostring() + + def removeHinting(self): + self.trim (remove_hinting=True) + + def draw(self, pen, glyfTable, offset=0): + + if self.isComposite(): + for component in self.components: + glyphName, transform = component.getComponentInfo() + pen.addComponent(glyphName, transform) + 
return + + coordinates, endPts, flags = self.getCoordinates(glyfTable) + if offset: + coordinates = coordinates.copy() + coordinates.translate((offset, 0)) + start = 0 + for end in endPts: + end = end + 1 + contour = coordinates[start:end] + cFlags = flags[start:end] + start = end + if 1 not in cFlags: + # There is not a single on-curve point on the curve, + # use pen.qCurveTo's special case by specifying None + # as the on-curve point. + contour.append(None) + pen.qCurveTo(*contour) + else: + # Shuffle the points so that contour the is guaranteed + # to *end* in an on-curve point, which we'll use for + # the moveTo. + firstOnCurve = cFlags.index(1) + 1 + contour = contour[firstOnCurve:] + contour[:firstOnCurve] + cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve] + pen.moveTo(contour[-1]) + while contour: + nextOnCurve = cFlags.index(1) + 1 + if nextOnCurve == 1: + pen.lineTo(contour[0]) + else: + pen.qCurveTo(*contour[:nextOnCurve]) + contour = contour[nextOnCurve:] + cFlags = cFlags[nextOnCurve:] + pen.closePath() + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ + + +class GlyphComponent(object): -class GlyphComponent: - def __init__(self): pass - + def getComponentInfo(self): """Return the base glyph name and a transform.""" # XXX Ignoring self.firstPt & self.lastpt for now: I need to implement - # something equivalent in fontTools.objects.glyph (I'd rather not + # something equivalent in fontTools.objects.glyph (I'd rather not # convert it to an absolute offset, since it is valuable information). # This method will now raise "AttributeError: x" on glyphs that use # this TT feature. 
@@ -648,7 +983,7 @@ else: trans = (1, 0, 0, 1, self.x, self.y) return self.glyphName, trans - + def decompile(self, data, glyfTable): flags, glyphID = struct.unpack(">HH", data[:4]) self.flags = int(flags) @@ -656,7 +991,7 @@ self.glyphName = glyfTable.getGlyphName(int(glyphID)) #print ">>", reprflag(self.flags) data = data[4:] - + if self.flags & ARG_1_AND_2_ARE_WORDS: if self.flags & ARGS_ARE_XY_VALUES: self.x, self.y = struct.unpack(">hh", data[:4]) @@ -671,42 +1006,40 @@ x, y = struct.unpack(">BB", data[:2]) self.firstPt, self.secondPt = int(x), int(y) data = data[2:] - + if self.flags & WE_HAVE_A_SCALE: scale, = struct.unpack(">h", data[:2]) - self.transform = numpy.array( - [[scale, 0], [0, scale]]) / float(0x4000) # fixed 2.14 + self.transform = [[fi2fl(scale,14), 0], [0, fi2fl(scale,14)]] # fixed 2.14 data = data[2:] elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE: xscale, yscale = struct.unpack(">hh", data[:4]) - self.transform = numpy.array( - [[xscale, 0], [0, yscale]]) / float(0x4000) # fixed 2.14 + self.transform = [[fi2fl(xscale,14), 0], [0, fi2fl(yscale,14)]] # fixed 2.14 data = data[4:] elif self.flags & WE_HAVE_A_TWO_BY_TWO: - (xscale, scale01, + (xscale, scale01, scale10, yscale) = struct.unpack(">hhhh", data[:8]) - self.transform = numpy.array( - [[xscale, scale01], [scale10, yscale]]) / float(0x4000) # fixed 2.14 + self.transform = [[fi2fl(xscale,14), fi2fl(scale01,14)], + [fi2fl(scale10,14), fi2fl(yscale,14)]] # fixed 2.14 data = data[8:] more = self.flags & MORE_COMPONENTS haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS - self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | + self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | NON_OVERLAPPING) return more, haveInstructions, data - + def compile(self, more, haveInstructions, glyfTable): - data = "" - + data = b"" + # reset all flags we will calculate ourselves - flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | + 
flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | NON_OVERLAPPING) if more: flags = flags | MORE_COMPONENTS if haveInstructions: flags = flags | WE_HAVE_INSTRUCTIONS - + if hasattr(self, "firstPt"): if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255): data = data + struct.pack(">BB", self.firstPt, self.secondPt) @@ -720,43 +1053,41 @@ else: data = data + struct.pack(">hh", self.x, self.y) flags = flags | ARG_1_AND_2_ARE_WORDS - + if hasattr(self, "transform"): - # XXX needs more testing - transform = numpy.floor(self.transform * 0x4000 + 0.5) + transform = [[fl2fi(x,14) for x in row] for row in self.transform] if transform[0][1] or transform[1][0]: flags = flags | WE_HAVE_A_TWO_BY_TWO - data = data + struct.pack(">hhhh", + data = data + struct.pack(">hhhh", transform[0][0], transform[0][1], transform[1][0], transform[1][1]) - elif transform[0][0] <> transform[1][1]: + elif transform[0][0] != transform[1][1]: flags = flags | WE_HAVE_AN_X_AND_Y_SCALE - data = data + struct.pack(">hh", + data = data + struct.pack(">hh", transform[0][0], transform[1][1]) else: flags = flags | WE_HAVE_A_SCALE - data = data + struct.pack(">h", + data = data + struct.pack(">h", transform[0][0]) - + glyphID = glyfTable.getGlyphID(self.glyphName) return struct.pack(">HH", flags, glyphID) + data - + def toXML(self, writer, ttFont): attrs = [("glyphName", self.glyphName)] if not hasattr(self, "firstPt"): attrs = attrs + [("x", self.x), ("y", self.y)] else: attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)] - + if hasattr(self, "transform"): - # XXX needs more testing transform = self.transform if transform[0][1] or transform[1][0]: attrs = attrs + [ ("scalex", transform[0][0]), ("scale01", transform[0][1]), ("scale10", transform[1][0]), ("scaley", transform[1][1]), ] - elif transform[0][0] <> transform[1][1]: + elif transform[0][0] != transform[1][1]: attrs = attrs + [ ("scalex", transform[0][0]), 
("scaley", transform[1][1]), ] @@ -765,48 +1096,146 @@ attrs = attrs + [("flags", hex(self.flags))] writer.simpletag("component", attrs) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): self.glyphName = attrs["glyphName"] - if attrs.has_key("firstPt"): + if "firstPt" in attrs: self.firstPt = safeEval(attrs["firstPt"]) self.secondPt = safeEval(attrs["secondPt"]) else: self.x = safeEval(attrs["x"]) self.y = safeEval(attrs["y"]) - if attrs.has_key("scale01"): + if "scale01" in attrs: scalex = safeEval(attrs["scalex"]) scale01 = safeEval(attrs["scale01"]) scale10 = safeEval(attrs["scale10"]) scaley = safeEval(attrs["scaley"]) - self.transform = numpy.array([[scalex, scale01], [scale10, scaley]]) - elif attrs.has_key("scalex"): + self.transform = [[scalex, scale01], [scale10, scaley]] + elif "scalex" in attrs: scalex = safeEval(attrs["scalex"]) scaley = safeEval(attrs["scaley"]) - self.transform = numpy.array([[scalex, 0], [0, scaley]]) - elif attrs.has_key("scale"): + self.transform = [[scalex, 0], [0, scaley]] + elif "scale" in attrs: scale = safeEval(attrs["scale"]) - self.transform = numpy.array([[scale, 0], [0, scale]]) + self.transform = [[scale, 0], [0, scale]] self.flags = safeEval(attrs["flags"]) - - def __cmp__(self, other): - if hasattr(self, "transform"): - if numpy.alltrue(numpy.equal(self.transform, other.transform)): - selfdict = self.__dict__.copy() - otherdict = other.__dict__.copy() - del selfdict["transform"] - del otherdict["transform"] - return cmp(selfdict, otherdict) - else: - return 1 - else: - return cmp(self.__dict__, other.__dict__) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ + +class GlyphCoordinates(object): + + def __init__(self, iterable=[]): + self._a = array.array("h") + self.extend(iterable) + + def isFloat(self): + return 
self._a.typecode == 'f' + + def _ensureFloat(self): + if self.isFloat(): + return + # The conversion to list() is to work around Jython bug + self._a = array.array("f", list(self._a)) + + def _checkFloat(self, p): + if any(isinstance(v, float) for v in p): + p = [int(v) if int(v) == v else v for v in p] + if any(isinstance(v, float) for v in p): + self._ensureFloat() + return p + + @staticmethod + def zeros(count): + return GlyphCoordinates([(0,0)] * count) + + def copy(self): + c = GlyphCoordinates() + c._a.extend(self._a) + return c + + def __len__(self): + return len(self._a) // 2 + + def __getitem__(self, k): + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + return [self[i] for i in indices] + return self._a[2*k],self._a[2*k+1] + + def __setitem__(self, k, v): + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + # XXX This only works if len(v) == len(indices) + # TODO Implement __delitem__ + for j,i in enumerate(indices): + self[i] = v[j] + return + v = self._checkFloat(v) + self._a[2*k],self._a[2*k+1] = v + + def __repr__(self): + return 'GlyphCoordinates(['+','.join(str(c) for c in self)+'])' + + def append(self, p): + p = self._checkFloat(p) + self._a.extend(tuple(p)) + + def extend(self, iterable): + for p in iterable: + p = self._checkFloat(p) + self._a.extend(p) + + def relativeToAbsolute(self): + a = self._a + x,y = 0,0 + for i in range(len(a) // 2): + a[2*i ] = x = a[2*i ] + x + a[2*i+1] = y = a[2*i+1] + y + + def absoluteToRelative(self): + a = self._a + x,y = 0,0 + for i in range(len(a) // 2): + dx = a[2*i ] - x + dy = a[2*i+1] - y + x = a[2*i ] + y = a[2*i+1] + a[2*i ] = dx + a[2*i+1] = dy + + def translate(self, p): + (x,y) = p + a = self._a + for i in range(len(a) // 2): + a[2*i ] += x + a[2*i+1] += y + + def transform(self, t): + a = self._a + for i in range(len(a) // 2): + x = a[2*i ] + y = a[2*i+1] + px = x * t[0][0] + y * t[1][0] + py = x * t[0][1] + y * t[1][1] + self[i] = (px, py) + + def __ne__(self, 
other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self._a == other._a def reprflag(flag): bin = "" - if type(flag) == StringType: - flag = ord(flag) + if isinstance(flag, str): + flag = byteord(flag) while flag: if flag & 0x01: bin = "1" + bin @@ -815,4 +1244,3 @@ flag = flag >> 1 bin = (14 - len(bin)) * "0" + bin return bin - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/G_M_A_P_.py fonttools-3.0/Lib/fontTools/ttLib/tables/G_M_A_P_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/G_M_A_P_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/G_M_A_P_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,7 +1,8 @@ -import DefaultTable -import sstruct -from types import StringType -from fontTools.misc.textTools import safeEval, num2binary, binary2num +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable GMAPFormat = """ > # big endian @@ -12,7 +13,7 @@ recordsOffset: H fontNameLength: H """ -# psFontName is a byte string which follows the record above. This is zero padded +# psFontName is a byte string which follows the record above. This is zero padded # to the beginning of the records array. The recordsOffsst is 32 bit aligned. 
GMAPRecordFormat1 = """ @@ -23,17 +24,16 @@ ggid: H name: 32s """ - -class GMAPRecord: - def __init__(self, uv = 0, cid = 0, gid = 0, ggid = 0, name = ""): +class GMAPRecord(object): + def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""): self.UV = uv self.cid = cid self.gid = gid self.ggid = ggid self.name = name - + def toXML(self, writer, ttFont): writer.begintag("GMAPRecord") writer.newline() @@ -50,23 +50,17 @@ writer.endtag("GMAPRecord") writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + def fromXML(self, name, attrs, content, ttFont): value = attrs["value"] if name == "GlyphletName": self.name = value else: - try: - value = safeEval(value) - except OverflowError: - value = long(value) - setattr(self, name, value) - + setattr(self, name, safeEval(value)) def compile(self, ttFont): - if self.UV == None: + if self.UV is None: self.UV = 0 - nameLen = len(self.name) + nameLen = len(self.name) if nameLen < 32: self.name = self.name + "\0"*(32 - nameLen) data = sstruct.pack(GMAPRecordFormat1, self) @@ -77,12 +71,12 @@ class table_G_M_A_P_(DefaultTable.DefaultTable): - + dependencies = [] - + def decompile(self, data, ttFont): dummy, newData = sstruct.unpack2(GMAPFormat, data, self) - self.psFontName = newData[:self.fontNameLength] + self.psFontName = tostr(newData[:self.fontNameLength]) assert (self.recordsOffset % 4) == 0, "GMAP error: recordsOffset is not 32 bit aligned." 
newData = data[self.recordsOffset:] self.gmapRecords = [] @@ -90,19 +84,17 @@ gmapRecord, newData = sstruct.unpack2(GMAPRecordFormat1, newData, GMAPRecord()) gmapRecord.name = gmapRecord.name.strip('\0') self.gmapRecords.append(gmapRecord) - def compile(self, ttFont): self.recordsCount = len(self.gmapRecords) self.fontNameLength = len(self.psFontName) - self.recordsOffset = 4 *(((self.fontNameLength + 12) + 3) /4) + self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4) data = sstruct.pack(GMAPFormat, self) - data = data + self.psFontName - data = data + "\0" * (self.recordsOffset - len(data)) + data = data + tobytes(self.psFontName) + data = data + b"\0" * (self.recordsOffset - len(data)) for record in self.gmapRecords: data = data + record.compile(ttFont) return data - def toXML(self, writer, ttFont): writer.comment("Most of this table will be recalculated by the compiler") @@ -116,24 +108,21 @@ writer.newline() for gmapRecord in self.gmapRecords: gmapRecord.toXML(writer, ttFont) - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if name == "GMAPRecord": if not hasattr(self, "gmapRecords"): self.gmapRecords = [] gmapRecord = GMAPRecord() self.gmapRecords.append(gmapRecord) for element in content: - if isinstance(element, StringType): + if isinstance(element, basestring): continue - gmapRecord.fromXML(element, ttFont) + name, attrs, content = element + gmapRecord.fromXML(name, attrs, content, ttFont) else: value = attrs["value"] if name == "PSFontName": self.psFontName = value - else: - try: - value = safeEval(value) - except OverflowError: - value = long(value) - setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/G_P_K_G_.py fonttools-3.0/Lib/fontTools/ttLib/tables/G_P_K_G_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/G_P_K_G_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/G_P_K_G_.py 
2015-08-31 17:57:15.000000000 +0000 @@ -1,11 +1,10 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, readHex +from . import DefaultTable import sys -import DefaultTable -import sstruct import array -import numpy -from types import StringType -from fontTools.misc.textTools import safeEval, readHex -from fontTools import ttLib GPKGFormat = """ > # big endian @@ -14,19 +13,19 @@ numGMAPs: H numGlyplets: H """ -# psFontName is a byte string which follows the record above. This is zero padded +# psFontName is a byte string which follows the record above. This is zero padded # to the beginning of the records array. The recordsOffsst is 32 bit aligned. class table_G_P_K_G_(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): dummy, newData = sstruct.unpack2(GPKGFormat, data, self) - GMAPoffsets = array.array("L") + GMAPoffsets = array.array("I") endPos = (self.numGMAPs+1) * 4 GMAPoffsets.fromstring(newData[:endPos]) - if sys.byteorder <> "big": + if sys.byteorder != "big": GMAPoffsets.byteswap() self.GMAPs = [] for i in range(self.numGMAPs): @@ -35,9 +34,9 @@ self.GMAPs.append(data[start:end]) pos = endPos endPos = pos + (self.numGlyplets + 1)*4 - glyphletOffsets = array.array("L") + glyphletOffsets = array.array("I") glyphletOffsets.fromstring(newData[pos:endPos]) - if sys.byteorder <> "big": + if sys.byteorder != "big": glyphletOffsets.byteswap() self.glyphlets = [] for i in range(self.numGlyplets): @@ -45,7 +44,6 @@ end = glyphletOffsets[i+1] self.glyphlets.append(data[start:end]) - def compile(self, ttFont): self.numGMAPs = len(self.GMAPs) self.numGlyplets = len(self.glyphlets) @@ -59,24 +57,24 @@ for i in range(1, self.numGMAPs +1): pos += len(self.GMAPs[i-1]) GMAPoffsets[i] = pos - gmapArray = numpy.array(GMAPoffsets, numpy.uint32) - if sys.byteorder <> "big": - gmapArray = gmapArray.byteswap() + gmapArray = 
array.array("I", GMAPoffsets) + if sys.byteorder != "big": + gmapArray.byteswap() dataList.append(gmapArray.tostring()) glyphletOffsets[0] = pos for i in range(1, self.numGlyplets +1): pos += len(self.glyphlets[i-1]) glyphletOffsets[i] = pos - glyphletArray = numpy.array(glyphletOffsets, numpy.uint32) - if sys.byteorder <> "big": - glyphletArray = glyphletArray.byteswap() + glyphletArray = array.array("I", glyphletOffsets) + if sys.byteorder != "big": + glyphletArray.byteswap() dataList.append(glyphletArray.tostring()) dataList += self.GMAPs dataList += self.glyphlets - data = "".join(dataList) + data = bytesjoin(dataList) return data - + def toXML(self, writer, ttFont): writer.comment("Most of this table will be recalculated by the compiler") writer.newline() @@ -108,12 +106,12 @@ writer.endtag("glyphlets") writer.newline() - def fromXML(self, (name, attrs, content), ttFont): + def fromXML(self, name, attrs, content, ttFont): if name == "GMAPs": if not hasattr(self, "GMAPs"): self.GMAPs = [] for element in content: - if isinstance(element, StringType): + if isinstance(element, basestring): continue itemName, itemAttrs, itemContent = element if itemName == "hexdata": @@ -122,15 +120,10 @@ if not hasattr(self, "glyphlets"): self.glyphlets = [] for element in content: - if isinstance(element, StringType): + if isinstance(element, basestring): continue itemName, itemAttrs, itemContent = element if itemName == "hexdata": self.glyphlets.append(readHex(itemContent)) - else: - value = attrs["value"] - try: - value = safeEval(value) - except OverflowError: - value = long(value) - setattr(self, name, value) + else: + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/G_P_O_S_.py fonttools-3.0/Lib/fontTools/ttLib/tables/G_P_O_S_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/G_P_O_S_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/G_P_O_S_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,4 +1,6 @@ 
-from otBase import BaseTTXConverter +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter class table_G_P_O_S_(BaseTTXConverter): diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/G_S_U_B_.py fonttools-3.0/Lib/fontTools/ttLib/tables/G_S_U_B_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/G_S_U_B_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/G_S_U_B_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,4 +1,6 @@ -from otBase import BaseTTXConverter +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter class table_G_S_U_B_(BaseTTXConverter): diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_g_v_a_r.py fonttools-3.0/Lib/fontTools/ttLib/tables/_g_v_a_r.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_g_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_g_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,717 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import array +import io +import sys +import struct + +# Apple's documentation of 'gvar': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html +# +# FreeType2 source code for parsing 'gvar': +# http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/src/truetype/ttgxvar.c + +GVAR_HEADER_FORMAT = """ + > # big endian + version: H + reserved: H + axisCount: H + sharedCoordCount: H + offsetToCoord: I + glyphCount: H + flags: H + offsetToData: I +""" + +GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT) + +TUPLES_SHARE_POINT_NUMBERS = 0x8000 +TUPLE_COUNT_MASK = 0x0fff + +EMBEDDED_TUPLE_COORD = 0x8000 +INTERMEDIATE_TUPLE = 0x4000 +PRIVATE_POINT_NUMBERS = 0x2000 +TUPLE_INDEX_MASK = 0x0fff + +DELTAS_ARE_ZERO = 0x80 +DELTAS_ARE_WORDS = 0x40 +DELTA_RUN_COUNT_MASK = 0x3f + +POINTS_ARE_WORDS = 0x80 +POINT_RUN_COUNT_MASK = 0x7f + + +class table__g_v_a_r(DefaultTable.DefaultTable): + + dependencies = ["fvar", "glyf"] + + def compile(self, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + + sharedCoords = self.compileSharedCoords_(axisTags) + sharedCoordIndices = {coord:i for i, coord in enumerate(sharedCoords)} + sharedCoordSize = sum([len(c) for c in sharedCoords]) + + compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedCoordIndices) + offset = 0 + offsets = [] + for glyph in compiledGlyphs: + offsets.append(offset) + offset += len(glyph) + offsets.append(offset) + compiledOffsets, tableFormat = self.compileOffsets_(offsets) + + header = {} + header["version"] = self.version + header["reserved"] = self.reserved + header["axisCount"] = len(axisTags) + header["sharedCoordCount"] = len(sharedCoords) + header["offsetToCoord"] = GVAR_HEADER_SIZE + len(compiledOffsets) + header["glyphCount"] = len(compiledGlyphs) + header["flags"] = tableFormat + header["offsetToData"] = header["offsetToCoord"] + sharedCoordSize + compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header) + + result = 
[compiledHeader, compiledOffsets] + result.extend(sharedCoords) + result.extend(compiledGlyphs) + return bytesjoin(result) + + def compileSharedCoords_(self, axisTags): + coordCount = {} + for variations in self.variations.values(): + for gvar in variations: + coord = gvar.compileCoord(axisTags) + coordCount[coord] = coordCount.get(coord, 0) + 1 + sharedCoords = [(count, coord) for (coord, count) in coordCount.items() if count > 1] + sharedCoords.sort(reverse=True) + MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1 + sharedCoords = sharedCoords[:MAX_NUM_SHARED_COORDS] + return [c[1] for c in sharedCoords] # Strip off counts. + + def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices): + result = [] + for glyphName in ttFont.getGlyphOrder(): + glyph = ttFont["glyf"][glyphName] + numPointsInGlyph = self.getNumPoints_(glyph) + result.append(self.compileGlyph_(glyphName, numPointsInGlyph, axisTags, sharedCoordIndices)) + return result + + def compileGlyph_(self, glyphName, numPointsInGlyph, axisTags, sharedCoordIndices): + variations = self.variations.get(glyphName, []) + variations = [v for v in variations if v.hasImpact()] + if len(variations) == 0: + return b"" + + # Each glyph variation tuples modifies a set of control points. To indicate + # which exact points are getting modified, a single tuple can either refer + # to a shared set of points, or the tuple can supply its private point numbers. + # Because the impact of sharing can be positive (no need for a private point list) + # or negative (need to supply 0,0 deltas for unused points), it is not obvious + # how to determine which tuples should take their points from the shared + # pool versus have their own. Perhaps we should resort to brute force, + # and try all combinations? However, if a glyph has n variation tuples, + # we would need to try 2^n combinations (because each tuple may or may not + # be part of the shared set). How many variations tuples do glyphs have? 
+ # + # Skia.ttf: {3: 1, 5: 11, 6: 41, 7: 62, 8: 387, 13: 1, 14: 3} + # JamRegular.ttf: {3: 13, 4: 122, 5: 1, 7: 4, 8: 1, 9: 1, 10: 1} + # BuffaloGalRegular.ttf: {1: 16, 2: 13, 4: 2, 5: 4, 6: 19, 7: 1, 8: 3, 9: 18} + # (Reading example: In Skia.ttf, 41 glyphs have 6 variation tuples). + # + # Is this even worth optimizing? If we never use a shared point list, + # the private lists will consume 112K for Skia, 5K for BuffaloGalRegular, + # and 15K for JamRegular. If we always use a shared point list, + # the shared lists will consume 16K for Skia, 3K for BuffaloGalRegular, + # and 10K for JamRegular. However, in the latter case the delta arrays + # will become larger, but I haven't yet measured by how much. From + # gut feeling (which may be wrong), the optimum is to share some but + # not all points; however, then we would need to try all combinations. + # + # For the time being, we try two variants and then pick the better one: + # (a) each tuple supplies its own private set of points; + # (b) all tuples refer to a shared set of points, which consists of + # "every control point in the glyph". + allPoints = set(range(numPointsInGlyph)) + tuples = [] + data = [] + someTuplesSharePoints = False + for gvar in variations: + privateTuple, privateData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) + sharedTuple, sharedData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=allPoints) + # TODO: If we use shared points, Apple MacOS X 10.9.5 cannot display our fonts. + # This is probably a problem with our code; find the problem and fix it. 
+ #if (len(sharedTuple) + len(sharedData)) < (len(privateTuple) + len(privateData)): + if False: + tuples.append(sharedTuple) + data.append(sharedData) + someTuplesSharePoints = True + else: + tuples.append(privateTuple) + data.append(privateData) + if someTuplesSharePoints: + data = bytechr(0) + bytesjoin(data) # 0x00 = "all points in glyph" + tupleCount = TUPLES_SHARE_POINT_NUMBERS | len(tuples) + else: + data = bytesjoin(data) + tupleCount = len(tuples) + tuples = bytesjoin(tuples) + result = struct.pack(">HH", tupleCount, 4 + len(tuples)) + tuples + data + if len(result) % 2 != 0: + result = result + b"\0" # padding + return result + + def decompile(self, data, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + glyphs = ttFont.getGlyphOrder() + sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self) + assert len(glyphs) == self.glyphCount + assert len(axisTags) == self.axisCount + offsets = self.decompileOffsets_(data[GVAR_HEADER_SIZE:], tableFormat=(self.flags & 1), glyphCount=self.glyphCount) + sharedCoords = self.decompileSharedCoords_(axisTags, data) + self.variations = {} + for i in range(self.glyphCount): + glyphName = glyphs[i] + glyph = ttFont["glyf"][glyphName] + numPointsInGlyph = self.getNumPoints_(glyph) + gvarData = data[self.offsetToData + offsets[i] : self.offsetToData + offsets[i + 1]] + self.variations[glyphName] = \ + self.decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData) + + def decompileSharedCoords_(self, axisTags, data): + result, _pos = GlyphVariation.decompileCoords_(axisTags, self.sharedCoordCount, data, self.offsetToCoord) + return result + + @staticmethod + def decompileOffsets_(data, tableFormat, glyphCount): + if tableFormat == 0: + # Short format: array of UInt16 + offsets = array.array("H") + offsetsSize = (glyphCount + 1) * 2 + else: + # Long format: array of UInt32 + offsets = array.array("I") + offsetsSize = (glyphCount + 1) * 4 + offsets.fromstring(data[0 : offsetsSize]) + if 
sys.byteorder != "big": + offsets.byteswap() + + # In the short format, offsets need to be multiplied by 2. + # This is not documented in Apple's TrueType specification, + # but can be inferred from the FreeType implementation, and + # we could verify it with two sample GX fonts. + if tableFormat == 0: + offsets = [off * 2 for off in offsets] + + return offsets + + @staticmethod + def compileOffsets_(offsets): + """Packs a list of offsets into a 'gvar' offset table. + + Returns a pair (bytestring, tableFormat). Bytestring is the + packed offset table. Format indicates whether the table + uses short (tableFormat=0) or long (tableFormat=1) integers. + The returned tableFormat should get packed into the flags field + of the 'gvar' header. + """ + assert len(offsets) >= 2 + for i in range(1, len(offsets)): + assert offsets[i - 1] <= offsets[i] + if max(offsets) <= 0xffff * 2: + packed = array.array("H", [n >> 1 for n in offsets]) + tableFormat = 0 + else: + packed = array.array("I", offsets) + tableFormat = 1 + if sys.byteorder != "big": + packed.byteswap() + return (packed.tostring(), tableFormat) + + def decompileGlyph_(self, numPointsInGlyph, sharedCoords, axisTags, data): + if len(data) < 4: + return [] + numAxes = len(axisTags) + tuples = [] + flags, offsetToData = struct.unpack(">HH", data[:4]) + pos = 4 + dataPos = offsetToData + if (flags & TUPLES_SHARE_POINT_NUMBERS) != 0: + sharedPoints, dataPos = GlyphVariation.decompilePoints_(numPointsInGlyph, data, dataPos) + else: + sharedPoints = [] + for _ in range(flags & TUPLE_COUNT_MASK): + dataSize, flags = struct.unpack(">HH", data[pos:pos+4]) + tupleSize = GlyphVariation.getTupleSize_(flags, numAxes) + tupleData = data[pos : pos + tupleSize] + pointDeltaData = data[dataPos : dataPos + dataSize] + tuples.append(self.decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, tupleData, pointDeltaData)) + pos += tupleSize + dataPos += dataSize + return tuples + + @staticmethod + def 
decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, data, tupleData): + flags = struct.unpack(">H", data[2:4])[0] + + pos = 4 + if (flags & EMBEDDED_TUPLE_COORD) == 0: + coord = sharedCoords[flags & TUPLE_INDEX_MASK] + else: + coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + if (flags & INTERMEDIATE_TUPLE) != 0: + minCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + maxCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + else: + minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) + axes = {} + for axis in axisTags: + coords = minCoord[axis], coord[axis], maxCoord[axis] + if coords != (0.0, 0.0, 0.0): + axes[axis] = coords + pos = 0 + if (flags & PRIVATE_POINT_NUMBERS) != 0: + points, pos = GlyphVariation.decompilePoints_(numPointsInGlyph, tupleData, pos) + else: + points = sharedPoints + deltas_x, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) + deltas_y, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) + deltas = [None] * numPointsInGlyph + for p, x, y in zip(points, deltas_x, deltas_y): + deltas[p] = (x, y) + return GlyphVariation(axes, deltas) + + @staticmethod + def computeMinMaxCoord_(coord): + minCoord = {} + maxCoord = {} + for (axis, value) in coord.items(): + minCoord[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + maxCoord[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + return (minCoord, maxCoord) + + def toXML(self, writer, ttFont, progress=None): + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("reserved", value=self.reserved) + writer.newline() + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + for glyphName in ttFont.getGlyphOrder(): + variations = self.variations.get(glyphName) + if not variations: + continue + writer.begintag("glyphVariations", glyph=glyphName) + writer.newline() + for gvar in variations: + gvar.toXML(writer, axisTags) + writer.endtag("glyphVariations") + 
writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.version = safeEval(attrs["value"]) + elif name == "reserved": + self.reserved = safeEval(attrs["value"]) + elif name == "glyphVariations": + if not hasattr(self, "variations"): + self.variations = {} + glyphName = attrs["glyph"] + glyph = ttFont["glyf"][glyphName] + numPointsInGlyph = self.getNumPoints_(glyph) + glyphVariations = [] + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + if name == "tuple": + gvar = GlyphVariation({}, [None] * numPointsInGlyph) + glyphVariations.append(gvar) + for tupleElement in content: + if isinstance(tupleElement, tuple): + tupleName, tupleAttrs, tupleContent = tupleElement + gvar.fromXML(tupleName, tupleAttrs, tupleContent) + self.variations[glyphName] = glyphVariations + + @staticmethod + def getNumPoints_(glyph): + NUM_PHANTOM_POINTS = 4 + if glyph.isComposite(): + return len(glyph.components) + NUM_PHANTOM_POINTS + else: + # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute. + return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS + + +class GlyphVariation(object): + def __init__(self, axes, coordinates): + self.axes = axes + self.coordinates = coordinates + + def __repr__(self): + axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])) + return "" % (axes, self.coordinates) + + def __eq__(self, other): + return self.coordinates == other.coordinates and self.axes == other.axes + + def getUsedPoints(self): + result = set() + for i, point in enumerate(self.coordinates): + if point is not None: + result.add(i) + return result + + def hasImpact(self): + """Returns True if this GlyphVariation has any visible impact. + + If the result is False, the GlyphVariation can be omitted from the font + without making any visible difference. 
+ """ + for c in self.coordinates: + if c is not None: + return True + return False + + def toXML(self, writer, axisTags): + writer.begintag("tuple") + writer.newline() + for axis in axisTags: + value = self.axes.get(axis) + if value is not None: + minValue, value, maxValue = value + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + if minValue == defaultMinValue and maxValue == defaultMaxValue: + writer.simpletag("coord", axis=axis, value=value) + else: + writer.simpletag("coord", axis=axis, value=value, min=minValue, max=maxValue) + writer.newline() + wrote_any_points = False + for i, point in enumerate(self.coordinates): + if point is not None: + writer.simpletag("delta", pt=i, x=point[0], y=point[1]) + writer.newline() + wrote_any_points = True + if not wrote_any_points: + writer.comment("no deltas") + writer.newline() + writer.endtag("tuple") + writer.newline() + + def fromXML(self, name, attrs, _content): + if name == "coord": + axis = attrs["axis"] + value = float(attrs["value"]) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + minValue = float(attrs.get("min", defaultMinValue)) + maxValue = float(attrs.get("max", defaultMaxValue)) + self.axes[axis] = (minValue, value, maxValue) + elif name == "delta": + point = safeEval(attrs["pt"]) + x = safeEval(attrs["x"]) + y = safeEval(attrs["y"]) + self.coordinates[point] = (x, y) + + def compile(self, axisTags, sharedCoordIndices, sharedPoints): + tupleData = [] + + coord = self.compileCoord(axisTags) + if coord in sharedCoordIndices: + flags = sharedCoordIndices[coord] + else: + flags = EMBEDDED_TUPLE_COORD + tupleData.append(coord) + + intermediateCoord = self.compileIntermediateCoord(axisTags) + if intermediateCoord is not None: + flags |= INTERMEDIATE_TUPLE + tupleData.append(intermediateCoord) + + if sharedPoints is not None: + auxData = 
self.compileDeltas(sharedPoints) + else: + flags |= PRIVATE_POINT_NUMBERS + points = self.getUsedPoints() + numPointsInGlyph = len(self.coordinates) + auxData = self.compilePoints(points, numPointsInGlyph) + self.compileDeltas(points) + + tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(tupleData) + return (tupleData, auxData) + + def compileCoord(self, axisTags): + result = [] + for axis in axisTags: + _minValue, value, _maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + result.append(struct.pack(">h", floatToFixed(value, 14))) + return bytesjoin(result) + + def compileIntermediateCoord(self, axisTags): + needed = False + for axis in axisTags: + minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + if (minValue != defaultMinValue) or (maxValue != defaultMaxValue): + needed = True + break + if not needed: + return None + minCoords = [] + maxCoords = [] + for axis in axisTags: + minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + minCoords.append(struct.pack(">h", floatToFixed(minValue, 14))) + maxCoords.append(struct.pack(">h", floatToFixed(maxValue, 14))) + return bytesjoin(minCoords + maxCoords) + + @staticmethod + def decompileCoord_(axisTags, data, offset): + coord = {} + pos = offset + for axis in axisTags: + coord[axis] = fixedToFloat(struct.unpack(">h", data[pos:pos+2])[0], 14) + pos += 2 + return coord, pos + + @staticmethod + def decompileCoords_(axisTags, numCoords, data, offset): + result = [] + pos = offset + for _ in range(numCoords): + coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + result.append(coord) + return result, pos + + @staticmethod + def compilePoints(points, numPointsInGlyph): + # If the set consists of all points in the glyph, it gets encoded with + # a special encoding: a single zero byte. 
+ if len(points) == numPointsInGlyph: + return b"\0" + + # In the 'gvar' table, the packing of point numbers is a little surprising. + # It consists of multiple runs, each being a delta-encoded list of integers. + # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as + # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1. + # There are two types of runs, with values being either 8 or 16 bit unsigned + # integers. + points = list(points) + points.sort() + numPoints = len(points) + + # The binary representation starts with the total number of points in the set, + # encoded into one or two bytes depending on the value. + if numPoints < 0x80: + result = [bytechr(numPoints)] + else: + result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)] + + MAX_RUN_LENGTH = 127 + pos = 0 + while pos < numPoints: + run = io.BytesIO() + runLength = 0 + lastValue = 0 + useByteEncoding = (points[pos] <= 0xff) + while pos < numPoints and runLength <= MAX_RUN_LENGTH: + curValue = points[pos] + delta = curValue - lastValue + if useByteEncoding and delta > 0xff: + # we need to start a new run (which will not use byte encoding) + break + if useByteEncoding: + run.write(bytechr(delta)) + else: + run.write(bytechr(delta >> 8)) + run.write(bytechr(delta & 0xff)) + lastValue = curValue + pos += 1 + runLength += 1 + if useByteEncoding: + runHeader = bytechr(runLength - 1) + else: + runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS) + result.append(runHeader) + result.append(run.getvalue()) + + return bytesjoin(result) + + @staticmethod + def decompilePoints_(numPointsInGlyph, data, offset): + """(numPointsInGlyph, data, offset) --> ([point1, point2, ...], newOffset)""" + pos = offset + numPointsInData = byteord(data[pos]) + pos += 1 + if (numPointsInData & POINTS_ARE_WORDS) != 0: + numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | byteord(data[pos]) + pos += 1 + if numPointsInData == 0: + return 
(range(numPointsInGlyph), pos) + result = [] + while len(result) < numPointsInData: + runHeader = byteord(data[pos]) + pos += 1 + numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1 + point = 0 + if (runHeader & POINTS_ARE_WORDS) == 0: + for _ in range(numPointsInRun): + point += byteord(data[pos]) + pos += 1 + result.append(point) + else: + for _ in range(numPointsInRun): + point += struct.unpack(">H", data[pos:pos+2])[0] + pos += 2 + result.append(point) + if max(result) >= numPointsInGlyph: + raise TTLibError("malformed 'gvar' table") + return (result, pos) + + def compileDeltas(self, points): + deltaX = [] + deltaY = [] + for p in sorted(list(points)): + c = self.coordinates[p] + if c is not None: + deltaX.append(c[0]) + deltaY.append(c[1]) + return self.compileDeltaValues_(deltaX) + self.compileDeltaValues_(deltaY) + + @staticmethod + def compileDeltaValues_(deltas): + """[value1, value2, value3, ...] --> bytestring + + Emits a sequence of runs. Each run starts with a + byte-sized header whose 6 least significant bits + (header & 0x3F) indicate how many values are encoded + in this run. The stored length is the actual length + minus one; run lengths are thus in the range [1..64]. + If the header byte has its most significant bit (0x80) + set, all values in this run are zero, and no data + follows. Otherwise, the header byte is followed by + ((header & 0x3F) + 1) signed values. If (header & + 0x40) is clear, the delta values are stored as signed + bytes; if (header & 0x40) is set, the delta values are + signed 16-bit integers. + """ # Explaining the format because the 'gvar' spec is hard to understand. 
+ stream = io.BytesIO() + pos = 0 + while pos < len(deltas): + value = deltas[pos] + if value == 0: + pos = GlyphVariation.encodeDeltaRunAsZeroes_(deltas, pos, stream) + elif value >= -128 and value <= 127: + pos = GlyphVariation.encodeDeltaRunAsBytes_(deltas, pos, stream) + else: + pos = GlyphVariation.encodeDeltaRunAsWords_(deltas, pos, stream) + return stream.getvalue() + + @staticmethod + def encodeDeltaRunAsZeroes_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64 and deltas[pos] == 0: + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(DELTAS_ARE_ZERO | (runLength - 1))) + return pos + + @staticmethod + def encodeDeltaRunAsBytes_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64: + value = deltas[pos] + if value < -128 or value > 127: + break + # Within a byte-encoded run of deltas, a single zero + # is best stored literally as 0x00 value. However, + # if are two or more zeroes in a sequence, it is + # better to start a new run. For example, the sequence + # of deltas [15, 15, 0, 15, 15] becomes 6 bytes + # (04 0F 0F 00 0F 0F) when storing the zero value + # literally, but 7 bytes (01 0F 0F 80 01 0F 0F) + # when starting a new run. + if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0: + break + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(runLength - 1)) + for i in range(offset, pos): + stream.write(struct.pack('b', deltas[i])) + return pos + + @staticmethod + def encodeDeltaRunAsWords_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64: + value = deltas[pos] + # Within a word-encoded run of deltas, it is easiest + # to start a new run (with a different encoding) + # whenever we encounter a zero value. 
For example, + # the sequence [0x6666, 0, 0x7777] needs 7 bytes when + # storing the zero literally (42 66 66 00 00 77 77), + # and equally 7 bytes when starting a new run + # (40 66 66 80 40 77 77). + if value == 0: + break + + # Within a word-encoded run of deltas, a single value + # in the range (-128..127) should be encoded literally + # because it is more compact. For example, the sequence + # [0x6666, 2, 0x7777] becomes 7 bytes when storing + # the value literally (42 66 66 00 02 77 77), but 8 bytes + # when starting a new run (40 66 66 00 02 40 77 77). + isByteEncodable = lambda value: value >= -128 and value <= 127 + if isByteEncodable(value) and pos+1 < numDeltas and isByteEncodable(deltas[pos+1]): + break + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(DELTAS_ARE_WORDS | (runLength - 1))) + for i in range(offset, pos): + stream.write(struct.pack('>h', deltas[i])) + return pos + + @staticmethod + def decompileDeltas_(numDeltas, data, offset): + """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)""" + result = [] + pos = offset + while len(result) < numDeltas: + runHeader = byteord(data[pos]) + pos += 1 + numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1 + if (runHeader & DELTAS_ARE_ZERO) != 0: + result.extend([0] * numDeltasInRun) + elif (runHeader & DELTAS_ARE_WORDS) != 0: + for _ in range(numDeltasInRun): + result.append(struct.unpack(">h", data[pos:pos+2])[0]) + pos += 2 + else: + for _ in range(numDeltasInRun): + result.append(struct.unpack(">b", data[pos:pos+1])[0]) + pos += 1 + assert len(result) == numDeltas + return (result, pos) + + @staticmethod + def getTupleSize_(flags, axisCount): + size = 4 + if (flags & EMBEDDED_TUPLE_COORD) != 0: + size += axisCount * 2 + if (flags & INTERMEDIATE_TUPLE) != 0: + size += axisCount * 4 + return size diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_g_v_a_r_test.py fonttools-3.0/Lib/fontTools/ttLib/tables/_g_v_a_r_test.py --- 
fonttools-2.4/Lib/fontTools/ttLib/tables/_g_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_g_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,539 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r, GlyphVariation +import random +import unittest + +def hexencode(s): + h = hexStr(s).upper() + return ' '.join([h[i:i+2] for i in range(0, len(h), 2)]) + +# Glyph variation table of uppercase I in the Skia font, as printed in Apple's +# TrueType spec. The actual Skia font uses a different table for uppercase I. +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html +SKIA_GVAR_I = deHexStr( + "00 08 00 24 00 33 20 00 00 15 20 01 00 1B 20 02 " + "00 24 20 03 00 15 20 04 00 26 20 07 00 0D 20 06 " + "00 1A 20 05 00 40 01 01 01 81 80 43 FF 7E FF 7E " + "FF 7E FF 7E 00 81 45 01 01 01 03 01 04 01 04 01 " + "04 01 02 80 40 00 82 81 81 04 3A 5A 3E 43 20 81 " + "04 0E 40 15 45 7C 83 00 0D 9E F3 F2 F0 F0 F0 F0 " + "F3 9E A0 A1 A1 A1 9F 80 00 91 81 91 00 0D 0A 0A " + "09 0A 0A 0A 0A 0A 0A 0A 0A 0A 0A 0B 80 00 15 81 " + "81 00 C4 89 00 C4 83 00 0D 80 99 98 96 96 96 96 " + "99 80 82 83 83 83 81 80 40 FF 18 81 81 04 E6 F9 " + "10 21 02 81 04 E8 E5 EB 4D DA 83 00 0D CE D3 D4 " + "D3 D3 D3 D5 D2 CE CC CD CD CD CD 80 00 A1 81 91 " + "00 0D 07 03 04 02 02 02 03 03 07 07 08 08 08 07 " + "80 00 09 81 81 00 28 40 00 A4 02 24 24 66 81 04 " + "08 FA FA FA 28 83 00 82 02 FF FF FF 83 02 01 01 " + "01 84 91 00 80 06 07 08 08 08 08 0A 07 80 03 FE " + "FF FF FF 81 00 08 81 82 02 EE EE EE 8B 6D 00") + +# Shared coordinates in the Skia font, as printed in Apple's TrueType spec. 
+SKIA_SHARED_COORDS = deHexStr( + "40 00 00 00 C0 00 00 00 00 00 40 00 00 00 C0 00 " + "C0 00 C0 00 40 00 C0 00 40 00 40 00 C0 00 40 00") + + +class GlyphVariationTableTest(unittest.TestCase): + def test_compileOffsets_shortFormat(self): + self.assertEqual((deHexStr("00 00 00 02 FF C0"), 0), + table__g_v_a_r.compileOffsets_([0, 4, 0x1ff80])) + + def test_compileOffsets_longFormat(self): + self.assertEqual((deHexStr("00 00 00 00 00 00 00 04 CA FE BE EF"), 1), + table__g_v_a_r.compileOffsets_([0, 4, 0xCAFEBEEF])) + + def test_decompileOffsets_shortFormat(self): + decompileOffsets = table__g_v_a_r.decompileOffsets_ + data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") + self.assertEqual([2*0x0011, 2*0x2233, 2*0x4455, 2*0x6677, 2*0x8899, 2*0xaabb], + list(decompileOffsets(data, tableFormat=0, glyphCount=5))) + + def test_decompileOffsets_longFormat(self): + decompileOffsets = table__g_v_a_r.decompileOffsets_ + data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") + self.assertEqual([0x00112233, 0x44556677, 0x8899aabb], + list(decompileOffsets(data, tableFormat=1, glyphCount=2))) + + def test_compileGlyph_noVariations(self): + table = table__g_v_a_r() + table.variations = {} + self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) + + def test_compileGlyph_emptyVariations(self): + table = table__g_v_a_r() + table.variations = {"glyphname": []} + self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) + + def test_compileGlyph_onlyRedundantVariations(self): + table = table__g_v_a_r() + axes = {"wght": (0.3, 0.4, 0.5), "opsz": (0.7, 0.8, 0.9)} + table.variations = {"glyphname": [ + GlyphVariation(axes, [None] * 4), + GlyphVariation(axes, [None] * 4), + GlyphVariation(axes, [None] * 4) + ]} + self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) + + def test_compileGlyph_roundTrip(self): + table = table__g_v_a_r() + axisTags = ["wght", "wdth"] + numPointsInGlyph = 4 + glyphCoords = [(1,1), 
(2,2), (3,3), (4,4)] + gvar1 = GlyphVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) + gvar2 = GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) + table.variations = {"oslash": [gvar1, gvar2]} + data = table.compileGlyph_("oslash", numPointsInGlyph, axisTags, {}) + self.assertEqual([gvar1, gvar2], table.decompileGlyph_(numPointsInGlyph, {}, axisTags, data)) + + def test_compileSharedCoords(self): + table = table__g_v_a_r() + table.variations = {} + deltas = [None] * 4 + table.variations["A"] = [ + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.5, 0.7, 1.0)}, deltas) + ] + table.variations["B"] = [ + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.7, 1.0)}, deltas), + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.8, 1.0)}, deltas) + ] + table.variations["C"] = [ + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.7, 1.0)}, deltas), + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.8, 1.0)}, deltas), + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.9, 1.0)}, deltas) + ] + # {"wght":1.0, "wdth":0.7} is shared 3 times; {"wght":1.0, "wdth":0.8} is shared twice. + # Min and max values are not part of the shared coordinate pool and should get ignored. 
+ result = table.compileSharedCoords_(["wght", "wdth"]) + self.assertEqual(["40 00 2C CD", "40 00 33 33"], [hexencode(c) for c in result]) + + def test_decompileSharedCoords_Skia(self): + table = table__g_v_a_r() + table.offsetToCoord = 0 + table.sharedCoordCount = 8 + sharedCoords = table.decompileSharedCoords_(["wght", "wdth"], SKIA_SHARED_COORDS) + self.assertEqual([ + {"wght": 1.0, "wdth": 0.0}, + {"wght": -1.0, "wdth": 0.0}, + {"wght": 0.0, "wdth": 1.0}, + {"wght": 0.0, "wdth": -1.0}, + {"wght": -1.0, "wdth": -1.0}, + {"wght": 1.0, "wdth": -1.0}, + {"wght": 1.0, "wdth": 1.0}, + {"wght": -1.0, "wdth": 1.0} + ], sharedCoords) + + def test_decompileSharedCoords_empty(self): + table = table__g_v_a_r() + table.offsetToCoord = 0 + table.sharedCoordCount = 0 + self.assertEqual([], table.decompileSharedCoords_(["wght"], b"")) + + def test_decompileGlyph_Skia_I(self): + axes = ["wght", "wdth"] + table = table__g_v_a_r() + table.offsetToCoord = 0 + table.sharedCoordCount = 8 + table.axisCount = len(axes) + sharedCoords = table.decompileSharedCoords_(axes, SKIA_SHARED_COORDS) + tuples = table.decompileGlyph_(18, sharedCoords, axes, SKIA_GVAR_I) + self.assertEqual(8, len(tuples)) + self.assertEqual({"wght": (0.0, 1.0, 1.0)}, tuples[0].axes) + self.assertEqual("257,0 -127,0 -128,58 -130,90 -130,62 -130,67 -130,32 -127,0 257,0 " + "259,14 260,64 260,21 260,69 258,124 0,0 130,0 0,0 0,0", + " ".join(["%d,%d" % c for c in tuples[0].coordinates])) + + def test_decompileGlyph_empty(self): + table = table__g_v_a_r() + self.assertEqual([], table.decompileGlyph_(numPointsInGlyph=5, sharedCoords=[], axisTags=[], data=b"")) + + def test_computeMinMaxCord(self): + coord = {"wght": -0.3, "wdth": 0.7} + minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) + self.assertEqual({"wght": -0.3, "wdth": 0.0}, minCoord) + self.assertEqual({"wght": 0.0, "wdth": 0.7}, maxCoord) + +class GlyphVariationTest(unittest.TestCase): + def test_equal(self): + gvar1 = GlyphVariation({"wght":(0.0, 
1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + self.assertEqual(gvar1, gvar2) + + def test_equal_differentAxes(self): + gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + gvar2 = GlyphVariation({"wght":(0.7, 0.8, 0.9)}, [(0,0), (9,8), (7,6)]) + self.assertNotEqual(gvar1, gvar2) + + def test_equal_differentCoordinates(self): + gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8)]) + self.assertNotEqual(gvar1, gvar2) + + def test_hasImpact_someDeltasNotZero(self): + axes = {"wght":(0.0, 1.0, 1.0)} + gvar = GlyphVariation(axes, [(0,0), (9,8), (7,6)]) + self.assertTrue(gvar.hasImpact()) + + def test_hasImpact_allDeltasZero(self): + axes = {"wght":(0.0, 1.0, 1.0)} + gvar = GlyphVariation(axes, [(0,0), (0,0), (0,0)]) + self.assertTrue(gvar.hasImpact()) + + def test_hasImpact_allDeltasNone(self): + axes = {"wght":(0.0, 1.0, 1.0)} + gvar = GlyphVariation(axes, [None, None, None]) + self.assertFalse(gvar.hasImpact()) + + def test_toXML(self): + writer = XMLWriter(BytesIO()) + axes = {"wdth":(0.3, 0.4, 0.5), "wght":(0.0, 1.0, 1.0), "opsz":(-0.7, -0.7, 0.0)} + g = GlyphVariation(axes, [(9,8), None, (7,6), (0,0), (-1,-2), None]) + g.toXML(writer, ["wdth", "wght", "opsz"]) + self.assertEqual([ + '', + '', + '', + '', + '', + '', + '', + '', + '' + ], GlyphVariationTest.xml_lines(writer)) + + def test_toXML_allDeltasNone(self): + writer = XMLWriter(BytesIO()) + axes = {"wght":(0.0, 1.0, 1.0)} + g = GlyphVariation(axes, [None] * 5) + g.toXML(writer, ["wght", "wdth"]) + self.assertEqual([ + '', + '', + '', + '' + ], GlyphVariationTest.xml_lines(writer)) + + def test_fromXML(self): + g = GlyphVariation({}, [None] * 4) + g.fromXML("coord", {"axis":"wdth", "min":"0.3", "value":"0.4", "max":"0.5"}, []) + g.fromXML("coord", {"axis":"wght", "value":"1.0"}, []) + g.fromXML("coord", {"axis":"opsz", "value":"-0.5"}, []) + 
g.fromXML("delta", {"pt":"1", "x":"33", "y":"44"}, []) + g.fromXML("delta", {"pt":"2", "x":"-2", "y":"170"}, []) + self.assertEqual({ + "wdth":( 0.3, 0.4, 0.5), + "wght":( 0.0, 1.0, 1.0), + "opsz":(-0.5, -0.5, 0.0) + }, g.axes) + self.assertEqual([None, (33, 44), (-2, 170), None], g.coordinates) + + def test_compile_sharedCoords_nonIntermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) + # len(data)=8; flags=None; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[] + self.assertEqual("00 08 00 77", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_sharedCoords_intermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.3, 0.5, 0.7), "wdth": (0.1, 0.8, 0.9)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) + # len(data)=8; flags=INTERMEDIATE_TUPLE; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[(0.3, 0.1), (0.7, 0.9)] + self.assertEqual("00 08 40 77 13 33 06 66 2C CD 39 9A", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_sharedCoords_nonIntermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[] + 
self.assertEqual("00 09 20 77", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_sharedCoords_intermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 1.0)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[(0.0, 0.0), (1.0, 1.0)] + self.assertEqual("00 09 60 77 00 00 00 00 40 00 40 00", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_nonIntermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) + # len(data)=8; flags=EMBEDDED_TUPLE_COORD + # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] + self.assertEqual("00 08 80 00 20 00 33 33", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_intermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) + # len(data)=8; flags=EMBEDDED_TUPLE_COORD + # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[(0.0, 0.0), (1.0, 0.8)] + self.assertEqual("00 08 C0 00 20 00 33 33 00 00 00 00 40 00 33 33", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # 
deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_nonIntermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_TUPLE_COORD + # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] + self.assertEqual("00 09 A0 00 20 00 33 33", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_intermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_TUPLE|EMBEDDED_TUPLE_COORD + # embeddedCoord=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)] + self.assertEqual("00 09 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compileCoord(self): + gvar = GlyphVariation({"wght": (-1.0, -1.0, -1.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) + self.assertEqual("C0 00 20 00", hexencode(gvar.compileCoord(["wght", "wdth"]))) + self.assertEqual("20 00 C0 00", hexencode(gvar.compileCoord(["wdth", "wght"]))) + self.assertEqual("C0 00", hexencode(gvar.compileCoord(["wght"]))) + + def test_compileIntermediateCoord(self): + gvar = GlyphVariation({"wght": (-1.0, -1.0, 0.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) + self.assertEqual("C0 00 19 9A 00 00 26 66", hexencode(gvar.compileIntermediateCoord(["wght", "wdth"]))) + self.assertEqual("19 9A C0 00 26 66 00 00", 
hexencode(gvar.compileIntermediateCoord(["wdth", "wght"]))) + self.assertEqual(None, gvar.compileIntermediateCoord(["wght"])) + self.assertEqual("19 9A 26 66", hexencode(gvar.compileIntermediateCoord(["wdth"]))) + + def test_decompileCoord(self): + decompileCoord = GlyphVariation.decompileCoord_ + data = deHexStr("DE AD C0 00 20 00 DE AD") + self.assertEqual(({"wght": -1.0, "wdth": 0.5}, 6), decompileCoord(["wght", "wdth"], data, 2)) + + def test_decompileCoord_roundTrip(self): + # Make sure we are not affected by https://github.com/behdad/fonttools/issues/286 + data = deHexStr("7F B9 80 35") + values, _ = GlyphVariation.decompileCoord_(["wght", "wdth"], data, 0) + axisValues = {axis:(val, val, val) for axis, val in values.items()} + gvar = GlyphVariation(axisValues, [None] * 4) + self.assertEqual("7F B9 80 35", hexencode(gvar.compileCoord(["wght", "wdth"]))) + + def test_decompileCoords(self): + decompileCoords = GlyphVariation.decompileCoords_ + axes = ["wght", "wdth", "opsz"] + coords = [ + {"wght": 1.0, "wdth": 0.0, "opsz": 0.5}, + {"wght": -1.0, "wdth": 0.0, "opsz": 0.25}, + {"wght": 0.0, "wdth": -1.0, "opsz": 1.0} + ] + data = deHexStr("DE AD 40 00 00 00 20 00 C0 00 00 00 10 00 00 00 C0 00 40 00") + self.assertEqual((coords, 20), decompileCoords(axes, numCoords=3, data=data, offset=2)) + + def test_compilePoints(self): + compilePoints = lambda p: GlyphVariation.compilePoints(set(p), numPointsInGlyph=999) + self.assertEqual("00", hexencode(compilePoints(range(999)))) # all points in glyph + self.assertEqual("01 00 07", hexencode(compilePoints([7]))) + self.assertEqual("01 80 FF FF", hexencode(compilePoints([65535]))) + self.assertEqual("02 01 09 06", hexencode(compilePoints([9, 15]))) + self.assertEqual("06 05 07 01 F7 02 01 F2", hexencode(compilePoints([7, 8, 255, 257, 258, 500]))) + self.assertEqual("03 01 07 01 80 01 F4", hexencode(compilePoints([7, 8, 500]))) + self.assertEqual("04 01 07 01 81 BE EF 0C 0F", hexencode(compilePoints([7, 8, 0xBEEF, 0xCAFE]))) 
+ self.assertEqual("81 2C" + # 300 points (0x12c) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] + hexencode(compilePoints(range(300)))) + self.assertEqual("81 8F" + # 399 points (0x18f) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] + " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] + hexencode(compilePoints(range(399)))) + + def test_decompilePoints(self): + numPointsInGlyph = 65536 + allPoints = list(range(numPointsInGlyph)) + def decompilePoints(data, offset): + points, offset = GlyphVariation.decompilePoints_(numPointsInGlyph, deHexStr(data), offset) + # Conversion to list needed for Python 3. + return (list(points), offset) + # all points in glyph + self.assertEqual((allPoints, 1), decompilePoints("00", 0)) + # all points in glyph (in overly verbose encoding, not explicitly prohibited by spec) + self.assertEqual((allPoints, 2), decompilePoints("80 00", 0)) + # 2 points; first run: [9, 9+6] + self.assertEqual(([9, 15], 4), decompilePoints("02 01 09 06", 0)) + # 2 points; first run: [0xBEEF, 0xCAFE]. (0x0C0F = 0xCAFE - 0xBEEF) + self.assertEqual(([0xBEEF, 0xCAFE], 6), decompilePoints("02 81 BE EF 0C 0F", 0)) + # 1 point; first run: [7] + self.assertEqual(([7], 3), decompilePoints("01 00 07", 0)) + # 1 point; first run: [7] in overly verbose encoding + self.assertEqual(([7], 4), decompilePoints("01 80 00 07", 0)) + # 1 point; first run: [65535]; requires words to be treated as unsigned numbers + self.assertEqual(([65535], 4), decompilePoints("01 80 FF FF", 0)) + # 4 points; first run: [7, 8]; second run: [255, 257]. 
257 is stored in delta-encoded bytes (0xFF + 2). + self.assertEqual(([7, 8, 255, 257], 7), decompilePoints("04 01 07 01 01 FF 02", 0)) + # combination of all encodings, preceded and followed by 4 bytes of unused data + data = "DE AD DE AD 04 01 07 01 81 BE EF 0C 0F DE AD DE AD" + self.assertEqual(([7, 8, 0xBEEF, 0xCAFE], 13), decompilePoints(data, 4)) + self.assertSetEqual(set(range(300)), set(decompilePoints( + "81 2C" + # 300 points (0x12c) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] + 0)[0])) + self.assertSetEqual(set(range(399)), set(decompilePoints( + "81 8F" + # 399 points (0x18f) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] + " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] + 0)[0])) + + def test_decompilePoints_shouldGuardAgainstBadPointNumbers(self): + decompilePoints = GlyphVariation.decompilePoints_ + # 2 points; first run: [3, 9]. 
+ numPointsInGlyph = 8 + self.assertRaises(TTLibError, decompilePoints, numPointsInGlyph, deHexStr("02 01 03 06"), 0) + + def test_decompilePoints_roundTrip(self): + numPointsInGlyph = 500 # greater than 255, so we also exercise code path for 16-bit encoding + compile = lambda points: GlyphVariation.compilePoints(points, numPointsInGlyph) + decompile = lambda data: set(GlyphVariation.decompilePoints_(numPointsInGlyph, data, 0)[0]) + for i in range(50): + points = set(random.sample(range(numPointsInGlyph), 30)) + self.assertSetEqual(points, decompile(compile(points)), + "failed round-trip decompile/compilePoints; points=%s" % points) + allPoints = set(range(numPointsInGlyph)) + self.assertSetEqual(allPoints, decompile(compile(allPoints))) + + def test_compileDeltas(self): + gvar = GlyphVariation({}, [(0,0), (1, 0), (2, 0), (3, 3)]) + points = {1, 2} + # deltaX for points: [1, 2]; deltaY for points: [0, 0] + self.assertEqual("01 01 02 81", hexencode(gvar.compileDeltas(points))) + + def test_compileDeltaValues(self): + compileDeltaValues = lambda values: hexencode(GlyphVariation.compileDeltaValues_(values)) + # zeroes + self.assertEqual("80", compileDeltaValues([0])) + self.assertEqual("BF", compileDeltaValues([0] * 64)) + self.assertEqual("BF 80", compileDeltaValues([0] * 65)) + self.assertEqual("BF A3", compileDeltaValues([0] * 100)) + self.assertEqual("BF BF BF BF", compileDeltaValues([0] * 256)) + # bytes + self.assertEqual("00 01", compileDeltaValues([1])) + self.assertEqual("06 01 02 03 7F 80 FF FE", compileDeltaValues([1, 2, 3, 127, -128, -1, -2])) + self.assertEqual("3F" + (64 * " 7F"), compileDeltaValues([127] * 64)) + self.assertEqual("3F" + (64 * " 7F") + " 00 7F", compileDeltaValues([127] * 65)) + # words + self.assertEqual("40 66 66", compileDeltaValues([0x6666])) + self.assertEqual("43 66 66 7F FF FF FF 80 00", compileDeltaValues([0x6666, 32767, -1, -32768])) + self.assertEqual("7F" + (64 * " 11 22"), compileDeltaValues([0x1122] * 64)) + 
self.assertEqual("7F" + (64 * " 11 22") + " 40 11 22", compileDeltaValues([0x1122] * 65)) + # bytes, zeroes, bytes: a single zero is more compact when encoded as part of the bytes run + self.assertEqual("04 7F 7F 00 7F 7F", compileDeltaValues([127, 127, 0, 127, 127])) + self.assertEqual("01 7F 7F 81 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 127, 127])) + self.assertEqual("01 7F 7F 82 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 127, 127])) + self.assertEqual("01 7F 7F 83 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 0, 127, 127])) + # bytes, zeroes + self.assertEqual("01 01 00", compileDeltaValues([1, 0])) + self.assertEqual("00 01 81", compileDeltaValues([1, 0, 0])) + # words, bytes, words: a single byte is more compact when encoded as part of the words run + self.assertEqual("42 66 66 00 02 77 77", compileDeltaValues([0x6666, 2, 0x7777])) + self.assertEqual("40 66 66 01 02 02 40 77 77", compileDeltaValues([0x6666, 2, 2, 0x7777])) + # words, zeroes, words + self.assertEqual("40 66 66 80 40 77 77", compileDeltaValues([0x6666, 0, 0x7777])) + self.assertEqual("40 66 66 81 40 77 77", compileDeltaValues([0x6666, 0, 0, 0x7777])) + self.assertEqual("40 66 66 82 40 77 77", compileDeltaValues([0x6666, 0, 0, 0, 0x7777])) + # words, zeroes, bytes + self.assertEqual("40 66 66 80 02 01 02 03", compileDeltaValues([0x6666, 0, 1, 2, 3])) + self.assertEqual("40 66 66 81 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 1, 2, 3])) + self.assertEqual("40 66 66 82 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 0, 1, 2, 3])) + # words, zeroes + self.assertEqual("40 66 66 80", compileDeltaValues([0x6666, 0])) + self.assertEqual("40 66 66 81", compileDeltaValues([0x6666, 0, 0])) + + def test_decompileDeltas(self): + decompileDeltas = GlyphVariation.decompileDeltas_ + # 83 = zero values (0x80), count = 4 (1 + 0x83 & 0x3F) + self.assertEqual(([0, 0, 0, 0], 1), decompileDeltas(4, deHexStr("83"), 0)) + # 41 01 02 FF FF = signed 16-bit values (0x40), count = 2 (1 + 0x41 & 0x3F) 
+ self.assertEqual(([258, -1], 5), decompileDeltas(2, deHexStr("41 01 02 FF FF"), 0)) + # 01 81 07 = signed 8-bit values, count = 2 (1 + 0x01 & 0x3F) + self.assertEqual(([-127, 7], 3), decompileDeltas(2, deHexStr("01 81 07"), 0)) + # combination of all three encodings, preceded and followed by 4 bytes of unused data + data = deHexStr("DE AD BE EF 83 40 01 02 01 81 80 DE AD BE EF") + self.assertEqual(([0, 0, 0, 0, 258, -127, -128], 11), decompileDeltas(7, data, 4)) + + def test_decompileDeltas_roundTrip(self): + numDeltas = 30 + compile = GlyphVariation.compileDeltaValues_ + decompile = lambda data: GlyphVariation.decompileDeltas_(numDeltas, data, 0)[0] + for i in range(50): + deltas = random.sample(range(-128, 127), 10) + deltas.extend(random.sample(range(-32768, 32767), 10)) + deltas.extend([0] * 10) + random.shuffle(deltas) + self.assertListEqual(deltas, decompile(compile(deltas))) + + def test_getTupleSize(self): + getTupleSize = GlyphVariation.getTupleSize_ + numAxes = 3 + self.assertEqual(4 + numAxes * 2, getTupleSize(0x8042, numAxes)) + self.assertEqual(4 + numAxes * 4, getTupleSize(0x4077, numAxes)) + self.assertEqual(4, getTupleSize(0x2077, numAxes)) + self.assertEqual(4, getTupleSize(11, numAxes)) + + @staticmethod + def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in content.splitlines()][1:] + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_h_d_m_x.py fonttools-3.0/Lib/fontTools/ttLib/tables/_h_d_m_x.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_h_d_m_x.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_h_d_m_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,6 +1,8 @@ -import DefaultTable -import sstruct -import string +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . 
import DefaultTable +import array hdmxHeaderFormat = """ > # big endian! @@ -9,46 +11,65 @@ recordSize: l """ +try: + from collections.abc import Mapping +except: + from UserDict import DictMixin as Mapping + +class _GlyphnamedList(Mapping): + + def __init__(self, reverseGlyphOrder, data): + self._array = data + self._map = dict(reverseGlyphOrder) + + def __getitem__(self, k): + return self._array[self._map[k]] + + def __len__(self): + return len(self._map) + + def __iter__(self): + return iter(self._map) + + def keys(self): + return self._map.keys() + class table__h_d_m_x(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): numGlyphs = ttFont['maxp'].numGlyphs glyphOrder = ttFont.getGlyphOrder() dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self) self.hdmx = {} for i in range(self.numRecords): - ppem = ord(data[0]) - maxSize = ord(data[1]) - widths = {} - for glyphID in range(numGlyphs): - widths[glyphOrder[glyphID]] = ord(data[glyphID+2]) + ppem = byteord(data[0]) + maxSize = byteord(data[1]) + widths = _GlyphnamedList(ttFont.getReverseGlyphMap(), array.array("B", data[2:2+numGlyphs])) self.hdmx[ppem] = widths data = data[self.recordSize:] assert len(data) == 0, "too much hdmx data" - + def compile(self, ttFont): self.version = 0 numGlyphs = ttFont['maxp'].numGlyphs glyphOrder = ttFont.getGlyphOrder() - self.recordSize = 4 * ((2 + numGlyphs + 3) / 4) - pad = (self.recordSize - 2 - numGlyphs) * "\0" + self.recordSize = 4 * ((2 + numGlyphs + 3) // 4) + pad = (self.recordSize - 2 - numGlyphs) * b"\0" self.numRecords = len(self.hdmx) data = sstruct.pack(hdmxHeaderFormat, self) - items = self.hdmx.items() - items.sort() + items = sorted(self.hdmx.items()) for ppem, widths in items: - data = data + chr(ppem) + chr(max(widths.values())) + data = data + bytechr(ppem) + bytechr(max(widths.values())) for glyphID in range(len(glyphOrder)): width = widths[glyphOrder[glyphID]] - data = data + chr(width) + data = data + bytechr(width) data = data + pad 
return data - + def toXML(self, writer, ttFont): writer.begintag("hdmxData") writer.newline() - ppems = self.hdmx.keys() - ppems.sort() + ppems = sorted(self.hdmx.keys()) records = [] format = "" for ppem in ppems: @@ -58,7 +79,7 @@ glyphNames = ttFont.getGlyphOrder()[:] glyphNames.sort() maxNameLen = max(map(len, glyphNames)) - format = "%" + `maxNameLen` + 's:' + format + ' ;' + format = "%" + repr(maxNameLen) + 's:' + format + ' ;' writer.write(format % (("ppem",) + tuple(ppems))) writer.newline() writer.newline() @@ -73,19 +94,19 @@ writer.newline() writer.endtag("hdmxData") writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): - if name <> "hdmxData": + + def fromXML(self, name, attrs, content, ttFont): + if name != "hdmxData": return - content = string.join(content, "") - lines = string.split(content, ";") - topRow = string.split(lines[0]) + content = strjoin(content) + lines = content.split(";") + topRow = lines[0].split() assert topRow[0] == "ppem:", "illegal hdmx format" - ppems = map(int, topRow[1:]) + ppems = list(map(int, topRow[1:])) self.hdmx = hdmx = {} for ppem in ppems: hdmx[ppem] = {} - lines = map(string.split, lines[1:]) + lines = (line.split() for line in lines[1:]) for line in lines: if not line: continue @@ -94,8 +115,7 @@ if "\\" in glyphName: from fontTools.misc.textTools import safeEval glyphName = safeEval('"""' + glyphName + '"""') - line = map(int, line[1:]) + line = list(map(int, line[1:])) assert len(line) == len(ppems), "illegal hdmx format" for i in range(len(ppems)): hdmx[ppems[i]][glyphName] = line[i] - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_h_e_a_d.py fonttools-3.0/Lib/fontTools/ttLib/tables/_h_e_a_d.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_h_e_a_d.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_h_e_a_d.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,8 +1,11 @@ -import DefaultTable -import sstruct -import time -import string +from __future__ import 
print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval, num2binary, binary2num +from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow +from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat +from . import DefaultTable +import warnings headFormat = """ @@ -13,8 +16,8 @@ magicNumber: I flags: H unitsPerEm: H - created: 8s - modified: 8s + created: Q + modified: Q xMin: h yMin: h xMax: h @@ -27,33 +30,38 @@ """ class table__h_e_a_d(DefaultTable.DefaultTable): - + dependencies = ['maxp', 'loca'] - + def decompile(self, data, ttFont): dummy, rest = sstruct.unpack2(headFormat, data, self) if rest: # this is quite illegal, but there seem to be fonts out there that do this + warnings.warn("extra bytes at the end of 'head' table") assert rest == "\0\0" - self.unitsPerEm = int(self.unitsPerEm) - self.flags = int(self.flags) - self.strings2dates() - + + # For timestamp fields, ignore the top four bytes. Some fonts have + # bogus values there. Since till 2038 those bytes only can be zero, + # ignore them. 
+ # + # https://github.com/behdad/fonttools/issues/99#issuecomment-66776810 + for stamp in 'created', 'modified': + value = getattr(self, stamp) + if value > 0xFFFFFFFF: + warnings.warn("'%s' timestamp out of range; ignoring top bytes" % stamp) + value &= 0xFFFFFFFF + setattr(self, stamp, value) + if value < 0x7C259DC0: # January 1, 1970 00:00:00 + warnings.warn("'%s' timestamp seems very low; regarding as unix timestamp" % stamp) + value += 0x7C259DC0 + setattr(self, stamp, value) + def compile(self, ttFont): - self.modified = long(time.time() - mac_epoch_diff) - self.dates2strings() + if ttFont.recalcTimestamp: + self.modified = timestampNow() data = sstruct.pack(headFormat, self) - self.strings2dates() return data - - def strings2dates(self): - self.created = bin2long(self.created) - self.modified = bin2long(self.modified) - - def dates2strings(self): - self.created = long2bin(self.created) - self.modified = long2bin(self.modified) - + def toXML(self, writer, ttFont): writer.comment("Most of this table will be recalculated by the compiler") writer.newline() @@ -61,13 +69,10 @@ for name in names: value = getattr(self, name) if name in ("created", "modified"): - try: - value = time.asctime(time.gmtime(max(0, value + mac_epoch_diff))) - except ValueError: - value = time.asctime(time.gmtime(0)) + value = timestampToString(value) if name in ("magicNumber", "checkSumAdjustment"): if value < 0: - value = value + 0x100000000L + value = value + 0x100000000 value = hex(value) if value[-1:] == "L": value = value[:-1] @@ -75,78 +80,13 @@ value = num2binary(value, 16) writer.simpletag(name, value=value) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): value = attrs["value"] if name in ("created", "modified"): - value = parse_date(value) - mac_epoch_diff + value = timestampFromString(value) elif name in ("macStyle", "flags"): value = binary2num(value) else: - try: - value = safeEval(value) - except 
OverflowError: - value = long(value) + value = safeEval(value) setattr(self, name, value) - - def __cmp__(self, other): - selfdict = self.__dict__.copy() - otherdict = other.__dict__.copy() - # for testing purposes, compare without the modified and checkSumAdjustment - # fields, since they are allowed to be different. - for key in ["modified", "checkSumAdjustment"]: - del selfdict[key] - del otherdict[key] - return cmp(selfdict, otherdict) - - -def calc_mac_epoch_diff(): - """calculate the difference between the original Mac epoch (1904) - to the epoch on this machine. - """ - safe_epoch_t = (1972, 1, 1, 0, 0, 0, 0, 0, 0) - safe_epoch = time.mktime(safe_epoch_t) - time.timezone - # This assert fails in certain time zones, with certain daylight settings - #assert time.gmtime(safe_epoch)[:6] == safe_epoch_t[:6] - seconds1904to1972 = 60 * 60 * 24 * (365 * (1972-1904) + 17) # thanks, Laurence! - return long(safe_epoch - seconds1904to1972) - -mac_epoch_diff = calc_mac_epoch_diff() - - -_months = [' ', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', - 'sep', 'oct', 'nov', 'dec'] -_weekdays = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] - -def parse_date(datestring): - datestring = string.lower(datestring) - weekday, month, day, tim, year = string.split(datestring) - weekday = _weekdays.index(weekday) - month = _months.index(month) - year = int(year) - day = int(day) - hour, minute, second = map(int, string.split(tim, ":")) - t = (year, month, day, hour, minute, second, weekday, 0, 0) - try: - return long(time.mktime(t) - time.timezone) - except OverflowError: - return 0L - - -def bin2long(data): - # thanks ! 
- v = 0L - for i in map(ord, data): - v = v<<8 | i - return v - -def long2bin(v, bytes=8): - mask = long("FF" * bytes, 16) - data = "" - while v: - data = chr(v & 0xff) + data - v = (v >> 8) & mask - data = (bytes - len(data)) * "\0" + data - assert len(data) == 8, "long too long" - return data - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_h_h_e_a.py fonttools-3.0/Lib/fontTools/ttLib/tables/_h_h_e_a.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_h_h_e_a.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_h_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,7 +1,8 @@ -import DefaultTable -import sstruct +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval - +from . import DefaultTable hheaFormat = """ > # big endian @@ -26,37 +27,51 @@ class table__h_h_e_a(DefaultTable.DefaultTable): - + + # Note: Keep in sync with table__v_h_e_a + dependencies = ['hmtx', 'glyf'] - + def decompile(self, data, ttFont): sstruct.unpack(hheaFormat, data, self) - + def compile(self, ttFont): if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: self.recalc(ttFont) return sstruct.pack(hheaFormat, self) - + def recalc(self, ttFont): hmtxTable = ttFont['hmtx'] - if ttFont.has_key('glyf'): + if 'glyf' in ttFont: glyfTable = ttFont['glyf'] - advanceWidthMax = -100000 # arbitrary big negative number - minLeftSideBearing = 100000 # arbitrary big number - minRightSideBearing = 100000 # arbitrary big number - xMaxExtent = -100000 # arbitrary big negative number - + INFINITY = 100000 + advanceWidthMax = 0 + minLeftSideBearing = +INFINITY # arbitrary big number + minRightSideBearing = +INFINITY # arbitrary big number + xMaxExtent = -INFINITY # arbitrary big negative number + for name in ttFont.getGlyphOrder(): width, lsb = hmtxTable[name] + advanceWidthMax = max(advanceWidthMax, width) g = glyfTable[name] - if g.numberOfContours 
<= 0: + if g.numberOfContours == 0: continue - advanceWidthMax = max(advanceWidthMax, width) + if g.numberOfContours < 0 and not hasattr(g, "xMax"): + # Composite glyph without extents set. + # Calculate those. + g.recalcBounds(glyfTable) minLeftSideBearing = min(minLeftSideBearing, lsb) rsb = width - lsb - (g.xMax - g.xMin) minRightSideBearing = min(minRightSideBearing, rsb) extent = lsb + (g.xMax - g.xMin) xMaxExtent = max(xMaxExtent, extent) + + if xMaxExtent == -INFINITY: + # No glyph has outlines. + minLeftSideBearing = 0 + minRightSideBearing = 0 + xMaxExtent = 0 + self.advanceWidthMax = advanceWidthMax self.minLeftSideBearing = minLeftSideBearing self.minRightSideBearing = minRightSideBearing @@ -64,16 +79,13 @@ else: # XXX CFF recalc... pass - + def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(hheaFormat) for name in names: value = getattr(self, name) - if type(value) == type(0L): - value = int(value) writer.simpletag(name, value=value) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): - setattr(self, name, safeEval(attrs["value"])) + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_h_m_t_x.py fonttools-3.0/Lib/fontTools/ttLib/tables/_h_m_t_x.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_h_m_t_x.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_h_m_t_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,51 +1,47 @@ -import sys -import DefaultTable -import numpy -from fontTools import ttLib +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import sys +import array import warnings class table__h_m_t_x(DefaultTable.DefaultTable): - + headerTag = 'hhea' advanceName = 'width' sideBearingName = 'lsb' numberOfMetricsName = 'numberOfHMetrics' - + def decompile(self, data, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs numberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName)) - metrics = numpy.fromstring(data[:4 * numberOfMetrics], - numpy.int16) - if sys.byteorder <> "big": - metrics = metrics.byteswap() - metrics.shape = (numberOfMetrics, 2) + if numberOfMetrics > numGlyphs: + numberOfMetrics = numGlyphs # We warn later. + # Note: advanceWidth is unsigned, but we read/write as signed. + metrics = array.array("h", data[:4 * numberOfMetrics]) + if sys.byteorder != "big": + metrics.byteswap() data = data[4 * numberOfMetrics:] - numberOfSideBearings = ttFont['maxp'].numGlyphs - numberOfMetrics - numberOfSideBearings = int(numberOfSideBearings) - if numberOfSideBearings: - assert numberOfSideBearings > 0, "bad hmtx/vmtx table" - lastAdvance = metrics[-1][0] - advances = numpy.array([lastAdvance] * numberOfSideBearings, - numpy.int16) - sideBearings = numpy.fromstring(data[:2 * numberOfSideBearings], - numpy.int16) - if sys.byteorder <> "big": - sideBearings = sideBearings.byteswap() - data = data[2 * numberOfSideBearings:] - if len(advances) and len(sideBearings): - additionalMetrics = numpy.array([advances, sideBearings], numpy.int16) - metrics = numpy.concatenate((metrics, numpy.transpose(additionalMetrics))) - else: - warnings.warn('Unable to include additional metrics') + numberOfSideBearings = numGlyphs - numberOfMetrics + sideBearings = array.array("h", data[:2 * numberOfSideBearings]) + data = data[2 * numberOfSideBearings:] + + if sys.byteorder != "big": + sideBearings.byteswap() if data: - sys.stderr.write("too much data for hmtx/vmtx table\n") - metrics = metrics.tolist() + warnings.warn("too much 'hmtx'/'vmtx' table data") self.metrics = {} - for i in 
range(len(metrics)): - glyphName = ttFont.getGlyphName(i) - self.metrics[glyphName] = metrics[i] - + glyphOrder = ttFont.getGlyphOrder() + for i in range(numberOfMetrics): + glyphName = glyphOrder[i] + self.metrics[glyphName] = list(metrics[i*2:i*2+2]) + lastAdvance = metrics[-2] + for i in range(numberOfSideBearings): + glyphName = glyphOrder[i + numberOfMetrics] + self.metrics[glyphName] = [lastAdvance, sideBearings[i]] + def compile(self, ttFont): metrics = [] for glyphName in ttFont.getGlyphOrder(): @@ -53,49 +49,53 @@ lastAdvance = metrics[-1][0] lastIndex = len(metrics) while metrics[lastIndex-2][0] == lastAdvance: - lastIndex = lastIndex - 1 + lastIndex -= 1 if lastIndex <= 1: # all advances are equal lastIndex = 1 break additionalMetrics = metrics[lastIndex:] - additionalMetrics = map(lambda (advance, sb): sb, additionalMetrics) + additionalMetrics = [sb for advance, sb in additionalMetrics] metrics = metrics[:lastIndex] setattr(ttFont[self.headerTag], self.numberOfMetricsName, len(metrics)) - - metrics = numpy.array(metrics, numpy.int16) - if sys.byteorder <> "big": - metrics = metrics.byteswap() - data = metrics.tostring() - - additionalMetrics = numpy.array(additionalMetrics, numpy.int16) - if sys.byteorder <> "big": - additionalMetrics = additionalMetrics.byteswap() + + allMetrics = [] + for item in metrics: + allMetrics.extend(item) + allMetrics = array.array("h", allMetrics) + if sys.byteorder != "big": + allMetrics.byteswap() + data = allMetrics.tostring() + + additionalMetrics = array.array("h", additionalMetrics) + if sys.byteorder != "big": + additionalMetrics.byteswap() data = data + additionalMetrics.tostring() return data - + def toXML(self, writer, ttFont): - names = self.metrics.keys() - names.sort() + names = sorted(self.metrics.keys()) for glyphName in names: advance, sb = self.metrics[glyphName] writer.simpletag("mtx", [ - ("name", glyphName), - (self.advanceName, advance), + ("name", glyphName), + (self.advanceName, advance), 
(self.sideBearingName, sb), ]) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "metrics"): self.metrics = {} if name == "mtx": - self.metrics[attrs["name"]] = [safeEval(attrs[self.advanceName]), + self.metrics[attrs["name"]] = [safeEval(attrs[self.advanceName]), safeEval(attrs[self.sideBearingName])] - + + def __delitem__(self, glyphName): + del self.metrics[glyphName] + def __getitem__(self, glyphName): return self.metrics[glyphName] - - def __setitem__(self, glyphName, (advance, sb)): - self.metrics[glyphName] = advance, sb + def __setitem__(self, glyphName, advance_sb_pair): + self.metrics[glyphName] = tuple(advance_sb_pair) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/__init__.py fonttools-3.0/Lib/fontTools/ttLib/tables/__init__.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/__init__.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,47 +1,74 @@ + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + # DON'T EDIT! This file is generated by MetaTools/buildTableList.py. def _moduleFinderHint(): """Dummy function to let modulefinder know what tables may be dynamically imported. Generated by MetaTools/buildTableList.py. 
+ + >>> _moduleFinderHint() """ - import B_A_S_E_ - import C_F_F_ - import D_S_I_G_ - import G_D_E_F_ - import G_M_A_P_ - import G_P_K_G_ - import G_P_O_S_ - import G_S_U_B_ - import J_S_T_F_ - import L_T_S_H_ - import M_E_T_A_ - import O_S_2f_2 - import S_I_N_G_ - import T_S_I_B_ - import T_S_I_D_ - import T_S_I_J_ - import T_S_I_P_ - import T_S_I_S_ - import T_S_I_V_ - import T_S_I__0 - import T_S_I__1 - import T_S_I__2 - import T_S_I__3 - import T_S_I__5 - import V_O_R_G_ - import _c_m_a_p - import _c_v_t - import _f_p_g_m - import _g_a_s_p - import _g_l_y_f - import _h_d_m_x - import _h_e_a_d - import _h_h_e_a - import _h_m_t_x - import _k_e_r_n - import _l_o_c_a - import _m_a_x_p - import _n_a_m_e - import _p_o_s_t - import _p_r_e_p - import _v_h_e_a - import _v_m_t_x + from . import B_A_S_E_ + from . import C_B_D_T_ + from . import C_B_L_C_ + from . import C_F_F_ + from . import C_O_L_R_ + from . import C_P_A_L_ + from . import D_S_I_G_ + from . import E_B_D_T_ + from . import E_B_L_C_ + from . import F_F_T_M_ + from . import G_D_E_F_ + from . import G_M_A_P_ + from . import G_P_K_G_ + from . import G_P_O_S_ + from . import G_S_U_B_ + from . import J_S_T_F_ + from . import L_T_S_H_ + from . import M_A_T_H_ + from . import M_E_T_A_ + from . import O_S_2f_2 + from . import S_I_N_G_ + from . import S_V_G_ + from . import T_S_I_B_ + from . import T_S_I_D_ + from . import T_S_I_J_ + from . import T_S_I_P_ + from . import T_S_I_S_ + from . import T_S_I_V_ + from . import T_S_I__0 + from . import T_S_I__1 + from . import T_S_I__2 + from . import T_S_I__3 + from . import T_S_I__5 + from . import V_D_M_X_ + from . import V_O_R_G_ + from . import _a_v_a_r + from . import _c_m_a_p + from . import _c_v_t + from . import _f_e_a_t + from . import _f_p_g_m + from . import _f_v_a_r + from . import _g_a_s_p + from . import _g_l_y_f + from . import _g_v_a_r + from . import _h_d_m_x + from . import _h_e_a_d + from . import _h_h_e_a + from . import _h_m_t_x + from . 
import _k_e_r_n + from . import _l_o_c_a + from . import _l_t_a_g + from . import _m_a_x_p + from . import _m_e_t_a + from . import _n_a_m_e + from . import _p_o_s_t + from . import _p_r_e_p + from . import _s_b_i_x + from . import _v_h_e_a + from . import _v_m_t_x + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/J_S_T_F_.py fonttools-3.0/Lib/fontTools/ttLib/tables/J_S_T_F_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/J_S_T_F_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/J_S_T_F_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,4 +1,6 @@ -from otBase import BaseTTXConverter +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter class table_J_S_T_F_(BaseTTXConverter): diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_k_e_r_n.py fonttools-3.0/Lib/fontTools/ttLib/tables/_k_e_r_n.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_k_e_r_n.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_k_e_r_n.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,25 +1,29 @@ -import DefaultTable -import struct -from fontTools.ttLib import sfnt +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import getSearchRange from fontTools.misc.textTools import safeEval, readHex -from types import TupleType +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from . 
import DefaultTable +import struct +import array +import warnings class table__k_e_r_n(DefaultTable.DefaultTable): - + def getkern(self, format): for subtable in self.kernTables: if subtable.version == format: return subtable return None # not found - + def decompile(self, data, ttFont): version, nTables = struct.unpack(">HH", data[:4]) apple = False if (len(data) >= 8) and (version == 1): # AAT Apple's "new" format. Hm. version, nTables = struct.unpack(">LL", data[:8]) - self.version = version / float(0x10000) + self.version = fi2fl(version, 16) data = data[8:] apple = True else: @@ -35,7 +39,7 @@ else: version, length = struct.unpack(">HH", data[:4]) length = int(length) - if not kern_classes.has_key(version): + if version not in kern_classes: subtable = KernTable_format_unkown(version) else: subtable = kern_classes[version]() @@ -43,7 +47,7 @@ subtable.decompile(data[:length], ttFont) self.kernTables.append(subtable) data = data[length:] - + def compile(self, ttFont): if hasattr(self, "kernTables"): nTables = len(self.kernTables) @@ -51,39 +55,39 @@ nTables = 0 if self.version == 1.0: # AAT Apple's "new" format. 
- data = struct.pack(">ll", self.version * 0x10000, nTables) + data = struct.pack(">ll", fl2fi(self.version, 16), nTables) else: data = struct.pack(">HH", self.version, nTables) if hasattr(self, "kernTables"): for subtable in self.kernTables: data = data + subtable.compile(ttFont) return data - + def toXML(self, writer, ttFont): writer.simpletag("version", value=self.version) writer.newline() for subtable in self.kernTables: subtable.toXML(writer, ttFont) - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if name == "version": self.version = safeEval(attrs["value"]) return - if name <> "kernsubtable": + if name != "kernsubtable": return if not hasattr(self, "kernTables"): self.kernTables = [] format = safeEval(attrs["format"]) - if not kern_classes.has_key(format): + if format not in kern_classes: subtable = KernTable_format_unkown(format) else: subtable = kern_classes[format]() self.kernTables.append(subtable) - subtable.fromXML((name, attrs, content), ttFont) + subtable.fromXML(name, attrs, content, ttFont) -class KernTable_format_0: - +class KernTable_format_0(object): + def decompile(self, data, ttFont): version, length, coverage = (0,0,0) if not self.apple: @@ -93,44 +97,51 @@ version, length, coverage = struct.unpack(">LHH", data[:8]) data = data[8:] self.version, self.coverage = int(version), int(coverage) - + self.kernTable = kernTable = {} - + nPairs, searchRange, entrySelector, rangeShift = struct.unpack(">HHHH", data[:8]) data = data[8:] - + + nPairs = min(nPairs, len(data) // 6) + datas = array.array("H", data[:6 * nPairs]) + if sys.byteorder != "big": + datas.byteswap() + it = iter(datas) + glyphOrder = ttFont.getGlyphOrder() for k in range(nPairs): - if len(data) < 6: - # buggy kern table - data = "" - break - left, right, value = struct.unpack(">HHh", data[:6]) - data = data[6:] - left, right = int(left), int(right) - kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = value - 
assert len(data) == 0, len(data) - + left, right, value = next(it), next(it), next(it) + if value >= 32768: value -= 65536 + try: + kernTable[(glyphOrder[left], glyphOrder[right])] = value + except IndexError: + # Slower, but will not throw an IndexError on an invalid glyph id. + kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = value + if len(data) > 6 * nPairs: + warnings.warn("excess data in 'kern' subtable: %d bytes" % len(data)) + def compile(self, ttFont): nPairs = len(self.kernTable) - entrySelector = sfnt.maxPowerOfTwo(nPairs) - searchRange = (2 ** entrySelector) * 6 - rangeShift = (nPairs - (2 ** entrySelector)) * 6 + searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6) data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift) - + # yeehee! (I mean, turn names into indices) - kernTable = map(lambda ((left, right), value), getGlyphID=ttFont.getGlyphID: - (getGlyphID(left), getGlyphID(right), value), - self.kernTable.items()) - kernTable.sort() + try: + reverseOrder = ttFont.getReverseGlyphMap() + kernTable = sorted((reverseOrder[left], reverseOrder[right], value) for ((left,right),value) in self.kernTable.items()) + except KeyError: + # Slower, but will not throw KeyError on invalid glyph id. 
+ getGlyphID = ttFont.getGlyphID + kernTable = sorted((getGlyphID(left), getGlyphID(right), value) for ((left,right),value) in self.kernTable.items()) + for left, right, value in kernTable: data = data + struct.pack(">HHh", left, right, value) return struct.pack(">HHH", self.version, len(data) + 6, self.coverage) + data - + def toXML(self, writer, ttFont): writer.begintag("kernsubtable", coverage=self.coverage, format=0) writer.newline() - items = self.kernTable.items() - items.sort() + items = sorted(self.kernTable.items()) for (left, right), value in items: writer.simpletag("pair", [ ("l", left), @@ -140,61 +151,39 @@ writer.newline() writer.endtag("kernsubtable") writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): self.coverage = safeEval(attrs["coverage"]) self.version = safeEval(attrs["format"]) if not hasattr(self, "kernTable"): self.kernTable = {} for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"]) - + def __getitem__(self, pair): return self.kernTable[pair] - + def __setitem__(self, pair, value): self.kernTable[pair] = value - + def __delitem__(self, pair): del self.kernTable[pair] - - def __cmp__(self, other): - return cmp(self.__dict__, other.__dict__) -class KernTable_format_2: - - def decompile(self, data, ttFont): - self.data = data - - def compile(self, ttFont): - return self.data - - def toXML(self, writer): - writer.begintag("kernsubtable", format=2) - writer.newline() - writer.dumphex(self.data) - writer.endtag("kernsubtable") - writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): - self.decompile(readHex(content), ttFont) +class KernTable_format_unkown(object): - -class KernTable_format_unkown: - def __init__(self, format): self.format = format - + def decompile(self, data, ttFont): self.data = data - + def 
compile(self, ttFont): return self.data - + def toXML(self, writer, ttFont): writer.begintag("kernsubtable", format=self.format) writer.newline() @@ -203,10 +192,9 @@ writer.dumphex(self.data) writer.endtag("kernsubtable") writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): - self.decompile(readHex(content), ttFont) + def fromXML(self, name, attrs, content, ttFont): + self.decompile(readHex(content), ttFont) -kern_classes = {0: KernTable_format_0, 2: KernTable_format_2} +kern_classes = {0: KernTable_format_0} diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_k_e_r_n_test.py fonttools-3.0/Lib/fontTools/ttLib/tables/_k_e_r_n_test.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_k_e_r_n_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_k_e_r_n_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,29 @@ +from __future__ import print_function, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +import unittest +from ._k_e_r_n import KernTable_format_0 + +class MockFont(object): + + def getGlyphOrder(self): + return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"] + + def getGlyphName(self, glyphID): + return "glyph%.5d" % glyphID + +class KernTable_format_0_Test(unittest.TestCase): + + def test_decompileBadGlyphId(self): + subtable = KernTable_format_0() + subtable.apple = False + subtable.decompile( b'\x00' * 6 + + b'\x00' + b'\x02' + b'\x00' * 6 + + b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01' + + b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02', + MockFont()) + self.assertEqual(subtable[("glyph00001", "glyph00003")], 1) + self.assertEqual(subtable[("glyph00001", "glyph65535")], 2) + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_l_o_c_a.py fonttools-3.0/Lib/fontTools/ttLib/tables/_l_o_c_a.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_l_o_c_a.py 2013-04-23 04:08:08.000000000 +0000 +++ 
fonttools-3.0/Lib/fontTools/ttLib/tables/_l_o_c_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,15 +1,14 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable import sys -import DefaultTable import array -import numpy -from fontTools import ttLib -import struct import warnings class table__l_o_c_a(DefaultTable.DefaultTable): - + dependencies = ['glyf'] - + def decompile(self, data, ttFont): longFormat = ttFont['head'].indexToLocFormat if longFormat: @@ -18,40 +17,44 @@ format = "H" locations = array.array(format) locations.fromstring(data) - if sys.byteorder <> "big": + if sys.byteorder != "big": locations.byteswap() - locations = numpy.array(locations, numpy.int32) if not longFormat: - locations = locations * 2 + l = array.array("I") + for i in range(len(locations)): + l.append(locations[i] * 2) + locations = l if len(locations) < (ttFont['maxp'].numGlyphs + 1): warnings.warn("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d" % (len(locations) - 1, ttFont['maxp'].numGlyphs)) self.locations = locations - + def compile(self, ttFont): - locations = self.locations - if max(locations) < 0x20000: - locations = locations / 2 - locations = locations.astype(numpy.int16) + try: + max_location = max(self.locations) + except AttributeError: + self.set([]) + max_location = 0 + if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations): + locations = array.array("H") + for i in range(len(self.locations)): + locations.append(self.locations[i] // 2) ttFont['head'].indexToLocFormat = 0 else: + locations = array.array("I", self.locations) ttFont['head'].indexToLocFormat = 1 - if sys.byteorder <> "big": - locations = locations.byteswap() + if sys.byteorder != "big": + locations.byteswap() return locations.tostring() - + def set(self, locations): - self.locations = numpy.array(locations, numpy.int32) - + self.locations = array.array("I", locations) + def toXML(self, writer, ttFont): 
writer.comment("The 'loca' table will be calculated by the compiler") writer.newline() - + def __getitem__(self, index): return self.locations[index] - + def __len__(self): return len(self.locations) - - def __cmp__(self, other): - return cmp(len(self), len(other)) or not numpy.alltrue(numpy.equal(self.locations, other.locations)) - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_l_t_a_g.py fonttools-3.0/Lib/fontTools/ttLib/tables/_l_t_a_g.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_l_t_a_g.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_l_t_a_g.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,50 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . import DefaultTable +import struct + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html + +class table__l_t_a_g(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.version, self.flags, numTags = struct.unpack(">LLL", data[:12]) + assert self.version == 1 + self.tags = [] + for i in range(numTags): + pos = 12 + i * 4 + offset, length = struct.unpack(">HH", data[pos:pos+4]) + tag = data[offset:offset+length].decode("ascii") + self.tags.append(tag) + + def compile(self, ttFont): + dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))] + stringPool = "" + for tag in self.tags: + offset = stringPool.find(tag) + if offset < 0: + offset = len(stringPool) + stringPool = stringPool + tag + offset = offset + 12 + len(self.tags) * 4 + dataList.append(struct.pack(">HH", offset, len(tag))) + dataList.append(stringPool) + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("flags", value=self.flags) + writer.newline() + for tag in self.tags: + writer.simpletag("LanguageTag", tag=tag) + writer.newline() + + def 
fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "tags"): + self.tags = [] + if name == "LanguageTag": + self.tags.append(attrs["tag"]) + elif "value" in attrs: + value = safeEval(attrs["value"]) + setattr(self, name, value) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_l_t_a_g_test.py fonttools-3.0/Lib/fontTools/ttLib/tables/_l_t_a_g_test.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_l_t_a_g_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_l_t_a_g_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,49 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.xmlWriter import XMLWriter +import os +import struct +import unittest +from ._l_t_a_g import table__l_t_a_g + +class Test_l_t_a_g(unittest.TestCase): + + DATA_ = struct.pack(b">LLLHHHHHH", 1, 0, 3, 24 + 0, 2, 24 + 2, 7, 24 + 2, 2) + b"enzh-Hant" + TAGS_ = ["en", "zh-Hant", "zh"] + + def test_decompile_compile(self): + table = table__l_t_a_g() + table.decompile(self.DATA_, ttFont=None) + self.assertEqual(1, table.version) + self.assertEqual(0, table.flags) + self.assertEqual(self.TAGS_, table.tags) + self.assertEqual(self.DATA_, table.compile(ttFont=None)) + + def test_fromXML(self): + table = table__l_t_a_g() + table.fromXML("version", {"value": "1"}, content=None, ttFont=None) + table.fromXML("flags", {"value": "777"}, content=None, ttFont=None) + table.fromXML("LanguageTag", {"tag": "sr-Latn"}, content=None, ttFont=None) + table.fromXML("LanguageTag", {"tag": "fa"}, content=None, ttFont=None) + self.assertEqual(1, table.version) + self.assertEqual(777, table.flags) + self.assertEqual(["sr-Latn", "fa"], table.tags) + + def test_toXML(self): + writer = XMLWriter(BytesIO()) + table = table__l_t_a_g() + table.decompile(self.DATA_, ttFont=None) + table.toXML(writer, ttFont=None) + expected = os.linesep.join([ + '', + '', + '', + '', + '', + '' + ]) + os.linesep + 
self.assertEqual(expected.encode("utf_8"), writer.file.getvalue()) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/L_T_S_H_.py fonttools-3.0/Lib/fontTools/ttLib/tables/L_T_S_H_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/L_T_S_H_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/L_T_S_H_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,14 +1,16 @@ -import DefaultTable -import array -import struct +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.misc.textTools import safeEval +from . import DefaultTable +import struct +import array # XXX I've lowered the strictness, to make sure Apple's own Chicago # XXX gets through. They're looking into it, I hope to raise the standards # XXX back to normal eventually. class table_L_T_S_H_(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): version, numGlyphs = struct.unpack(">HH", data[:4]) data = data[4:] @@ -21,10 +23,10 @@ self.yPels = {} for i in range(numGlyphs): self.yPels[ttFont.getGlyphName(i)] = yPels[i] - + def compile(self, ttFont): version = 0 - names = self.yPels.keys() + names = list(self.yPels.keys()) numGlyphs = len(names) yPels = [0] * numGlyphs # ouch: the assertion is not true in Chicago! 
@@ -33,18 +35,16 @@ yPels[ttFont.getGlyphID(name)] = self.yPels[name] yPels = array.array("B", yPels) return struct.pack(">HH", version, numGlyphs) + yPels.tostring() - + def toXML(self, writer, ttFont): - names = self.yPels.keys() - names.sort() + names = sorted(self.yPels.keys()) for name in names: writer.simpletag("yPel", name=name, value=self.yPels[name]) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "yPels"): self.yPels = {} - if name <> "yPel": + if name != "yPel": return # ignore unknown tags self.yPels[attrs["name"]] = safeEval(attrs["value"]) - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/M_A_T_H_.py fonttools-3.0/Lib/fontTools/ttLib/tables/M_A_T_H_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/M_A_T_H_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/M_A_T_H_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_M_A_T_H_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_m_a_x_p.py fonttools-3.0/Lib/fontTools/ttLib/tables/_m_a_x_p.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_m_a_x_p.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_m_a_x_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,6 +1,8 @@ -import DefaultTable -import sstruct +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval +from . 
import DefaultTable maxpFormat_0_5 = """ > # big endian @@ -27,18 +29,18 @@ class table__m_a_x_p(DefaultTable.DefaultTable): - + dependencies = ['glyf'] - + def decompile(self, data, ttFont): dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self) self.numGlyphs = int(self.numGlyphs) if self.tableVersion != 0x00005000: dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self) assert len(data) == 0 - + def compile(self, ttFont): - if ttFont.has_key('glyf'): + if 'glyf' in ttFont: if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: self.recalc(ttFont) else: @@ -50,7 +52,7 @@ if self.tableVersion == 0x00010000: data = data + sstruct.pack(maxpFormat_1_0_add, self) return data - + def recalc(self, ttFont): """Recalculate the font bounding box, and most other maxp values except for the TT instructions values. Also recalculate the value of bit 1 @@ -60,10 +62,11 @@ hmtxTable = ttFont['hmtx'] headTable = ttFont['head'] self.numGlyphs = len(glyfTable) - xMin = 100000 - yMin = 100000 - xMax = -100000 - yMax = -100000 + INFINITY = 100000 + xMin = +INFINITY + yMin = +INFINITY + xMax = -INFINITY + yMax = -INFINITY maxPoints = 0 maxContours = 0 maxCompositePoints = 0 @@ -74,7 +77,7 @@ for glyphName in ttFont.getGlyphOrder(): g = glyfTable[glyphName] if g.numberOfContours: - if hmtxTable[glyphName][1] <> g.xMin: + if hmtxTable[glyphName][1] != g.xMin: allXMaxIsLsb = 0 xMin = min(xMin, g.xMin) yMin = min(yMin, g.yMin) @@ -90,10 +93,16 @@ maxCompositeContours = max(maxCompositeContours, nContours) maxComponentElements = max(maxComponentElements, len(g.components)) maxComponentDepth = max(maxComponentDepth, componentDepth) - headTable.xMin = xMin - headTable.yMin = yMin - headTable.xMax = xMax - headTable.yMax = yMax + if xMin == +INFINITY: + headTable.xMin = 0 + headTable.yMin = 0 + headTable.xMax = 0 + headTable.yMax = 0 + else: + headTable.xMin = xMin + headTable.yMin = yMin + headTable.xMax = xMax + headTable.yMax = yMax self.maxPoints = maxPoints self.maxContours = 
maxContours self.maxCompositePoints = maxCompositePoints @@ -103,15 +112,14 @@ headTable.flags = headTable.flags | 0x2 else: headTable.flags = headTable.flags & ~0x2 - + def testrepr(self): - items = self.__dict__.items() - items.sort() - print ". . . . . . . . ." + items = sorted(self.__dict__.items()) + print(". . . . . . . . .") for combo in items: - print " %s: %s" % combo - print ". . . . . . . . ." - + print(" %s: %s" % combo) + print(". . . . . . . . .") + def toXML(self, writer, ttFont): if self.tableVersion != 0x00005000: writer.comment("Most of this table will be recalculated by the compiler") @@ -122,14 +130,10 @@ names = names + names_1_0 for name in names: value = getattr(self, name) - if type(value) == type(0L): - value=int(value) if name == "tableVersion": value = hex(value) writer.simpletag(name, value=value) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): - setattr(self, name, safeEval(attrs["value"])) - + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_m_e_t_a.py fonttools-3.0/Lib/fontTools/ttLib/tables/_m_e_t_a.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_m_e_t_a.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_m_e_t_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,93 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import readHex +from fontTools.ttLib import TTLibError +from . 
import DefaultTable + +# Apple's documentation of 'meta': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6meta.html + +META_HEADER_FORMAT = """ + > # big endian + version: L + flags: L + dataOffset: L + numDataMaps: L +""" + +# According to Apple's spec, the dataMaps entries contain a dataOffset +# that is documented as "Offset from the beginning of the data section +# to the data for this tag". However, this is *not* the case with +# the fonts that Apple ships pre-installed on MacOS X Yosemite 10.10.4, +# and it also does not reflect how Apple's ftxdumperfuser tool is parsing +# the 'meta' table (tested ftxdumperfuser build 330, FontToolbox.framework +# build 187). Instead of what is claimed in the spec, the data maps contain +# a dataOffset relative to the very beginning of the 'meta' table. +# The dataOffset field of the 'meta' header apparently gets ignored. + +DATA_MAP_FORMAT = """ + > # big endian + tag: 4s + dataOffset: L + dataLength: L +""" + + +class table__m_e_t_a(DefaultTable.DefaultTable): + def __init__(self, tag="meta"): + DefaultTable.DefaultTable.__init__(self, tag) + self.data = {} + + def decompile(self, data, ttFont): + headerSize = sstruct.calcsize(META_HEADER_FORMAT) + header = sstruct.unpack(META_HEADER_FORMAT, data[0 : headerSize]) + if header["version"] != 1: + raise TTLibError("unsupported 'meta' version %d" % + header["version"]) + dataMapSize = sstruct.calcsize(DATA_MAP_FORMAT) + for i in range(header["numDataMaps"]): + dataMapOffset = headerSize + i * dataMapSize + dataMap = sstruct.unpack( + DATA_MAP_FORMAT, + data[dataMapOffset : dataMapOffset + dataMapSize]) + tag = dataMap["tag"] + offset = dataMap["dataOffset"] + self.data[tag] = data[offset : offset + dataMap["dataLength"]] + + def compile(self, ttFont): + keys = sorted(self.data.keys()) + headerSize = sstruct.calcsize(META_HEADER_FORMAT) + dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT) + header = sstruct.pack(META_HEADER_FORMAT, { 
+ "version": 1, + "flags": 0, + "dataOffset": dataOffset, + "numDataMaps": len(keys) + }) + dataMaps = [] + dataBlocks = [] + for tag in keys: + data = self.data[tag] + dataMaps.append(sstruct.pack(DATA_MAP_FORMAT, { + "tag": tag, + "dataOffset": dataOffset, + "dataLength": len(data) + })) + dataBlocks.append(data) + dataOffset += len(data) + return bytesjoin([header] + dataMaps + dataBlocks) + + def toXML(self, writer, ttFont, progress=None): + for tag in sorted(self.data.keys()): + writer.begintag("hexdata", tag=tag) + writer.newline() + writer.dumphex(self.data[tag]) + writer.endtag("hexdata") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "hexdata": + self.data[attrs["tag"]] = readHex(content) + else: + raise TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/M_E_T_A_.py fonttools-3.0/Lib/fontTools/ttLib/tables/M_E_T_A_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/M_E_T_A_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/M_E_T_A_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,9 +1,12 @@ -import DefaultTable -import struct, sstruct +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval -import string -from types import FloatType, ListType, StringType, TupleType -import sys +from . import DefaultTable +import pdb +import struct + + METAHeaderFormat = """ > # big endian tableVersionMajor: H @@ -24,19 +27,19 @@ nMetaEntry: H """ # This record is followd by a variable data length field: -# USHORT or ULONG hdrOffset +# USHORT or ULONG hdrOffset # Offset from start of META table to the beginning # of this glyphs array of ns Metadata string entries. 
-# Size determined by metaFlags field +# Size determined by metaFlags field # METAGlyphRecordFormat entries must be sorted by glyph ID - + METAStringRecordFormat = """ > # big endian labelID: H stringLen: H """ # This record is followd by a variable data length field: -# USHORT or ULONG stringOffset +# USHORT or ULONG stringOffset # METAStringRecordFormat entries must be sorted in order of labelID # There may be more than one entry with the same labelID # There may be more than one strign with the same content. @@ -44,17 +47,17 @@ # Strings shall be Unicode UTF-8 encoded, and null-terminated. METALabelDict = { - 0 : "MojikumiX4051", # An integer in the range 1-20 - 1 : "UNIUnifiedBaseChars", - 2 : "BaseFontName", - 3 : "Language", - 4 : "CreationDate", - 5 : "FoundryName", - 6 : "FoundryCopyright", - 7 : "OwnerURI", - 8 : "WritingScript", - 10 : "StrokeCount", - 11 : "IndexingRadical", + 0: "MojikumiX4051", # An integer in the range 1-20 + 1: "UNIUnifiedBaseChars", + 2: "BaseFontName", + 3: "Language", + 4: "CreationDate", + 5: "FoundryName", + 6: "FoundryCopyright", + 7: "OwnerURI", + 8: "WritingScript", + 10: "StrokeCount", + 11: "IndexingRadical", } @@ -67,9 +70,9 @@ class table_M_E_T_A_(DefaultTable.DefaultTable): - + dependencies = [] - + def decompile(self, data, ttFont): dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self) self.glyphRecords = [] @@ -95,16 +98,16 @@ newData = newData[4:] stringRec.string = data[stringRec.offset:stringRec.offset + stringRec.stringLen] glyphRecord.stringRecs.append(stringRec) - self.glyphRecords.append(glyphRecord) - + self.glyphRecords.append(glyphRecord) + def compile(self, ttFont): offsetOK = 0 self.nMetaRecs = len(self.glyphRecords) count = 0 - while ( offsetOK != 1): + while (offsetOK != 1): count = count + 1 if count > 4: - pdb_set_trace() + pdb.set_trace() metaData = sstruct.pack(METAHeaderFormat, self) stringRecsOffset = len(metaData) + self.nMetaRecs * (6 + 2*(self.metaFlags & 1)) stringRecSize = (6 + 
2*(self.metaFlags & 1)) @@ -115,12 +118,12 @@ offsetOK = -1 break metaData = metaData + glyphRec.compile(self) - stringRecsOffset = stringRecsOffset + (glyphRec.nMetaEntry * stringRecSize) + stringRecsOffset = stringRecsOffset + (glyphRec.nMetaEntry * stringRecSize) # this will be the String Record offset for the next GlyphRecord. - if offsetOK == -1: + if offsetOK == -1: offsetOK = 0 continue - + # metaData now contains the header and all of the GlyphRecords. Its length should bw # the offset to the first StringRecord. stringOffset = stringRecsOffset @@ -137,23 +140,22 @@ if offsetOK == -1: offsetOK = 0 continue - + if ((self.metaFlags & 1) == 1) and (stringOffset < 65536): self.metaFlags = self.metaFlags - 1 continue else: offsetOK = 1 - - + # metaData now contains the header and all of the GlyphRecords and all of the String Records. # Its length should be the offset to the first string datum. for glyphRec in self.glyphRecords: for stringRec in glyphRec.stringRecs: assert (stringRec.offset == len(metaData)), "String offset did not compile correctly! 
for string:" + str(stringRec.string) metaData = metaData + stringRec.string - + return metaData - + def toXML(self, writer, ttFont): writer.comment("Lengths and number of entries in this table will be recalculated by the compiler") writer.newline() @@ -164,35 +166,31 @@ writer.newline() for glyphRec in self.glyphRecords: glyphRec.toXML(writer, ttFont) - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if name == "GlyphRecord": if not hasattr(self, "glyphRecords"): self.glyphRecords = [] glyphRec = GlyphRecord() self.glyphRecords.append(glyphRec) for element in content: - if isinstance(element, StringType): + if isinstance(element, basestring): continue - glyphRec.fromXML(element, ttFont) + name, attrs, content = element + glyphRec.fromXML(name, attrs, content, ttFont) glyphRec.offset = -1 glyphRec.nMetaEntry = len(glyphRec.stringRecs) - else: - value = attrs["value"] - try: - value = safeEval(value) - except OverflowError: - value = long(value) - setattr(self, name, value) + else: + setattr(self, name, safeEval(attrs["value"])) -class GlyphRecord: +class GlyphRecord(object): def __init__(self): self.glyphID = -1 self.nMetaEntry = -1 self.offset = -1 self.stringRecs = [] - + def toXML(self, writer, ttFont): writer.begintag("GlyphRecord") writer.newline() @@ -205,23 +203,17 @@ writer.endtag("GlyphRecord") writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + def fromXML(self, name, attrs, content, ttFont): if name == "StringRecord": stringRec = StringRecord() self.stringRecs.append(stringRec) for element in content: - if isinstance(element, StringType): + if isinstance(element, basestring): continue - stringRec.fromXML(element, ttFont) + stringRec.fromXML(name, attrs, content, ttFont) stringRec.stringLen = len(stringRec.string) - else: - value = attrs["value"] - try: - value = safeEval(value) - except OverflowError: - value = long(value) - setattr(self, name, value) + else: + setattr(self, 
name, safeEval(attrs["value"])) def compile(self, parentTable): data = sstruct.pack(METAGlyphRecordFormat, self) @@ -232,18 +224,13 @@ data = data + datum return data - - def __cmp__(self, other): - """Compare method, so a list of NameRecords can be sorted - according to the spec by just sorting it...""" - return cmp(self.glyphID, other.glyphID) - def __repr__(self): return "GlyphRecord[ glyphID: " + str(self.glyphID) + ", nMetaEntry: " + str(self.nMetaEntry) + ", offset: " + str(self.offset) + " ]" +# XXX The following two functions are really broken around UTF-8 vs Unicode def mapXMLToUTF8(string): - uString = u"" + uString = unicode() strLen = len(string) i = 0 while i < strLen: @@ -258,33 +245,28 @@ while string[i] != ";": i = i+1 valStr = string[j:i] - + uString = uString + unichr(eval('0x' + valStr)) else: - uString = uString + unichr(ord(string[i])) + uString = uString + unichr(byteord(string[i])) i = i +1 - - return uString.encode('utf8') + + return uString.encode('utf_8') def mapUTF8toXML(string): - uString = string.decode('utf8') + uString = string.decode('utf_8') string = "" for uChar in uString: i = ord(uChar) if (i < 0x80) and (i > 0x1F): - string = string + chr(i) + string = string + uChar else: string = string + "&#x" + hex(i)[2:] + ";" return string -class StringRecord: - def __init__(self): - self.labelID = -1 - self.string = "" - self.stringLen = -1 - self.offset = -1 +class StringRecord(object): def toXML(self, writer, ttFont): writer.begintag("StringRecord") @@ -298,16 +280,16 @@ writer.endtag("StringRecord") writer.newline() - def fromXML(self, (name, attrs, content), ttFont): - value = attrs["value"] - if name == "string": - self.string = mapXMLToUTF8(value) - else: - try: - value = safeEval(value) - except OverflowError: - value = long(value) - setattr(self, name, value) + def fromXML(self, name, attrs, content, ttFont): + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + value = 
attrs["value"] + if name == "string": + self.string = mapXMLToUTF8(value) + else: + setattr(self, name, safeEval(value)) def compile(self, parentTable): data = sstruct.pack(METAStringRecordFormat, self) @@ -318,12 +300,6 @@ data = data + datum return data - def __cmp__(self, other): - """Compare method, so a list of NameRecords can be sorted - according to the spec by just sorting it...""" - return cmp(self.labelID, other.labelID) - def __repr__(self): return "StringRecord [ labelID: " + str(self.labelID) + " aka " + getLabelString(self.labelID) \ + ", offset: " + str(self.offset) + ", length: " + str(self.stringLen) + ", string: " +self.string + " ]" - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_m_e_t_a_test.py fonttools-3.0/Lib/fontTools/ttLib/tables/_m_e_t_a_test.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_m_e_t_a_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_m_e_t_a_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,54 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._m_e_t_a import table__m_e_t_a +import unittest + + +# From a real font on MacOS X, but substituted 'bild' tag by 'TEST', +# and shortened the payload. Note that from the 'meta' spec, one would +# expect that header.dataOffset is 0x0000001C (pointing to the beginning +# of the data section) and that dataMap[0].dataOffset should be 0 (relative +# to the beginning of the data section). However, in the fonts that Apple +# ships on MacOS X 10.10.4, dataMap[0].dataOffset is actually relative +# to the beginning of the 'meta' table, i.e. 0x0000001C again. While the +# following test data is invalid according to the 'meta' specification, +# it is reflecting the 'meta' table structure in all Apple-supplied fonts. 
+META_DATA = deHexStr( + "00 00 00 01 00 00 00 00 00 00 00 1C 00 00 00 01 " + "54 45 53 54 00 00 00 1C 00 00 00 04 CA FE BE EF") + + +class MetaTableTest(unittest.TestCase): + def test_decompile(self): + table = table__m_e_t_a() + table.decompile(META_DATA, ttFont={"meta": table}) + self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) + + def test_compile(self): + table = table__m_e_t_a() + table.data["TEST"] = b"\xCA\xFE\xBE\xEF" + self.assertEqual(META_DATA, table.compile(ttFont={"meta": table})) + + def test_toXML(self): + table = table__m_e_t_a() + table.data["TEST"] = b"\xCA\xFE\xBE\xEF" + writer = XMLWriter(BytesIO()) + table.toXML(writer, {"meta": table}) + xml = writer.file.getvalue().decode("utf-8") + self.assertEqual([ + '', + 'cafebeef', + '' + ], [line.strip() for line in xml.splitlines()][1:]) + + def test_fromXML(self): + table = table__m_e_t_a() + table.fromXML("hexdata", {"tag": "TEST"}, ['cafebeef'], ttFont=None) + self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_n_a_m_e.py fonttools-3.0/Lib/fontTools/ttLib/tables/_n_a_m_e.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_n_a_m_e.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_n_a_m_e.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,8 +1,10 @@ -import DefaultTable -import struct, sstruct +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval -import string -import types +from fontTools.misc.encodingTools import getEncoding +from . 
import DefaultTable +import struct nameRecordFormat = """ > # big endian @@ -18,14 +20,13 @@ class table__n_a_m_e(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): format, n, stringOffset = struct.unpack(">HHH", data[:6]) expectedStringOffset = 6 + n * nameRecordSize if stringOffset != expectedStringOffset: # XXX we need a warn function - print "Warning: 'name' table stringOffset incorrect.", - print "Expected: %s; Actual: %s" % (expectedStringOffset, stringOffset) + print("Warning: 'name' table stringOffset incorrect. Expected: %s; Actual: %s" % (expectedStringOffset, stringOffset)) stringData = data[stringOffset:] data = data[6:] self.names = [] @@ -42,109 +43,220 @@ # print name.__dict__ del name.offset, name.length self.names.append(name) - + def compile(self, ttFont): if not hasattr(self, "names"): # only happens when there are NO name table entries read # from the TTX file self.names = [] - self.names.sort() # sort according to the spec; see NameRecord.__cmp__() - stringData = "" + names = self.names + names.sort() # sort according to the spec; see NameRecord.__lt__() + stringData = b"" format = 0 - n = len(self.names) + n = len(names) stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat) data = struct.pack(">HHH", format, n, stringOffset) lastoffset = 0 done = {} # remember the data so we can reuse the "pointers" - for name in self.names: - if done.has_key(name.string): - name.offset, name.length = done[name.string] + for name in names: + string = name.toBytes() + if string in done: + name.offset, name.length = done[string] else: - name.offset, name.length = done[name.string] = len(stringData), len(name.string) - stringData = stringData + name.string + name.offset, name.length = done[string] = len(stringData), len(string) + stringData = bytesjoin([stringData, string]) data = data + sstruct.pack(nameRecordFormat, name) return data + stringData - + def toXML(self, writer, ttFont): for name in self.names: name.toXML(writer, ttFont) - - def 
fromXML(self, (name, attrs, content), ttFont): - if name <> "namerecord": + + def fromXML(self, name, attrs, content, ttFont): + if name != "namerecord": return # ignore unknown tags if not hasattr(self, "names"): self.names = [] name = NameRecord() self.names.append(name) - name.fromXML((name, attrs, content), ttFont) - + name.fromXML(name, attrs, content, ttFont) + def getName(self, nameID, platformID, platEncID, langID=None): for namerecord in self.names: - if ( namerecord.nameID == nameID and - namerecord.platformID == platformID and + if ( namerecord.nameID == nameID and + namerecord.platformID == platformID and namerecord.platEncID == platEncID): if langID is None or namerecord.langID == langID: return namerecord return None # not found - - def __cmp__(self, other): - return cmp(self.names, other.names) - -class NameRecord: - + def getDebugName(self, nameID): + englishName = someName = None + for name in self.names: + if name.nameID != nameID: + continue + try: + unistr = name.toUnicode() + except UnicodeDecodeError: + continue + + someName = unistr + if (name.platformID, name.langID) in ((1, 0), (3, 0x409)): + englishName = unistr + break + if englishName: + return englishName + elif someName: + return someName + else: + return None + +class NameRecord(object): + + def getEncoding(self, default='ascii'): + """Returns the Python encoding name for this name entry based on its platformID, + platEncID, and langID. If encoding for these values is not known, by default + 'ascii' is returned. That can be overriden by passing a value to the default + argument. 
+ """ + return getEncoding(self.platformID, self.platEncID, self.langID, default) + + def encodingIsUnicodeCompatible(self): + return self.getEncoding(None) in ['utf_16_be', 'ucs2be', 'ascii', 'latin1'] + + def __str__(self): + try: + return self.toUnicode() + except UnicodeDecodeError: + return str(self.string) + + def isUnicode(self): + return (self.platformID == 0 or + (self.platformID == 3 and self.platEncID in [0, 1, 10])) + + def toUnicode(self, errors='strict'): + """ + If self.string is a Unicode string, return it; otherwise try decoding the + bytes in self.string to a Unicode string using the encoding of this + entry as returned by self.getEncoding(); Note that self.getEncoding() + returns 'ascii' if the encoding is unknown to the library. + + Certain heuristics are performed to recover data from bytes that are + ill-formed in the chosen encoding, or that otherwise look misencoded + (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE + but marked otherwise). If the bytes are ill-formed and the heuristics fail, + the error is handled according to the errors parameter to this function, which is + passed to the underlying decode() function; by default it throws a + UnicodeDecodeError exception. + + Note: The mentioned heuristics mean that roundtripping a font to XML and back + to binary might recover some misencoded data whereas just loading the font + and saving it back will not change them. + """ + def isascii(b): + return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D] + encoding = self.getEncoding() + string = self.string + + if encoding == 'utf_16_be' and len(string) % 2 == 1: + # Recover badly encoded UTF-16 strings that have an odd number of bytes: + # - If the last byte is zero, drop it. Otherwise, + # - If all the odd bytes are zero and all the even bytes are ASCII, + # prepend one zero byte. Otherwise, + # - If first byte is zero and all other bytes are ASCII, insert zero + # bytes between consecutive ASCII bytes. 
+ # + # (Yes, I've seen all of these in the wild... sigh) + if byteord(string[-1]) == 0: + string = string[:-1] + elif all(byteord(b) == 0 if i % 2 else isascii(byteord(b)) for i,b in enumerate(string)): + string = b'\0' + string + elif byteord(string[0]) == 0 and all(isascii(byteord(b)) for b in string[1:]): + string = bytesjoin(b'\0'+bytechr(byteord(b)) for b in string[1:]) + + string = tounicode(string, encoding=encoding, errors=errors) + + # If decoded strings still looks like UTF-16BE, it suggests a double-encoding. + # Fix it up. + if all(ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i,c in enumerate(string)): + # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text, + # narrow it down. + string = ''.join(c for c in string[1::2]) + + return string + + def toBytes(self, errors='strict'): + """ If self.string is a bytes object, return it; otherwise try encoding + the Unicode string in self.string to bytes using the encoding of this + entry as returned by self.getEncoding(); Note that self.getEncoding() + returns 'ascii' if the encoding is unknown to the library. + + If the Unicode string cannot be encoded to bytes in the chosen encoding, + the error is handled according to the errors parameter to this function, + which is passed to the underlying encode() function; by default it throws a + UnicodeEncodeError exception. 
+ """ + return tobytes(self.string, encoding=self.getEncoding(), errors=errors) + def toXML(self, writer, ttFont): - writer.begintag("namerecord", [ + try: + unistr = self.toUnicode() + except UnicodeDecodeError: + unistr = None + attrs = [ ("nameID", self.nameID), ("platformID", self.platformID), ("platEncID", self.platEncID), ("langID", hex(self.langID)), - ]) + ] + + if unistr is None or not self.encodingIsUnicodeCompatible(): + attrs.append(("unicode", unistr is not None)) + + writer.begintag("namerecord", attrs) writer.newline() - if self.platformID == 0 or (self.platformID == 3 and self.platEncID in (0, 1)): - if len(self.string) % 2: - # no, shouldn't happen, but some of the Apple - # tools cause this anyway :-( - writer.write16bit(self.string + "\0") - else: - writer.write16bit(self.string) + if unistr is not None: + writer.write(unistr) else: writer.write8bit(self.string) writer.newline() writer.endtag("namerecord") writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): self.nameID = safeEval(attrs["nameID"]) self.platformID = safeEval(attrs["platformID"]) self.platEncID = safeEval(attrs["platEncID"]) self.langID = safeEval(attrs["langID"]) - if self.platformID == 0 or (self.platformID == 3 and self.platEncID in (0, 1)): - s = "" - for element in content: - s = s + element - s = unicode(s, "utf8") - s = s.strip() - self.string = s.encode("utf_16_be") + s = strjoin(content).strip() + encoding = self.getEncoding() + if self.encodingIsUnicodeCompatible() or safeEval(attrs.get("unicode", "False")): + self.string = s.encode(encoding) else: - s = string.strip(string.join(content, "")) - self.string = unicode(s, "utf8").encode("latin1") - - def __cmp__(self, other): - """Compare method, so a list of NameRecords can be sorted - according to the spec by just sorting it...""" - selftuple = (self.platformID, - self.platEncID, - self.langID, - self.nameID, - self.string) - othertuple = 
(other.platformID, - other.platEncID, - other.langID, - other.nameID, - other.string) - return cmp(selftuple, othertuple) - + # This is the inverse of write8bit... + self.string = s.encode("latin1") + + def __lt__(self, other): + if type(self) != type(other): + return NotImplemented + + # implemented so that list.sort() sorts according to the spec. + selfTuple = ( + getattr(self, "platformID", None), + getattr(self, "platEncID", None), + getattr(self, "langID", None), + getattr(self, "nameID", None), + getattr(self, "string", None), + ) + otherTuple = ( + getattr(other, "platformID", None), + getattr(other, "platEncID", None), + getattr(other, "langID", None), + getattr(other, "nameID", None), + getattr(other, "string", None), + ) + return selfTuple < otherTuple + def __repr__(self): return "" % ( self.nameID, self.platformID, self.langID) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_n_a_m_e_test.py fonttools-3.0/Lib/fontTools/ttLib/tables/_n_a_m_e_test.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_n_a_m_e_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_n_a_m_e_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.xmlWriter import XMLWriter +import unittest +from ._n_a_m_e import table__n_a_m_e, NameRecord + + +def makeName(text, nameID, platformID, platEncID, langID): + name = NameRecord() + name.nameID, name.platformID, name.platEncID, name.langID = ( + nameID, platformID, platEncID, langID) + name.string = tobytes(text, encoding=name.getEncoding()) + return name + + +class NameTableTest(unittest.TestCase): + + def test_getDebugName(self): + table = table__n_a_m_e() + table.names = [ + makeName("Bold", 258, 1, 0, 0), # Mac, MacRoman, English + makeName("Gras", 258, 1, 0, 1), # Mac, MacRoman, French + makeName("Fett", 258, 1, 0, 2), # Mac, 
MacRoman, German + makeName("Sem Fracções", 292, 1, 0, 8) # Mac, MacRoman, Portuguese + ] + self.assertEqual("Bold", table.getDebugName(258)) + self.assertEqual("Sem Fracções", table.getDebugName(292)) + self.assertEqual(None, table.getDebugName(999)) + + +class NameRecordTest(unittest.TestCase): + + def test_toUnicode_utf16be(self): + name = makeName("Foo Bold", 111, 0, 2, 7) + self.assertEqual("utf_16_be", name.getEncoding()) + self.assertEqual("Foo Bold", name.toUnicode()) + + def test_toUnicode_macroman(self): + name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman + self.assertEqual("mac_roman", name.getEncoding()) + self.assertEqual("Foo Italic", name.toUnicode()) + + def test_toUnicode_macromanian(self): + name = makeName(b"Foo Italic\xfb", 222, 1, 0, 37) # Mac Romanian + self.assertEqual("mac_romanian", name.getEncoding()) + self.assertEqual("Foo Italic"+unichr(0x02DA), name.toUnicode()) + + def test_toUnicode_UnicodeDecodeError(self): + name = makeName(b"\1", 111, 0, 2, 7) + self.assertEqual("utf_16_be", name.getEncoding()) + self.assertRaises(UnicodeDecodeError, name.toUnicode) + + def toXML(self, name): + writer = XMLWriter(BytesIO()) + name.toXML(writer, ttFont=None) + xml = writer.file.getvalue().decode("utf_8").strip() + return xml.split(writer.newlinestr.decode("utf_8"))[1:] + + def test_toXML_utf16be(self): + name = makeName("Foo Bold", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Foo Bold', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_odd_length1(self): + name = makeName(b"\0F\0o\0o\0", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Foo', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_odd_length2(self): + name = makeName(b"\0Fooz", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Fooz', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_double_encoded(self): + name = makeName(b"\0\0\0F\0\0\0o", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Fo', + '' + ], self.toXML(name)) + + def test_toXML_macroman(self): + name = makeName("Foo 
Italic", 222, 1, 0, 7) # MacRoman + self.assertEqual([ + '', + ' Foo Italic', + '' + ], self.toXML(name)) + + def test_toXML_macroman_actual_utf16be(self): + name = makeName("\0F\0o\0o", 222, 1, 0, 7) + self.assertEqual([ + '', + ' Foo', + '' + ], self.toXML(name)) + + def test_toXML_unknownPlatEncID_nonASCII(self): + name = makeName(b"B\x8arli", 333, 1, 9876, 7) # Unknown Mac encodingID + self.assertEqual([ + '', + ' BŠrli', + '' + ], self.toXML(name)) + + def test_toXML_unknownPlatEncID_ASCII(self): + name = makeName(b"Barli", 333, 1, 9876, 7) # Unknown Mac encodingID + self.assertEqual([ + '', + ' Barli', + '' + ], self.toXML(name)) + + def test_encoding_macroman_misc(self): + name = makeName('', 123, 1, 0, 17) # Mac Turkish + self.assertEqual(name.getEncoding(), "mac_turkish") + name.langID = 37 + self.assertEqual(name.getEncoding(), "mac_romanian") + name.langID = 45 # Other + self.assertEqual(name.getEncoding(), "mac_roman") + + def test_extended_mac_encodings(self): + name = makeName(b'\xfe', 123, 1, 1, 0) # Mac Japanese + self.assertEqual(name.toUnicode(), unichr(0x2122)) + + def test_extended_unknown(self): + name = makeName(b'\xfe', 123, 10, 11, 12) + self.assertEqual(name.getEncoding(), "ascii") + self.assertEqual(name.getEncoding(None), None) + self.assertEqual(name.getEncoding(default=None), None) + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/O_S_2f_2.py fonttools-3.0/Lib/fontTools/ttLib/tables/O_S_2f_2.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/O_S_2f_2.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/O_S_2f_2.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,7 +1,9 @@ -import DefaultTable -import sstruct +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval, num2binary, binary2num -from types import TupleType +from . 
import DefaultTable +import warnings # panose classification @@ -19,15 +21,15 @@ bXHeight: B """ -class Panose: - +class Panose(object): + def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(panoseFormat) for name in names: writer.simpletag(name, value=getattr(self, name)) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): setattr(self, name, safeEval(attrs["value"])) @@ -58,8 +60,8 @@ ulUnicodeRange4: L # character range achVendID: 4s # font vendor identification fsSelection: H # font selection flags - fsFirstCharIndex: H # first unicode character index - fsLastCharIndex: H # last unicode character index + usFirstCharIndex: H # first unicode character index + usLastCharIndex: H # last unicode character index sTypoAscender: h # typographic ascender sTypoDescender: h # typographic descender sTypoLineGap: h # typographic line gap @@ -77,44 +79,49 @@ sCapHeight: h usDefaultChar: H usBreakChar: H - usMaxContex: H + usMaxContext: H +""" + +OS2_format_5_addition = OS2_format_2_addition + """ + usLowerOpticalPointSize: H + usUpperOpticalPointSize: H """ bigendian = " > # big endian\n" OS2_format_1 = OS2_format_0 + OS2_format_1_addition OS2_format_2 = OS2_format_0 + OS2_format_2_addition +OS2_format_5 = OS2_format_0 + OS2_format_5_addition OS2_format_1_addition = bigendian + OS2_format_1_addition OS2_format_2_addition = bigendian + OS2_format_2_addition +OS2_format_5_addition = bigendian + OS2_format_5_addition class table_O_S_2f_2(DefaultTable.DefaultTable): - + """the OS/2 table""" - + def decompile(self, data, ttFont): dummy, data = sstruct.unpack2(OS2_format_0, data, self) - # workarounds for buggy fonts (Apple, mona) - if not data: - self.version = 0 - elif len(data) == sstruct.calcsize(OS2_format_1_addition): - self.version = 1 - elif len(data) == sstruct.calcsize(OS2_format_2_addition): - if self.version not in (2, 3, 4): - self.version = 1 - else: - from fontTools 
import ttLib - raise ttLib.TTLibError, "unknown format for OS/2 table (incorrect length): version %s" % (self.version, len(data)) + if self.version == 1: - sstruct.unpack2(OS2_format_1_addition, data, self) + dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self) elif self.version in (2, 3, 4): - sstruct.unpack2(OS2_format_2_addition, data, self) - elif self.version <> 0: + dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self) + elif self.version == 5: + dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self) + self.usLowerOpticalPointSize /= 20 + self.usUpperOpticalPointSize /= 20 + elif self.version != 0: from fontTools import ttLib - raise ttLib.TTLibError, "unknown format for OS/2 table: version %s" % self.version + raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version) + if len(data): + warnings.warn("too much 'OS/2' table data") + self.panose = sstruct.unpack(panoseFormat, self.panose, Panose()) - + def compile(self, ttFont): + self.updateFirstAndLastCharIndex(ttFont) panose = self.panose self.panose = sstruct.pack(panoseFormat, self.panose) if self.version == 0: @@ -123,30 +130,39 @@ data = sstruct.pack(OS2_format_1, self) elif self.version in (2, 3, 4): data = sstruct.pack(OS2_format_2, self) + elif self.version == 5: + d = self.__dict__.copy() + d['usLowerOpticalPointSize'] = int(round(self.usLowerOpticalPointSize * 20)) + d['usUpperOpticalPointSize'] = int(round(self.usUpperOpticalPointSize * 20)) + data = sstruct.pack(OS2_format_5, d) else: from fontTools import ttLib - raise ttLib.TTLibError, "unknown format for OS/2 table: version %s" % self.version + raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version) self.panose = panose return data - + def toXML(self, writer, ttFont): + writer.comment( + "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n" + "will be recalculated by the compiler") + writer.newline() if self.version == 1: format = OS2_format_1 elif 
self.version in (2, 3, 4): format = OS2_format_2 + elif self.version == 5: + format = OS2_format_5 else: format = OS2_format_0 formatstring, names, fixes = sstruct.getformat(format) for name in names: value = getattr(self, name) - if type(value) == type(0L): - value = int(value) if name=="panose": writer.begintag("panose") writer.newline() value.toXML(writer, ttFont) writer.endtag("panose") - elif name in ("ulUnicodeRange1", "ulUnicodeRange2", + elif name in ("ulUnicodeRange1", "ulUnicodeRange2", "ulUnicodeRange3", "ulUnicodeRange4", "ulCodePageRange1", "ulCodePageRange2"): writer.simpletag(name, value=num2binary(value)) @@ -157,14 +173,15 @@ else: writer.simpletag(name, value=value) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if name == "panose": self.panose = panose = Panose() for element in content: - if type(element) == TupleType: - panose.fromXML(element, ttFont) - elif name in ("ulUnicodeRange1", "ulUnicodeRange2", + if isinstance(element, tuple): + name, attrs, content = element + panose.fromXML(name, attrs, content, ttFont) + elif name in ("ulUnicodeRange1", "ulUnicodeRange2", "ulUnicodeRange3", "ulUnicodeRange4", "ulCodePageRange1", "ulCodePageRange2", "fsType", "fsSelection"): @@ -174,4 +191,40 @@ else: setattr(self, name, safeEval(attrs["value"])) - + def updateFirstAndLastCharIndex(self, ttFont): + codes = set() + for table in ttFont['cmap'].tables: + if table.isUnicode(): + codes.update(table.cmap.keys()) + if codes: + minCode = min(codes) + maxCode = max(codes) + # USHORT cannot hold codepoints greater than 0xFFFF + self.usFirstCharIndex = 0xFFFF if minCode > 0xFFFF else minCode + self.usLastCharIndex = 0xFFFF if maxCode > 0xFFFF else maxCode + + # misspelled attributes kept for legacy reasons + + @property + def usMaxContex(self): + return self.usMaxContext + + @usMaxContex.setter + def usMaxContex(self, value): + self.usMaxContext = value + + @property + def 
fsFirstCharIndex(self): + return self.usFirstCharIndex + + @fsFirstCharIndex.setter + def fsFirstCharIndex(self, value): + self.usFirstCharIndex = value + + @property + def fsLastCharIndex(self): + return self.usLastCharIndex + + @fsLastCharIndex.setter + def fsLastCharIndex(self, value): + self.usLastCharIndex = value diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/otBase.py fonttools-3.0/Lib/fontTools/ttLib/tables/otBase.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/otBase.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/otBase.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,9 +1,9 @@ -from DefaultTable import DefaultTable -import otData +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .DefaultTable import DefaultTable import struct -from types import TupleType -class OverflowErrorRecord: +class OverflowErrorRecord(object): def __init__(self, overflowTuple): self.tableType = overflowTuple[0] self.LookupListIndex = overflowTuple[1] @@ -23,38 +23,43 @@ class BaseTTXConverter(DefaultTable): - + """Generic base class for TTX table converters. It functions as an adapter between the TTX (ttLib actually) table model and the model we use for OpenType tables, which is necessarily subtly different. """ - + def decompile(self, data, font): - import otTables - cachingStats = None - reader = OTTableReader(data, self.tableTag, cachingStats=cachingStats) + from . 
import otTables + cachingStats = None if True else {} + class GlobalState(object): + def __init__(self, tableType, cachingStats): + self.tableType = tableType + self.cachingStats = cachingStats + globalState = GlobalState(tableType=self.tableTag, + cachingStats=cachingStats) + reader = OTTableReader(data, globalState) tableClass = getattr(otTables, self.tableTag) self.table = tableClass() self.table.decompile(reader, font) - if 0: - stats = [(v, k) for k, v in cachingStats.items()] - stats.sort() + if cachingStats: + stats = sorted([(v, k) for k, v in cachingStats.items()]) stats.reverse() - print "cachingsstats for ", self.tableTag + print("cachingsstats for ", self.tableTag) for v, k in stats: if v < 2: break - print v, k - print "---", len(stats) - + print(v, k) + print("---", len(stats)) + def compile(self, font): """ Create a top-level OTFWriter for the GPOS/GSUB table. Call the compile method for the the table for each 'converter' record in the table converter list - call converter's write method for each item in the value. + call converter's write method for each item in the value. - For simple items, the write method adds a string to the - writer's self.items list. - - For Struct/Table/Subtable items, it add first adds new writer to the + writer's self.items list. + - For Struct/Table/Subtable items, it add first adds new writer to the to the writer's self.items, then calls the item's compile method. This creates a tree of writers, rooted at the GUSB/GPOS writer, with each writer representing a table, and the writer.items list containing @@ -66,118 +71,164 @@ Traverse the flat list of tables again, calling getData each get the data in the table, now that pos's and offset are known. - If a lookup subtable overflows an offset, we have to start all over. + If a lookup subtable overflows an offset, we have to start all over. 
""" - writer = OTTableWriter(self.tableTag) - writer.parent = None - self.table.compile(writer, font) - return writer.getAllData() + class GlobalState(object): + def __init__(self, tableType): + self.tableType = tableType + globalState = GlobalState(tableType=self.tableTag) + overflowRecord = None + + while True: + try: + writer = OTTableWriter(globalState) + self.table.compile(writer, font) + return writer.getAllData() + + except OTLOffsetOverflowError as e: + + if overflowRecord == e.value: + raise # Oh well... + + overflowRecord = e.value + print("Attempting to fix OTLOffsetOverflowError", e) + lastItem = overflowRecord + + ok = 0 + if overflowRecord.itemName is None: + from .otTables import fixLookupOverFlows + ok = fixLookupOverFlows(font, overflowRecord) + else: + from .otTables import fixSubTableOverFlows + ok = fixSubTableOverFlows(font, overflowRecord) + if not ok: + raise def toXML(self, writer, font): self.table.toXML2(writer, font) - - def fromXML(self, (name, attrs, content), font): - import otTables + + def fromXML(self, name, attrs, content, font): + from . 
import otTables if not hasattr(self, "table"): tableClass = getattr(otTables, self.tableTag) self.table = tableClass() - self.table.fromXML((name, attrs, content), font) + self.table.fromXML(name, attrs, content, font) + +class OTTableReader(object): -class OTTableReader: - """Helper class to retrieve data from an OpenType table.""" - - def __init__(self, data, tableType, offset=0, valueFormat=None, cachingStats=None): + + __slots__ = ('data', 'offset', 'pos', 'globalState', 'localState') + + def __init__(self, data, globalState={}, localState=None, offset=0): self.data = data self.offset = offset self.pos = offset - self.tableType = tableType - if valueFormat is None: - valueFormat = (ValueRecordFactory(), ValueRecordFactory()) - self.valueFormat = valueFormat - self.cachingStats = cachingStats - + self.globalState = globalState + self.localState = localState + + def advance(self, count): + self.pos += count + def seek(self, pos): + self.pos = pos + + def copy(self): + other = self.__class__(self.data, self.globalState, self.localState, self.offset) + other.pos = self.pos + return other + def getSubReader(self, offset): offset = self.offset + offset - if self.cachingStats is not None: - try: - self.cachingStats[offset] = self.cachingStats[offset] + 1 - except KeyError: - self.cachingStats[offset] = 1 - - subReader = self.__class__(self.data, self.tableType, offset, - self.valueFormat, self.cachingStats) - return subReader - + cachingStats = self.globalState.cachingStats + if cachingStats is not None: + cachingStats[offset] = cachingStats.get(offset, 0) + 1 + return self.__class__(self.data, self.globalState, self.localState, offset) + def readUShort(self): pos = self.pos newpos = pos + 2 value, = struct.unpack(">H", self.data[pos:newpos]) self.pos = newpos return value - + def readShort(self): pos = self.pos newpos = pos + 2 value, = struct.unpack(">h", self.data[pos:newpos]) self.pos = newpos return value - + def readLong(self): pos = self.pos newpos = pos + 4 
value, = struct.unpack(">l", self.data[pos:newpos]) self.pos = newpos return value - + + def readUInt24(self): + pos = self.pos + newpos = pos + 3 + value, = struct.unpack(">l", b'\0'+self.data[pos:newpos]) + self.pos = newpos + return value + def readULong(self): pos = self.pos newpos = pos + 4 value, = struct.unpack(">L", self.data[pos:newpos]) self.pos = newpos return value - + def readTag(self): pos = self.pos newpos = pos + 4 - value = self.data[pos:newpos] + value = Tag(self.data[pos:newpos]) assert len(value) == 4 self.pos = newpos return value - - def readStruct(self, format, size=None): - if size is None: - size = struct.calcsize(format) - else: - assert size == struct.calcsize(format) + + def readData(self, count): pos = self.pos - newpos = pos + size - values = struct.unpack(format, self.data[pos:newpos]) + newpos = pos + count + value = self.data[pos:newpos] self.pos = newpos - return values - - def setValueFormat(self, format, which): - self.valueFormat[which].setFormat(format) - - def readValueRecord(self, font, which): - return self.valueFormat[which].readValueRecord(self, font) + return value + + def __setitem__(self, name, value): + state = self.localState.copy() if self.localState else dict() + state[name] = value + self.localState = state + + def __getitem__(self, name): + return self.localState and self.localState[name] + + def __contains__(self, name): + return self.localState and name in self.localState -class OTTableWriter: - +class OTTableWriter(object): + """Helper class to gather and assemble data for OpenType tables.""" - - def __init__(self, tableType, valueFormat=None): + + def __init__(self, globalState, localState=None): self.items = [] - self.tableType = tableType - if valueFormat is None: - valueFormat = ValueRecordFactory(), ValueRecordFactory() - self.valueFormat = valueFormat self.pos = None - + self.globalState = globalState + self.localState = localState + self.longOffset = False + self.parent = None + + def __setitem__(self, 
name, value): + state = self.localState.copy() if self.localState else dict() + state[name] = value + self.localState = state + + def __getitem__(self, name): + return self.localState[name] + # assembler interface - + def getAllData(self): """Assemble all data, including all subtables.""" self._doneWriting() @@ -195,7 +246,6 @@ table.pos = pos pos = pos + table.getDataLength() - data = [] for table in tables: tableData = table.getData() @@ -205,54 +255,44 @@ tableData = table.getData() data.append(tableData) - return "".join(data) - + return bytesjoin(data) + def getDataLength(self): """Return the length of this table in bytes, without subtables.""" l = 0 - if hasattr(self, "Extension"): - longOffset = 1 - else: - longOffset = 0 for item in self.items: if hasattr(item, "getData") or hasattr(item, "getCountData"): - if longOffset: + if item.longOffset: l = l + 4 # sizeof(ULong) else: l = l + 2 # sizeof(UShort) else: l = l + len(item) return l - + def getData(self): """Assemble the data for this writer/table, without subtables.""" items = list(self.items) # make a shallow copy - if hasattr(self,"Extension"): - longOffset = 1 - else: - longOffset = 0 pos = self.pos numItems = len(items) for i in range(numItems): item = items[i] - + if hasattr(item, "getData"): - if longOffset: + if item.longOffset: items[i] = packULong(item.pos - pos) else: try: items[i] = packUShort(item.pos - pos) - except AssertionError: + except struct.error: # provide data to fix overflow problem. - # If the overflow is to a lookup, or from a lookup to a subtable, - # just report the current item. - if self.name in [ 'LookupList', 'Lookup']: - overflowErrorRecord = self.getOverflowErrorRecord(item) - else: + # If the overflow is to a lookup, or from a lookup to a subtable, + # just report the current item. Otherwise... + if self.name not in [ 'LookupList', 'Lookup']: # overflow is within a subTable. Life is more complicated. 
# If we split the sub-table just before the current item, we may still suffer overflow. # This is because duplicate table merging is done only within an Extension subTable tree; - # when we split the subtable in two, some items may no longer be duplicates. + # when we split the subtable in two, some items may no longer be duplicates. # Get worst case by adding up all the item lengths, depth first traversal. # and then report the first item that overflows a short. def getDeepItemLength(table): @@ -263,36 +303,36 @@ else: length = len(table) return length - + length = self.getDataLength() if hasattr(self, "sortCoverageLast") and item.name == "Coverage": # Coverage is first in the item list, but last in the table list, - # The original overflow is really in the item list. Skip the Coverage + # The original overflow is really in the item list. Skip the Coverage # table in the following test. items = items[i+1:] - + for j in range(len(items)): item = items[j] length = length + getDeepItemLength(item) if length > 65535: break overflowErrorRecord = self.getOverflowErrorRecord(item) - - - raise OTLOffsetOverflowError, overflowErrorRecord - return "".join(items) - + raise OTLOffsetOverflowError(overflowErrorRecord) + + return bytesjoin(items) + def __hash__(self): # only works after self._doneWriting() has been called return hash(self.items) - - def __cmp__(self, other): - if hasattr(other, "items"): - return cmp(self.items, other.items) - else: - return cmp(id(self), id(other)) - + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.items == other.items + def _doneWriting(self, internedTables=None): # Convert CountData references to data string items # collapse duplicate table references to a unique entry @@ -304,8 +344,8 @@ if internedTables is None: internedTables = {} items = self.items - iRange = range(len(items)) - + iRange = list(range(len(items))) + if 
hasattr(self, "Extension"): newTree = 1 else: @@ -319,17 +359,18 @@ item._doneWriting() else: item._doneWriting(internedTables) - if internedTables.has_key(item): - items[i] = item = internedTables[item] + internedItem = internedTables.get(item) + if internedItem: + items[i] = item = internedItem else: internedTables[item] = item self.items = tuple(items) - + def _gatherTables(self, tables=None, extTables=None, done=None): # Convert table references in self.items tree to a flat # list of tables in depth-first traversal order. # "tables" are OTTableWriter objects. - # We do the traversal in reverse order at each level, in order to + # We do the traversal in reverse order at each level, in order to # resolve duplicate references to be the last reference in the list of tables. # For extension lookups, duplicate references can be merged only within the # writer tree under the extension lookup. @@ -341,7 +382,7 @@ done[self] = 1 numItems = len(self.items) - iRange = range(numItems) + iRange = list(range(numItems)) iRange.reverse() if hasattr(self, "Extension"): @@ -358,13 +399,12 @@ if hasattr(item, "name") and (item.name == "Coverage"): sortCoverageLast = 1 break - if not done.has_key(item): + if item not in done: item._gatherTables(tables, extTables, done) else: - index = max(item.parent.keys()) - item.parent[index + 1] = self + # We're a new parent of item + pass - saveItem = None for i in iRange: item = self.items[i] if not hasattr(item, "getData"): @@ -375,64 +415,66 @@ continue if appendExtensions: - assert extTables != None, "Program or XML editing error. Extension subtables cannot contain extensions subtables" + assert extTables is not None, "Program or XML editing error. 
Extension subtables cannot contain extensions subtables" newDone = {} item._gatherTables(extTables, None, newDone) - elif not done.has_key(item): + elif item not in done: item._gatherTables(tables, extTables, done) else: - index = max(item.parent.keys()) - item.parent[index + 1] = self - + # We're a new parent of item + pass tables.append(self) return tables, extTables - + # interface for gathering data, as used by table.compile() - + def getSubWriter(self): - subwriter = self.__class__(self.tableType, self.valueFormat) - subwriter.parent = {0:self} # because some subtables have idential values, we discard - # the duplicates under the getAllData method. Hence some - # subtable writers can have more than one parent writer. + subwriter = self.__class__(self.globalState, self.localState) + subwriter.parent = self # because some subtables have idential values, we discard + # the duplicates under the getAllData method. Hence some + # subtable writers can have more than one parent writer. + # But we just care about first one right now. 
return subwriter - + def writeUShort(self, value): assert 0 <= value < 0x10000 self.items.append(struct.pack(">H", value)) - + def writeShort(self, value): self.items.append(struct.pack(">h", value)) - + + def writeUInt24(self, value): + assert 0 <= value < 0x1000000 + b = struct.pack(">L", value) + self.items.append(b[1:]) + def writeLong(self, value): self.items.append(struct.pack(">l", value)) - + def writeULong(self, value): self.items.append(struct.pack(">L", value)) - + def writeTag(self, tag): + tag = Tag(tag).tobytes() assert len(tag) == 4 self.items.append(tag) - + def writeSubTable(self, subWriter): self.items.append(subWriter) - + def writeCountReference(self, table, name): - self.items.append(CountReference(table, name)) - + ref = CountReference(table, name) + self.items.append(ref) + return ref + def writeStruct(self, format, values): - data = apply(struct.pack, (format,) + values) + data = struct.pack(*(format,) + values) self.items.append(data) - + def writeData(self, data): self.items.append(data) - - def setValueFormat(self, format, which): - self.valueFormat[which].setFormat(format) - - def writeValueRecord(self, value, font, which): - return self.valueFormat[which].writeValueRecord(self, font, value) def getOverflowErrorRecord(self, item): LookupListIndex = SubTableIndex = itemName = itemIndex = None @@ -446,39 +488,45 @@ if hasattr(item, 'repeatIndex'): itemIndex = item.repeatIndex if self.name == 'SubTable': - LookupListIndex = self.parent[0].repeatIndex + LookupListIndex = self.parent.repeatIndex SubTableIndex = self.repeatIndex elif self.name == 'ExtSubTable': - LookupListIndex = self.parent[0].parent[0].repeatIndex - SubTableIndex = self.parent[0].repeatIndex + LookupListIndex = self.parent.parent.repeatIndex + SubTableIndex = self.parent.repeatIndex else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable. 
- itemName = ".".join(self.name, item.name) - p1 = self.parent[0] + itemName = ".".join([self.name, item.name]) + p1 = self.parent while p1 and p1.name not in ['ExtSubTable', 'SubTable']: - itemName = ".".join(p1.name, item.name) - p1 = p1.parent[0] + itemName = ".".join([p1.name, item.name]) + p1 = p1.parent if p1: if p1.name == 'ExtSubTable': - LookupListIndex = self.parent[0].parent[0].repeatIndex - SubTableIndex = self.parent[0].repeatIndex + LookupListIndex = p1.parent.parent.repeatIndex + SubTableIndex = p1.parent.repeatIndex else: - LookupListIndex = self.parent[0].repeatIndex - SubTableIndex = self.repeatIndex + LookupListIndex = p1.parent.repeatIndex + SubTableIndex = p1.repeatIndex - return OverflowErrorRecord( (self.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) ) + return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) ) -class CountReference: +class CountReference(object): """A reference to a Count value, not a count of references.""" def __init__(self, table, name): self.table = table self.name = name + def setValue(self, value): + table = self.table + name = self.name + if table[name] is None: + table[name] = value + else: + assert table[name] == value, (name, table[name], value) def getCountData(self): return packUShort(self.table[self.name]) def packUShort(value): - assert 0 <= value < 0x10000, value return struct.pack(">H", value) @@ -487,149 +535,143 @@ return struct.pack(">L", value) +class BaseTable(object): -class TableStack: - """A stack of table dicts, working as a stack of namespaces so we can - retrieve values from (and store values to) tables higher up the stack.""" - def __init__(self): - self.stack = [] - def push(self, table): - self.stack.insert(0, table) - def pop(self): - self.stack.pop(0) - def getTop(self): - return self.stack[0] - def getValue(self, name): - return self.__findTable(name)[name] - def storeValue(self, name, value): - table = 
self.__findTable(name) - if table[name] is None: - table[name] = value - else: - assert table[name] == value, (table[name], value) - def __findTable(self, name): - for table in self.stack: - if table.has_key(name): - return table - raise KeyError, name - - -class BaseTable: - def __init__(self): - self.compileStatus = 0 # 0 means table was created - # 1 means the table.read() function was called by a table which is subject - # to delayed compilation - # 2 means that it was subject to delayed compilation, and - # has been decompiled - # 3 means that the start and end fields have been filled out, and that we - # can use the data string rather than compiling from the table data. + """Generic base class for all OpenType (sub)tables.""" - self.recurse = 0 - def __getattr__(self, attr): - # we get here only when the table does not have the attribute. - # This method ovveride exists so that we can try to de-compile - # a table which is subject to delayed decompilation, and then try - # to get the value again after decompilation. - self.recurse +=1 - if self.recurse > 2: - # shouldn't ever get here - we should only get to two levels of recursion. - # this guards against self.decompile NOT setting compileStatus to other than 1. - raise AttributeError, attr - if self.compileStatus == 1: - # table.read() has been called, but table has not yet been decompiled - # This happens only for extension tables. 
- self.decompile(self.reader, self.font) - val = getattr(self, attr) - self.recurse -=1 - return val - - raise AttributeError, attr - + reader = self.__dict__.get("reader") + if reader: + del self.reader + font = self.font + del self.font + self.decompile(reader, font) + return getattr(self, attr) + + raise AttributeError(attr) + + def ensureDecompiled(self): + reader = self.__dict__.get("reader") + if reader: + del self.reader + font = self.font + del self.font + self.decompile(reader, font) + + @classmethod + def getRecordSize(cls, reader): + totalSize = 0 + for conv in cls.converters: + size = conv.getRecordSize(reader) + if size is NotImplemented: return NotImplemented + countValue = 1 + if conv.repeat: + if conv.repeat in reader: + countValue = reader[conv.repeat] + else: + return NotImplemented + totalSize += size * countValue + return totalSize - """Generic base class for all OpenType (sub)tables.""" - def getConverters(self): return self.converters - + def getConverterByName(self, name): return self.convertersByName[name] - - def decompile(self, reader, font, tableStack=None): - self.compileStatus = 2 # table has been decompiled. 
- if tableStack is None: - tableStack = TableStack() + + def decompile(self, reader, font): self.readFormat(reader) table = {} self.__rawTable = table # for debugging - tableStack.push(table) - for conv in self.getConverters(): + converters = self.getConverters() + for conv in converters: if conv.name == "SubTable": - conv = conv.getConverter(reader.tableType, + conv = conv.getConverter(reader.globalState.tableType, table["LookupType"]) if conv.name == "ExtSubTable": - conv = conv.getConverter(reader.tableType, + conv = conv.getConverter(reader.globalState.tableType, table["ExtensionLookupType"]) + if conv.name == "FeatureParams": + conv = conv.getConverter(reader["FeatureTag"]) if conv.repeat: - l = [] - for i in range(tableStack.getValue(conv.repeat) + conv.repeatOffset): - l.append(conv.read(reader, font, tableStack)) - table[conv.name] = l + if conv.repeat in table: + countValue = table[conv.repeat] + else: + # conv.repeat is a propagated count + countValue = reader[conv.repeat] + countValue += conv.aux + table[conv.name] = conv.readArray(reader, font, table, countValue) else: - table[conv.name] = conv.read(reader, font, tableStack) - tableStack.pop() + if conv.aux and not eval(conv.aux, None, table): + continue + table[conv.name] = conv.read(reader, font, table) + if conv.isPropagated: + reader[conv.name] = table[conv.name] + self.postRead(table, font) - del self.__rawTable # succeeded, get rid of debugging info - def preCompile(self): - pass # used only by the LookupList class + del self.__rawTable # succeeded, get rid of debugging info - def compile(self, writer, font, tableStack=None): - if tableStack is None: - tableStack = TableStack() + def compile(self, writer, font): + self.ensureDecompiled() table = self.preWrite(font) if hasattr(self, 'sortCoverageLast'): writer.sortCoverageLast = 1 + if hasattr(self.__class__, 'LookupType'): + writer['LookupType'].setValue(self.__class__.LookupType) + self.writeFormat(writer) - tableStack.push(table) for conv in 
self.getConverters(): value = table.get(conv.name) if conv.repeat: if value is None: value = [] - tableStack.storeValue(conv.repeat, len(value) - conv.repeatOffset) - for i in range(len(value)): - conv.write(writer, font, tableStack, value[i], i) + countValue = len(value) - conv.aux + if conv.repeat in table: + CountReference(table, conv.repeat).setValue(countValue) + else: + # conv.repeat is a propagated count + writer[conv.repeat].setValue(countValue) + conv.writeArray(writer, font, table, value) elif conv.isCount: # Special-case Count values. # Assumption: a Count field will *always* precede - # the actual array. + # the actual array(s). # We need a default value, as it may be set later by a nested - # table. TableStack.storeValue() will then find it here. - table[conv.name] = None + # table. We will later store it here. # We add a reference: by the time the data is assembled # the Count value will be filled in. - writer.writeCountReference(table, conv.name) + ref = writer.writeCountReference(table, conv.name) + table[conv.name] = None + if conv.isPropagated: + writer[conv.name] = ref + elif conv.isLookupType: + ref = writer.writeCountReference(table, conv.name) + table[conv.name] = None + writer['LookupType'] = ref else: - conv.write(writer, font, tableStack, value) - tableStack.pop() - + if conv.aux and not eval(conv.aux, None, table): + continue + conv.write(writer, font, table, value) + if conv.isPropagated: + writer[conv.name] = value + def readFormat(self, reader): pass - + def writeFormat(self, writer): pass - + def postRead(self, table, font): self.__dict__.update(table) - + def preWrite(self, font): return self.__dict__.copy() - - def toXML(self, xmlWriter, font, attrs=None): - tableName = self.__class__.__name__ + + def toXML(self, xmlWriter, font, attrs=None, name=None): + tableName = name if name else self.__class__.__name__ if attrs is None: attrs = [] if hasattr(self, "Format"): @@ -639,22 +681,25 @@ self.toXML2(xmlWriter, font) 
xmlWriter.endtag(tableName) xmlWriter.newline() - + def toXML2(self, xmlWriter, font): # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB). # This is because in TTX our parent writes our main tag, and in otBase.py we # do it ourselves. I think I'm getting schizophrenic... for conv in self.getConverters(): - value = getattr(self, conv.name) if conv.repeat: + value = getattr(self, conv.name) for i in range(len(value)): item = value[i] conv.xmlWrite(xmlWriter, font, item, conv.name, [("index", i)]) else: + if conv.aux and not eval(conv.aux, None, vars(self)): + continue + value = getattr(self, conv.name) conv.xmlWrite(xmlWriter, font, value, conv.name, []) - - def fromXML(self, (name, attrs, content), font): + + def fromXML(self, name, attrs, content, font): try: conv = self.getConverterByName(name) except KeyError: @@ -668,36 +713,44 @@ seq.append(value) else: setattr(self, conv.name, value) - - def __cmp__(self, other): - # this is only for debugging, so it's ok to barf - # when 'other' has no __dict__ or __class__ - rv = cmp(self.__class__, other.__class__) - if not rv: - rv = cmp(self.__dict__, other.__dict__) - return rv - else: - return rv + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + + self.ensureDecompiled() + other.ensureDecompiled() + + return self.__dict__ == other.__dict__ class FormatSwitchingBaseTable(BaseTable): - + """Minor specialization of BaseTable, for tables that have multiple formats, eg. CoverageFormat1 vs. 
CoverageFormat2.""" - + + @classmethod + def getRecordSize(cls, reader): + return NotImplemented + def getConverters(self): return self.converters[self.Format] - + def getConverterByName(self, name): return self.convertersByName[self.Format][name] - + def readFormat(self, reader): self.Format = reader.readUShort() - assert self.Format <> 0, (self, reader.pos, len(reader.data)) - + assert self.Format != 0, (self, reader.pos, len(reader.data)) + def writeFormat(self, writer): writer.writeUShort(self.Format) + def toXML(self, xmlWriter, font, attrs=None, name=None): + BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) + # # Support for ValueRecords @@ -708,24 +761,24 @@ # valueRecordFormat = [ -# Mask Name isDevice signed - (0x0001, "XPlacement", 0, 1), - (0x0002, "YPlacement", 0, 1), - (0x0004, "XAdvance", 0, 1), - (0x0008, "YAdvance", 0, 1), - (0x0010, "XPlaDevice", 1, 0), - (0x0020, "YPlaDevice", 1, 0), - (0x0040, "XAdvDevice", 1, 0), - (0x0080, "YAdvDevice", 1, 0), -# reserved: - (0x0100, "Reserved1", 0, 0), - (0x0200, "Reserved2", 0, 0), - (0x0400, "Reserved3", 0, 0), - (0x0800, "Reserved4", 0, 0), - (0x1000, "Reserved5", 0, 0), - (0x2000, "Reserved6", 0, 0), - (0x4000, "Reserved7", 0, 0), - (0x8000, "Reserved8", 0, 0), +# Mask Name isDevice signed + (0x0001, "XPlacement", 0, 1), + (0x0002, "YPlacement", 0, 1), + (0x0004, "XAdvance", 0, 1), + (0x0008, "YAdvance", 0, 1), + (0x0010, "XPlaDevice", 1, 0), + (0x0020, "YPlaDevice", 1, 0), + (0x0040, "XAdvDevice", 1, 0), + (0x0080, "YAdvDevice", 1, 0), +# reserved: + (0x0100, "Reserved1", 0, 0), + (0x0200, "Reserved2", 0, 0), + (0x0400, "Reserved3", 0, 0), + (0x0800, "Reserved4", 0, 0), + (0x1000, "Reserved5", 0, 0), + (0x2000, "Reserved6", 0, 0), + (0x4000, "Reserved7", 0, 0), + (0x8000, "Reserved8", 0, 0), ] def _buildDict(): @@ -737,17 +790,20 @@ valueRecordFormatDict = _buildDict() -class ValueRecordFactory: - +class ValueRecordFactory(object): + """Given a format code, this object convert 
ValueRecords.""" - - def setFormat(self, valueFormat): + + def __init__(self, valueFormat): format = [] for mask, name, isDevice, signed in valueRecordFormat: if valueFormat & mask: format.append((name, isDevice, signed)) self.format = format - + + def __len__(self): + return len(self.format) + def readValueRecord(self, reader, font): format = self.format if not format: @@ -760,7 +816,7 @@ value = reader.readUShort() if isDevice: if value: - import otTables + from . import otTables subReader = reader.getSubReader(value) value = getattr(otTables, name)() value.decompile(subReader, font) @@ -768,7 +824,7 @@ value = None setattr(valueRecord, name, value) return valueRecord - + def writeValueRecord(self, writer, font, valueRecord): for name, isDevice, signed in self.format: value = getattr(valueRecord, name, 0) @@ -785,16 +841,16 @@ writer.writeUShort(value) -class ValueRecord: - +class ValueRecord(object): + # see ValueRecordFactory - + def getFormat(self): format = 0 for name in self.__dict__.keys(): format = format | valueRecordFormatDict[name][0] return format - + def toXML(self, xmlWriter, font, valueName, attrs=None): if attrs is None: simpleItems = [] @@ -820,29 +876,26 @@ else: xmlWriter.simpletag(valueName, simpleItems) xmlWriter.newline() - - def fromXML(self, (name, attrs, content), font): - import otTables + + def fromXML(self, name, attrs, content, font): + from . 
import otTables for k, v in attrs.items(): setattr(self, k, int(v)) for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element value = getattr(otTables, name)() for elem2 in content: - if type(elem2) <> TupleType: + if not isinstance(elem2, tuple): continue - value.fromXML(elem2, font) + name2, attrs2, content2 = elem2 + value.fromXML(name2, attrs2, content2, font) setattr(self, name, value) - - def __cmp__(self, other): - # this is only for debugging, so it's ok to barf - # when 'other' has no __dict__ or __class__ - rv = cmp(self.__class__, other.__class__) - if not rv: - rv = cmp(self.__dict__, other.__dict__) - return rv - else: - return rv + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/otConverters.py fonttools-3.0/Lib/fontTools/ttLib/tables/otConverters.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/otConverters.py 2013-06-22 08:34:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/otConverters.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,5 +1,9 @@ -from types import TupleType +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.misc.textTools import safeEval +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from .otBase import ValueRecordFactory +import array def buildConverters(tableSpec, tableNamespace): @@ -8,63 +12,128 @@ the results are assigned to the corresponding class in otTables.py.""" converters = [] convertersByName = {} - for tp, name, repeat, repeatOffset, descr in tableSpec: + for tp, name, repeat, aux, descr in tableSpec: + tableName = name if name.startswith("ValueFormat"): assert tp == "uint16" converterClass = ValueFormat - elif name == "DeltaValue": + elif 
name.endswith("Count") or name.endswith("LookupType"): assert tp == "uint16" - converterClass = DeltaValue - elif name.endswith("Count"): - assert tp == "uint16" - converterClass = Count + converterClass = ComputedUShort elif name == "SubTable": converterClass = SubTable elif name == "ExtSubTable": converterClass = ExtSubTable + elif name == "FeatureParams": + converterClass = FeatureParams else: - converterClass = converterMapping[tp] - tableClass = tableNamespace.get(name) - conv = converterClass(name, repeat, repeatOffset, tableClass) + if not tp in converterMapping: + tableName = tp + converterClass = Struct + else: + converterClass = converterMapping[tp] + tableClass = tableNamespace.get(tableName) + conv = converterClass(name, repeat, aux, tableClass) if name in ["SubTable", "ExtSubTable"]: conv.lookupTypes = tableNamespace['lookupTypes'] # also create reverse mapping for t in conv.lookupTypes.values(): for cls in t.values(): - convertersByName[cls.__name__] = Table(name, repeat, repeatOffset, cls) + convertersByName[cls.__name__] = Table(name, repeat, aux, cls) + if name == "FeatureParams": + conv.featureParamTypes = tableNamespace['featureParamTypes'] + conv.defaultFeatureParams = tableNamespace['FeatureParams'] + for cls in conv.featureParamTypes.values(): + convertersByName[cls.__name__] = Table(name, repeat, aux, cls) converters.append(conv) - assert not convertersByName.has_key(name) + assert name not in convertersByName, name convertersByName[name] = conv return converters, convertersByName -class BaseConverter: - +class _MissingItem(tuple): + __slots__ = () + +try: + from collections import UserList +except: + from UserList import UserList + +class _LazyList(UserList): + + def __getslice__(self, i, j): + return self.__getitem__(slice(i, j)) + def __getitem__(self, k): + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + return [self[i] for i in indices] + item = self.data[k] + if isinstance(item, _MissingItem): + 
self.reader.seek(self.pos + item[0] * self.recordSize) + item = self.conv.read(self.reader, self.font, {}) + self.data[k] = item + return item + +class BaseConverter(object): + """Base class for converter objects. Apart from the constructor, this is an abstract class.""" - - def __init__(self, name, repeat, repeatOffset, tableClass): + + def __init__(self, name, repeat, aux, tableClass): self.name = name self.repeat = repeat - self.repeatOffset = repeatOffset + self.aux = aux self.tableClass = tableClass self.isCount = name.endswith("Count") - - def read(self, reader, font, tableStack): + self.isLookupType = name.endswith("LookupType") + self.isPropagated = name in ["ClassCount", "Class2Count", "FeatureTag", "SettingsCount", "AxisCount"] + + def readArray(self, reader, font, tableDict, count): + """Read an array of values from the reader.""" + lazy = font.lazy and count > 8 + if lazy: + recordSize = self.getRecordSize(reader) + if recordSize is NotImplemented: + lazy = False + if not lazy: + l = [] + for i in range(count): + l.append(self.read(reader, font, tableDict)) + return l + else: + l = _LazyList() + l.reader = reader.copy() + l.pos = l.reader.pos + l.font = font + l.conv = self + l.recordSize = recordSize + l.extend(_MissingItem([i]) for i in range(count)) + reader.advance(count * recordSize) + return l + + def getRecordSize(self, reader): + if hasattr(self, 'staticSize'): return self.staticSize + return NotImplemented + + def read(self, reader, font, tableDict): """Read a value from the reader.""" - raise NotImplementedError, self - - def write(self, writer, font, tableStack, value, repeatIndex=None): + raise NotImplementedError(self) + + def writeArray(self, writer, font, tableDict, values): + for i in range(len(values)): + self.write(writer, font, tableDict, values[i], i) + + def write(self, writer, font, tableDict, value, repeatIndex=None): """Write a value to the writer.""" - raise NotImplementedError, self - + raise NotImplementedError(self) + def 
xmlRead(self, attrs, content, font): """Read a value from XML.""" - raise NotImplementedError, self - + raise NotImplementedError(self) + def xmlWrite(self, xmlWriter, font, value, name, attrs): """Write a value to XML.""" - raise NotImplementedError, self + raise NotImplementedError(self) class SimpleValue(BaseConverter): @@ -76,203 +145,282 @@ class IntValue(SimpleValue): def xmlRead(self, attrs, content, font): - return int(attrs["value"]) + return int(attrs["value"], 0) class Long(IntValue): - def read(self, reader, font, tableStack): + staticSize = 4 + def read(self, reader, font, tableDict): return reader.readLong() - def write(self, writer, font, tableStack, value, repeatIndex=None): + def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeLong(value) -class Fixed(IntValue): - def read(self, reader, font, tableStack): - return float(reader.readLong()) / 0x10000 - def write(self, writer, font, tableStack, value, repeatIndex=None): - writer.writeLong(int(round(value * 0x10000))) - def xmlRead(self, attrs, content, font): - return float(attrs["value"]) +class ULong(IntValue): + staticSize = 4 + def read(self, reader, font, tableDict): + return reader.readULong() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeULong(value) class Short(IntValue): - def read(self, reader, font, tableStack): + staticSize = 2 + def read(self, reader, font, tableDict): return reader.readShort() - def write(self, writer, font, tableStack, value, repeatIndex=None): + def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeShort(value) class UShort(IntValue): - def read(self, reader, font, tableStack): + staticSize = 2 + def read(self, reader, font, tableDict): return reader.readUShort() - def write(self, writer, font, tableStack, value, repeatIndex=None): + def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUShort(value) -class Count(Short): +class UInt24(IntValue): + 
staticSize = 3 + def read(self, reader, font, tableDict): + return reader.readUInt24() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUInt24(value) + +class ComputedUShort(UShort): def xmlWrite(self, xmlWriter, font, value, name, attrs): xmlWriter.comment("%s=%s" % (name, value)) xmlWriter.newline() class Tag(SimpleValue): - def read(self, reader, font, tableStack): + staticSize = 4 + def read(self, reader, font, tableDict): return reader.readTag() - def write(self, writer, font, tableStack, value, repeatIndex=None): + def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeTag(value) class GlyphID(SimpleValue): - def read(self, reader, font, tableStack): - value = reader.readUShort() - value = font.getGlyphName(value) - return value + staticSize = 2 + def readArray(self, reader, font, tableDict, count): + glyphOrder = font.getGlyphOrder() + gids = array.array("H", reader.readData(2 * count)) + if sys.byteorder != "big": + gids.byteswap() + try: + l = [glyphOrder[gid] for gid in gids] + except IndexError: + # Slower, but will not throw an IndexError on an invalid glyph id. 
+ l = [font.getGlyphName(gid) for gid in gids] + return l + def read(self, reader, font, tableDict): + return font.getGlyphName(reader.readUShort()) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUShort(font.getGlyphID(value)) - def write(self, writer, font, tableStack, value, repeatIndex=None): - value = font.getGlyphID(value) - writer.writeUShort(value) +class FloatValue(SimpleValue): + def xmlRead(self, attrs, content, font): + return float(attrs["value"]) + +class DeciPoints(FloatValue): + staticSize = 2 + def read(self, reader, font, tableDict): + return reader.readUShort() / 10 + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUShort(int(round(value * 10))) + +class Fixed(FloatValue): + staticSize = 4 + def read(self, reader, font, tableDict): + return fi2fl(reader.readLong(), 16) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeLong(fl2fi(value, 16)) + +class Version(BaseConverter): + staticSize = 4 + def read(self, reader, font, tableDict): + value = reader.readLong() + assert (value >> 16) == 1, "Unsupported version 0x%08x" % value + return fi2fl(value, 16) + def write(self, writer, font, tableDict, value, repeatIndex=None): + if value < 0x10000: + value = fl2fi(value, 16) + value = int(round(value)) + assert (value >> 16) == 1, "Unsupported version 0x%08x" % value + writer.writeLong(value) + def xmlRead(self, attrs, content, font): + value = attrs["value"] + value = float(int(value, 0)) if value.startswith("0") else float(value) + if value >= 0x10000: + value = fi2fl(value, 16) + return value + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value >= 0x10000: + value = fi2fl(value, 16) + if value % 1 != 0: + # Write as hex + value = "0x%08x" % fl2fi(value, 16) + xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() class Struct(BaseConverter): - - def read(self, reader, font, tableStack): + + def getRecordSize(self, 
reader): + return self.tableClass and self.tableClass.getRecordSize(reader) + + def read(self, reader, font, tableDict): table = self.tableClass() - table.decompile(reader, font, tableStack) + table.decompile(reader, font) return table - - def write(self, writer, font, tableStack, value, repeatIndex=None): - value.compile(writer, font, tableStack) - + + def write(self, writer, font, tableDict, value, repeatIndex=None): + value.compile(writer, font) + def xmlWrite(self, xmlWriter, font, value, name, attrs): if value is None: - pass # NULL table, ignore + if attrs: + # If there are attributes (probably index), then + # don't drop this even if it's NULL. It will mess + # up the array indices of the containing element. + xmlWriter.simpletag(name, attrs + [("empty", 1)]) + xmlWriter.newline() + else: + pass # NULL table, ignore else: - value.toXML(xmlWriter, font, attrs) - + value.toXML(xmlWriter, font, attrs, name=name) + def xmlRead(self, attrs, content, font): + if "empty" in attrs and safeEval(attrs["empty"]): + return None table = self.tableClass() Format = attrs.get("Format") if Format is not None: table.Format = int(Format) for element in content: - if type(element) == TupleType: + if isinstance(element, tuple): name, attrs, content = element - table.fromXML((name, attrs, content), font) + table.fromXML(name, attrs, content, font) else: pass return table + def __repr__(self): + return "Struct of " + repr(self.tableClass) + class Table(Struct): - - def read(self, reader, font, tableStack): - offset = reader.readUShort() + + longOffset = False + staticSize = 2 + + def readOffset(self, reader): + return reader.readUShort() + + def writeNullOffset(self, writer): + if self.longOffset: + writer.writeULong(0) + else: + writer.writeUShort(0) + + def read(self, reader, font, tableDict): + offset = self.readOffset(reader) if offset == 0: return None if offset <= 3: # XXX hack to work around buggy pala.ttf - print "*** Warning: offset is not 0, yet suspiciously low (%s). 
table: %s" \ - % (offset, self.tableClass.__name__) + print("*** Warning: offset is not 0, yet suspiciously low (%s). table: %s" \ + % (offset, self.tableClass.__name__)) return None - subReader = reader.getSubReader(offset) table = self.tableClass() - table.decompile(subReader, font, tableStack) + reader = reader.getSubReader(offset) + if font.lazy: + table.reader = reader + table.font = font + else: + table.decompile(reader, font) return table - - def write(self, writer, font, tableStack, value, repeatIndex=None): + + def write(self, writer, font, tableDict, value, repeatIndex=None): if value is None: - writer.writeUShort(0) + self.writeNullOffset(writer) else: subWriter = writer.getSubWriter() + subWriter.longOffset = self.longOffset subWriter.name = self.name if repeatIndex is not None: subWriter.repeatIndex = repeatIndex - value.preCompile() writer.writeSubTable(subWriter) - value.compile(subWriter, font, tableStack) + value.compile(subWriter, font) + +class LTable(Table): + + longOffset = True + staticSize = 4 + + def readOffset(self, reader): + return reader.readULong() + class SubTable(Table): def getConverter(self, tableType, lookupType): - lookupTypes = self.lookupTypes[tableType] - tableClass = lookupTypes[lookupType] - return SubTable(self.name, self.repeat, self.repeatOffset, tableClass) + tableClass = self.lookupTypes[tableType][lookupType] + return self.__class__(self.name, self.repeat, self.aux, tableClass) -class ExtSubTable(Table): - def getConverter(self, tableType, lookupType): - lookupTypes = self.lookupTypes[tableType] - tableClass = lookupTypes[lookupType] - return ExtSubTable(self.name, self.repeat, self.repeatOffset, tableClass) - - def read(self, reader, font, tableStack): - offset = reader.readULong() - if offset == 0: - return None - subReader = reader.getSubReader(offset) - table = self.tableClass() - table.reader = subReader - table.font = font - table.compileStatus = 1 - table.start = table.reader.offset - return table - - def 
write(self, writer, font, tableStack, value, repeatIndex=None): +class ExtSubTable(LTable, SubTable): + + def write(self, writer, font, tableDict, value, repeatIndex=None): writer.Extension = 1 # actually, mere presence of the field flags it as an Ext Subtable writer. - if value is None: - writer.writeULong(0) - else: - # If the subtable has not yet been decompiled, we need to do so. - if value.compileStatus == 1: - value.decompile(value.reader, value.font, tableStack) - subWriter = writer.getSubWriter() - subWriter.name = self.name - writer.writeSubTable(subWriter) - # If the subtable has been sorted and we can just write the original - # data, then do so. - if value.compileStatus == 3: - data = value.reader.data[value.start:value.end] - subWriter.writeData(data) - else: - value.compile(subWriter, font, tableStack) + Table.write(self, writer, font, tableDict, value, repeatIndex) + +class FeatureParams(Table): + def getConverter(self, featureTag): + tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams) + return self.__class__(self.name, self.repeat, self.aux, tableClass) class ValueFormat(IntValue): - def __init__(self, name, repeat, repeatOffset, tableClass): - BaseConverter.__init__(self, name, repeat, repeatOffset, tableClass) - self.which = name[-1] == "2" - def read(self, reader, font, tableStack): + staticSize = 2 + def __init__(self, name, repeat, aux, tableClass): + BaseConverter.__init__(self, name, repeat, aux, tableClass) + self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1") + def read(self, reader, font, tableDict): format = reader.readUShort() - reader.setValueFormat(format, self.which) + reader[self.which] = ValueRecordFactory(format) return format - def write(self, writer, font, tableStack, format, repeatIndex=None): + def write(self, writer, font, tableDict, format, repeatIndex=None): writer.writeUShort(format) - writer.setValueFormat(format, self.which) + writer[self.which] = ValueRecordFactory(format) class 
ValueRecord(ValueFormat): - def read(self, reader, font, tableStack): - return reader.readValueRecord(font, self.which) - def write(self, writer, font, tableStack, value, repeatIndex=None): - writer.writeValueRecord(value, font, self.which) + def getRecordSize(self, reader): + return 2 * len(reader[self.which]) + def read(self, reader, font, tableDict): + return reader[self.which].readValueRecord(reader, font) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer[self.which].writeValueRecord(writer, font, value) def xmlWrite(self, xmlWriter, font, value, name, attrs): if value is None: pass # NULL table, ignore else: value.toXML(xmlWriter, font, self.name, attrs) def xmlRead(self, attrs, content, font): - from otBase import ValueRecord + from .otBase import ValueRecord value = ValueRecord() - value.fromXML((None, attrs, content), font) + value.fromXML(None, attrs, content, font) return value class DeltaValue(BaseConverter): - - def read(self, reader, font, tableStack): - table = tableStack.getTop() - StartSize = table["StartSize"] - EndSize = table["EndSize"] - DeltaFormat = table["DeltaFormat"] + + def read(self, reader, font, tableDict): + StartSize = tableDict["StartSize"] + EndSize = tableDict["EndSize"] + DeltaFormat = tableDict["DeltaFormat"] assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" nItems = EndSize - StartSize + 1 nBits = 1 << DeltaFormat minusOffset = 1 << nBits mask = (1 << nBits) - 1 signMask = 1 << (nBits - 1) - + DeltaValue = [] tmp, shift = 0, 0 for i in range(nItems): @@ -284,19 +432,18 @@ value = value - minusOffset DeltaValue.append(value) return DeltaValue - - def write(self, writer, font, tableStack, value, repeatIndex=None): - table = tableStack.getTop() - StartSize = table["StartSize"] - EndSize = table["EndSize"] - DeltaFormat = table["DeltaFormat"] - DeltaValue = table["DeltaValue"] + + def write(self, writer, font, tableDict, value, repeatIndex=None): + StartSize = tableDict["StartSize"] + EndSize = 
tableDict["EndSize"] + DeltaFormat = tableDict["DeltaFormat"] + DeltaValue = value assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" nItems = EndSize - StartSize + 1 nBits = 1 << DeltaFormat assert len(DeltaValue) == nItems mask = (1 << nBits) - 1 - + tmp, shift = 0, 16 for value in DeltaValue: shift = shift - nBits @@ -304,33 +451,31 @@ if shift == 0: writer.writeUShort(tmp) tmp, shift = 0, 16 - if shift <> 16: + if shift != 16: writer.writeUShort(tmp) - + def xmlWrite(self, xmlWriter, font, value, name, attrs): - # XXX this could do with a nicer format xmlWriter.simpletag(name, attrs + [("value", value)]) xmlWriter.newline() - + def xmlRead(self, attrs, content, font): return safeEval(attrs["value"]) converterMapping = { - # type class - "int16": Short, - "uint16": UShort, - "ULONG": Long, - "Fixed": Fixed, - "Tag": Tag, - "GlyphID": GlyphID, - "struct": Struct, - "Offset": Table, - "LOffset": ExtSubTable, - "ValueRecord": ValueRecord, + # type class + "int16": Short, + "uint16": UShort, + "uint24": UInt24, + "uint32": ULong, + "Version": Version, + "Tag": Tag, + "GlyphID": GlyphID, + "DeciPoints": DeciPoints, + "Fixed": Fixed, + "struct": Struct, + "Offset": Table, + "LOffset": LTable, + "ValueRecord": ValueRecord, + "DeltaValue": DeltaValue, } - -# equivalents: -converterMapping["USHORT"] = converterMapping["uint16"] -converterMapping["fixed32"] = converterMapping["Fixed"] - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/otData.py fonttools-3.0/Lib/fontTools/ttLib/tables/otData.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/otData.py 2013-06-22 14:25:28.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/otData.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,9 +1,14 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + otData = [ # - # common (generated from chapter2.htm) + # common # + ('LookupOrder', []), + ('ScriptList', [ ('uint16', 'ScriptCount', None, None, 'Number of ScriptRecords'), 
('struct', 'ScriptRecord', 'ScriptCount', 0, 'Array of ScriptRecords -listed alphabetically by ScriptTag'), @@ -48,6 +53,33 @@ ('uint16', 'LookupListIndex', 'LookupCount', 0, 'Array of LookupList indices for this feature -zero-based (first lookup is LookupListIndex = 0)'), ]), + ('FeatureParams', [ + ]), + + ('FeatureParamsSize', [ + ('DeciPoints', 'DesignSize', None, None, 'The design size in 720/inch units (decipoints).'), + ('uint16', 'SubfamilyID', None, None, 'Serves as an identifier that associates fonts in a subfamily.'), + ('uint16', 'SubfamilyNameID', None, None, 'Subfamily NameID.'), + ('DeciPoints', 'RangeStart', None, None, 'Small end of recommended usage range (exclusive) in 720/inch units.'), + ('DeciPoints', 'RangeEnd', None, None, 'Large end of recommended usage range (inclusive) in 720/inch units.'), + ]), + + ('FeatureParamsStylisticSet', [ + ('uint16', 'Version', None, None, 'Set to 0.'), + ('uint16', 'UINameID', None, None, 'UI NameID.'), + ]), + + ('FeatureParamsCharacterVariants', [ + ('uint16', 'Format', None, None, 'Set to 0.'), + ('uint16', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'), + ('uint16', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'), + ('uint16', 'SampleTextNameID', None, None, 'Sample text NameID.'), + ('uint16', 'NumNamedParameters', None, None, 'Number of named parameters.'), + ('uint16', 'FirstParamUILabelNameID', None, None, 'First NameID of UI feature parameters.'), + ('uint16', 'CharCount', None, None, 'Count of characters this feature provides glyph variants for.'), + ('uint24', 'Character', 'CharCount', 0, 'Unicode characters for which this feature provides glyph variants.'), + ]), + ('LookupList', [ ('uint16', 'LookupCount', None, None, 'Number of lookups in this table'), ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'), @@ -58,6 +90,7 @@ ('uint16', 'LookupFlag', None, None, 
'Lookup qualifiers'), ('uint16', 'SubTableCount', None, None, 'Number of SubTables for this lookup'), ('Offset', 'SubTable', 'SubTableCount', 0, 'Array of offsets to SubTables-from beginning of Lookup table'), + ('uint16', 'MarkFilteringSet', None, 'LookupFlag & 0x0010', 'If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.'), ]), ('CoverageFormat1', [ @@ -101,16 +134,16 @@ ('uint16', 'StartSize', None, None, 'Smallest size to correct-in ppem'), ('uint16', 'EndSize', None, None, 'Largest size to correct-in ppem'), ('uint16', 'DeltaFormat', None, None, 'Format of DeltaValue array data: 1, 2, or 3'), - ('uint16', 'DeltaValue', '', 0, 'Array of compressed data'), + ('DeltaValue', 'DeltaValue', '', 0, 'Array of compressed data'), ]), # - # gpos (generated from gpos.htm) + # gpos # ('GPOS', [ - ('Fixed', 'Version', None, None, 'Version of the GPOS table-initially = 0x00010000'), + ('Version', 'Version', None, None, 'Version of the GPOS table-initially = 0x00010000'), ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GPOS table'), ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GPOS table'), ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GPOS table'), @@ -357,9 +390,9 @@ ]), ('ExtensionPosFormat1', [ - ('USHORT', 'ExtFormat', None, None, 'Format identifier. Set to 1.'), - ('USHORT', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'), - ('LOffset', 'ExtSubTable', None, None, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'), + ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'), + ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. 
the extension subtable).'), + ('LOffset', 'ExtSubTable', None, None, 'Offset to SubTable'), ]), ('ValueRecord', [ @@ -406,11 +439,11 @@ # - # gsub (generated from gsub.htm) + # gsub # ('GSUB', [ - ('Fixed', 'Version', None, None, 'Version of the GSUB table-initially set to 0x00010000'), + ('Version', 'Version', None, None, 'Version of the GSUB table-initially set to 0x00010000'), ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GSUB table'), ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GSUB table'), ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GSUB table'), @@ -419,7 +452,7 @@ ('SingleSubstFormat1', [ ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), - ('int16', 'DeltaGlyphID', None, None, 'Add to original GlyphID to get substitute GlyphID'), + ('uint16', 'DeltaGlyphID', None, None, 'Add to original GlyphID modulo 65536 to get substitute GlyphID'), ]), ('SingleSubstFormat2', [ @@ -585,8 +618,8 @@ ]), ('ExtensionSubstFormat1', [ - ('USHORT', 'ExtFormat', None, None, 'Format identifier. Set to 1.'), - ('USHORT', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'), + ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'), + ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. 
the extension subtable).'), ('LOffset', 'ExtSubTable', None, None, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'), ]), @@ -602,15 +635,16 @@ ]), # - # gdef (generated from gdef.htm) + # gdef # ('GDEF', [ - ('Fixed', 'Version', None, None, 'Version of the GDEF table-initially 0x00010000'), + ('Version', 'Version', None, None, 'Version of the GDEF table-initially 0x00010000'), ('Offset', 'GlyphClassDef', None, None, 'Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)'), ('Offset', 'AttachList', None, None, 'Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)'), ('Offset', 'LigCaretList', None, None, 'Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)'), ('Offset', 'MarkAttachClassDef', None, None, 'Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)'), + ('Offset', 'MarkGlyphSetsDef', None, 'int(round(Version*0x10000)) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), ]), ('AttachList', [ @@ -651,13 +685,18 @@ ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value-from beginning of CaretValue table'), ]), + ('MarkGlyphSetsDef', [ + ('uint16', 'MarkSetTableFormat', None, None, 'Format identifier == 1'), + ('uint16', 'MarkSetCount', None, None, 'Number of mark sets defined'), + ('LOffset', 'Coverage', 'MarkSetCount', 0, 'Array of offsets to mark set coverage tables.'), + ]), # - # base (generated from base.htm) + # base # ('BASE', [ - ('fixed32', 'Version', None, None, 'Version of the BASE table-initially 0x00010000'), + ('Version', 'Version', None, None, 'Version of the BASE table-initially 0x00010000'), ('Offset', 'HorizAxis', None, None, 'Offset to horizontal Axis table-from beginning of BASE table-may be NULL'), ('Offset', 'VertAxis', None, 
None, 'Offset to vertical Axis table-from beginning of BASE table-may be NULL'), ]), @@ -733,11 +772,11 @@ # - # jstf (generated from jstf.htm) + # jstf # ('JSTF', [ - ('fixed32', 'Version', None, None, 'Version of the JSTF table-initially set to 0x00010000'), + ('Version', 'Version', None, None, 'Version of the JSTF table-initially set to 0x00010000'), ('uint16', 'JstfScriptCount', None, None, 'Number of JstfScriptRecords in this table'), ('struct', 'JstfScriptRecord', 'JstfScriptCount', 0, 'Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag'), ]), @@ -797,5 +836,190 @@ ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order'), ]), -] + # + # math + # + + ('MATH', [ + ('Version', 'Version', None, None, 'Version of the MATH table-initially set to 0x00010000.'), + ('Offset', 'MathConstants', None, None, 'Offset to MathConstants table - from the beginning of MATH table.'), + ('Offset', 'MathGlyphInfo', None, None, 'Offset to MathGlyphInfo table - from the beginning of MATH table.'), + ('Offset', 'MathVariants', None, None, 'Offset to MathVariants table - from the beginning of MATH table.'), + ]), + + ('MathValueRecord', [ + ('int16', 'Value', None, None, 'The X or Y value in design units.'), + ('Offset', 'DeviceTable', None, None, 'Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.'), + ]), + + ('MathConstants', [ + ('int16', 'ScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 1. Suggested value: 80%.'), + ('int16', 'ScriptScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.'), + ('uint16', 'DelimitedSubFormulaMinHeight', None, None, 'Minimum height required for a delimited expression to be treated as a subformula. 
Suggested value: normal line height x1.5.'), + ('uint16', 'DisplayOperatorMinHeight', None, None, 'Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.'), + ('MathValueRecord', 'MathLeading', None, None, 'White space to be left between math formulas to ensure proper line spacing. For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.'), + ('MathValueRecord', 'AxisHeight', None, None, 'Axis height of the font.'), + ('MathValueRecord', 'AccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.'), + ('MathValueRecord', 'FlattenedAccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).'), + ('MathValueRecord', 'SubscriptShiftDown', None, None, 'The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.'), + ('MathValueRecord', 'SubscriptTopMax', None, None, 'Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.'), + ('MathValueRecord', 'SubscriptBaselineDropMin', None, None, 'Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.'), + ('MathValueRecord', 'SuperscriptShiftUp', None, None, 'Standard shift up applied to superscript elements. 
Suggested: os2.ySuperscriptYOffset.'), + ('MathValueRecord', 'SuperscriptShiftUpCramped', None, None, 'Standard shift of superscripts relative to the base, in cramped style.'), + ('MathValueRecord', 'SuperscriptBottomMin', None, None, 'Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. Suggested: 1/4 x-height.'), + ('MathValueRecord', 'SuperscriptBaselineDropMax', None, None, 'Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.'), + ('MathValueRecord', 'SubSuperscriptGapMin', None, None, 'Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.'), + ('MathValueRecord', 'SuperscriptBottomMaxWithSubscript', None, None, 'The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.'), + ('MathValueRecord', 'SpaceAfterScript', None, None, 'Extra white space to be added after each subscript and superscript. 
Suggested: 0.5pt for a 12 pt font.'), + ('MathValueRecord', 'UpperLimitGapMin', None, None, 'Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.'), + ('MathValueRecord', 'UpperLimitBaselineRiseMin', None, None, 'Minimum distance between baseline of upper limit and (ink) top of the base operator.'), + ('MathValueRecord', 'LowerLimitGapMin', None, None, 'Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.'), + ('MathValueRecord', 'LowerLimitBaselineDropMin', None, None, 'Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.'), + ('MathValueRecord', 'StackTopShiftUp', None, None, 'Standard shift up applied to the top element of a stack.'), + ('MathValueRecord', 'StackTopDisplayStyleShiftUp', None, None, 'Standard shift up applied to the top element of a stack in display style.'), + ('MathValueRecord', 'StackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack. Positive for moving in the downward direction.'), + ('MathValueRecord', 'StackBottomDisplayStyleShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.'), + ('MathValueRecord', 'StackGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'StackDisplayStyleGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.'), + ('MathValueRecord', 'StretchStackTopShiftUp', None, None, 'Standard shift up applied to the top element of the stretch stack.'), + ('MathValueRecord', 'StretchStackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of the stretch stack. 
Positive for moving in the downward direction.'), + ('MathValueRecord', 'StretchStackGapAboveMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin'), + ('MathValueRecord', 'StretchStackGapBelowMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.'), + ('MathValueRecord', 'FractionNumeratorShiftUp', None, None, 'Standard shift up applied to the numerator.'), + ('MathValueRecord', 'FractionNumeratorDisplayStyleShiftUp', None, None, 'Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.'), + ('MathValueRecord', 'FractionDenominatorShiftDown', None, None, 'Standard shift down applied to the denominator. Positive for moving in the downward direction.'), + ('MathValueRecord', 'FractionDenominatorDisplayStyleShiftDown', None, None, 'Standard shift down applied to the denominator in display style. Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.'), + ('MathValueRecord', 'FractionNumeratorGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness'), + ('MathValueRecord', 'FractionNumDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'FractionRuleThickness', None, None, 'Thickness of the fraction bar. Suggested: default rule thickness.'), + ('MathValueRecord', 'FractionDenominatorGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. 
Suggested: default rule thickness'), + ('MathValueRecord', 'FractionDenomDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'SkewedFractionHorizontalGap', None, None, 'Horizontal distance between the top and bottom elements of a skewed fraction.'), + ('MathValueRecord', 'SkewedFractionVerticalGap', None, None, 'Vertical distance between the ink of the top and bottom elements of a skewed fraction.'), + ('MathValueRecord', 'OverbarVerticalGap', None, None, 'Distance between the overbar and the (ink) top of he base. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'OverbarRuleThickness', None, None, 'Thickness of overbar. Suggested: default rule thickness.'), + ('MathValueRecord', 'OverbarExtraAscender', None, None, 'Extra white space reserved above the overbar. Suggested: default rule thickness.'), + ('MathValueRecord', 'UnderbarVerticalGap', None, None, 'Distance between underbar and (ink) bottom of the base. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'UnderbarRuleThickness', None, None, 'Thickness of underbar. Suggested: default rule thickness.'), + ('MathValueRecord', 'UnderbarExtraDescender', None, None, 'Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.'), + ('MathValueRecord', 'RadicalVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.'), + ('MathValueRecord', 'RadicalDisplayStyleVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.'), + ('MathValueRecord', 'RadicalRuleThickness', None, None, 'Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. 
Suggested: default rule thickness.'), + ('MathValueRecord', 'RadicalExtraAscender', None, None, 'Extra white space reserved above the radical. Suggested: RadicalRuleThickness.'), + ('MathValueRecord', 'RadicalKernBeforeDegree', None, None, 'Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.'), + ('MathValueRecord', 'RadicalKernAfterDegree', None, None, 'Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.'), + ('uint16', 'RadicalDegreeBottomRaisePercent', None, None, 'Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.'), + ]), + + ('MathGlyphInfo', [ + ('Offset', 'MathItalicsCorrectionInfo', None, None, 'Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.'), + ('Offset', 'MathTopAccentAttachment', None, None, 'Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.'), + ('Offset', 'ExtendedShapeCoverage', None, None, 'Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table. When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.'), + ('Offset', 'MathKernInfo', None, None, 'Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.'), + ]), + + ('MathItalicsCorrectionInfo', [ + ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.'), + ('uint16', 'ItalicsCorrectionCount', None, None, 'Number of italics correction values. 
Should coincide with the number of covered glyphs.'), + ('MathValueRecord', 'ItalicsCorrection', 'ItalicsCorrectionCount', 0, 'Array of MathValueRecords defining italics correction values for each covered glyph.'), + ]), + + ('MathTopAccentAttachment', [ + ('Offset', 'TopAccentCoverage', None, None, 'Offset to Coverage table - from the beginning of MathTopAccentAttachment table.'), + ('uint16', 'TopAccentAttachmentCount', None, None, 'Number of top accent attachment point values. Should coincide with the number of covered glyphs'), + ('MathValueRecord', 'TopAccentAttachment', 'TopAccentAttachmentCount', 0, 'Array of MathValueRecords defining top accent attachment points for each covered glyph'), + ]), + + ('MathKernInfo', [ + ('Offset', 'MathKernCoverage', None, None, 'Offset to Coverage table - from the beginning of the MathKernInfo table.'), + ('uint16', 'MathKernCount', None, None, 'Number of MathKernInfoRecords.'), + ('MathKernInfoRecord', 'MathKernInfoRecords', 'MathKernCount', 0, 'Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.'), + ]), + + ('MathKernInfoRecord', [ + ('Offset', 'TopRightMathKern', None, None, 'Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. May be NULL.'), + ('Offset', 'TopLeftMathKern', None, None, 'Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.'), + ('Offset', 'BottomRightMathKern', None, None, 'Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.'), + ('Offset', 'BottomLeftMathKern', None, None, 'Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. 
May be NULL.'), + ]), + + ('MathKern', [ + ('uint16', 'HeightCount', None, None, 'Number of heights on which the kern value changes.'), + ('MathValueRecord', 'CorrectionHeight', 'HeightCount', 0, 'Array of correction heights at which the kern value changes. Sorted by the height value in design units.'), + ('MathValueRecord', 'KernValue', 'HeightCount', 1, 'Array of kern values corresponding to heights. First value is the kern value for all heights less or equal than the first height in this table.Last value is the value to be applied for all heights greater than the last height in this table. Negative values are interpreted as move glyphs closer to each other.'), + ]), + + ('MathVariants', [ + ('uint16', 'MinConnectorOverlap', None, None, 'Minimum overlap of connecting glyphs during glyph construction, in design units.'), + ('Offset', 'VertGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'), + ('Offset', 'HorizGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'), + ('uint16', 'VertGlyphCount', None, None, 'Number of glyphs for which information is provided for vertically growing variants.'), + ('uint16', 'HorizGlyphCount', None, None, 'Number of glyphs for which information is provided for horizontally growing variants.'), + ('Offset', 'VertGlyphConstruction', 'VertGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.'), + ('Offset', 'HorizGlyphConstruction', 'HorizGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.'), + ]), + + ('MathGlyphConstruction', [ + ('Offset', 'GlyphAssembly', None, None, 'Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. 
May be NULL'), + ('uint16', 'VariantCount', None, None, 'Count of glyph growing variants for this glyph.'), + ('MathGlyphVariantRecord', 'MathGlyphVariantRecord', 'VariantCount', 0, 'MathGlyphVariantRecords for alternative variants of the glyphs.'), + ]), + + ('MathGlyphVariantRecord', [ + ('GlyphID', 'VariantGlyph', None, None, 'Glyph ID for the variant.'), + ('uint16', 'AdvanceMeasurement', None, None, 'Advance width/height, in design units, of the variant, in the direction of requested glyph extension.'), + ]), + + ('GlyphAssembly', [ + ('MathValueRecord', 'ItalicsCorrection', None, None, 'Italics correction of this GlyphAssembly. Should not depend on the assembly size.'), + ('uint16', 'PartCount', None, None, 'Number of parts in this assembly.'), + ('GlyphPartRecord', 'PartRecords', 'PartCount', 0, 'Array of part records, from left to right and bottom to top.'), + ]), + + ('GlyphPartRecord', [ + ('GlyphID', 'glyph', None, None, 'Glyph ID for the part.'), + ('uint16', 'StartConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the beginning of the glyph, in the direction of the extension.'), + ('uint16', 'EndConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the end of the glyph, in the direction of the extension.'), + ('uint16', 'FullAdvance', None, None, 'Full advance width/height for this part, in the direction of the extension. In design units.'), + ('uint16', 'PartFlags', None, None, 'Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 
0xFFFE Reserved'), + ]), + + + ## + ## Apple Advanced Typography (AAT) tables + ## + + # + # feat + # + + ('feat', [ + ('Version', 'Version', None, None, 'Version of the feat table-initially set to 0x00010000.'), + ('FeatureNames', 'FeatureNames', None, None, 'The feature names.'), + ]), + ('FeatureNames', [ + ('uint16', 'FeatureNameCount', None, None, 'Number of entries in the feature name array.'), + ('uint16', 'Reserved1', None, None, 'Reserved (set to zero).'), + ('uint32', 'Reserved2', None, None, 'Reserved (set to zero).'), + ('FeatureName', 'FeatureName', 'FeatureNameCount', 0, 'The feature name array.'), + ]), + + ('FeatureName', [ + ('uint16', 'FeatureType', None, None, 'Feature type.'), + ('uint16', 'SettingsCount', None, None, 'The number of records in the setting name array.'), + ('LOffset', 'Settings', None, None, 'Offset to setting table for this feature.'), + ('uint16', 'FeatureFlags', None, None, 'Single-bit flags associated with the feature type.'), + ('uint16', 'FeatureNameID', None, None, 'The name table index for the feature name.'), + ]), + + ('Settings', [ + ('Setting', 'Setting', 'SettingsCount', 0, 'The setting array.'), + ]), + + ('Setting', [ + ('uint16', 'SettingValue', None, None, 'The setting.'), + ('uint16', 'SettingNameID', None, None, 'The name table index for the setting name.'), + ]), + +] diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/otTables.py fonttools-3.0/Lib/fontTools/ttLib/tables/otTables.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/otTables.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/otTables.py 2015-08-31 17:57:15.000000000 +0000 @@ -4,51 +4,75 @@ Most are constructed upon import from data in otData.py, all are populated with converter objects from otConverters.py. 
""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTable, FormatSwitchingBaseTable import operator -from otBase import BaseTable, FormatSwitchingBaseTable -from types import TupleType +import warnings -class LookupOrder(BaseTable): - """Dummy class; this table isn't defined, but is used, and is always NULL.""" +class FeatureParams(BaseTable): + def compile(self, writer, font): + assert featureParamTypes.get(writer['FeatureTag']) == self.__class__, "Wrong FeatureParams type for feature '%s': %s" % (writer['FeatureTag'], self.__class__.__name__) + BaseTable.compile(self, writer, font) -class FeatureParams(BaseTable): - """This class has been used by Adobe, but but this one implementation was done wrong. - No other use has been made, becuase there is no way to know how to interpret - the data at the offset.. For now, if we see one, just skip the data on - decompiling and dumping to XML. """ - # XXX The above is no longer true; the 'size' feature uses FeatureParams now. - def __init__(self): - BaseTable.__init__(self) - self.converters = [] + def toXML(self, xmlWriter, font, attrs=None, name=None): + BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) + +class FeatureParamsSize(FeatureParams): + pass + +class FeatureParamsStylisticSet(FeatureParams): + pass + +class FeatureParamsCharacterVariants(FeatureParams): + pass class Coverage(FormatSwitchingBaseTable): - + # manual implementation to get rid of glyphID dependencies - + def postRead(self, rawTable, font): if self.Format == 1: + # TODO only allow glyphs that are valid? self.glyphs = rawTable["GlyphArray"] elif self.Format == 2: glyphs = self.glyphs = [] ranges = rawTable["RangeRecord"] - getGlyphName = font.getGlyphName + glyphOrder = font.getGlyphOrder() + # Some SIL fonts have coverage entries that don't have sorted + # StartCoverageIndex. If it is so, fixup and warn. We undo + # this when writing font out. 
+ sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex) + if ranges != sorted_ranges: + warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") + ranges = sorted_ranges + del sorted_ranges for r in ranges: assert r.StartCoverageIndex == len(glyphs), \ (r.StartCoverageIndex, len(glyphs)) start = r.Start end = r.End - startID = font.getGlyphID(start) - endID = font.getGlyphID(end) - glyphs.append(start) - rangeList = [getGlyphName(glyphID) for glyphID in range(startID + 1, endID) ] - glyphs += rangeList - if start != end: - glyphs.append(end) + try: + startID = font.getGlyphID(start, requireReal=True) + except KeyError: + warnings.warn("Coverage table has start glyph ID out of range: %s." % start) + continue + try: + endID = font.getGlyphID(end, requireReal=True) + 1 + except KeyError: + # Apparently some tools use 65535 to "match all" the range + if end != 'glyph65535': + warnings.warn("Coverage table has end glyph ID out of range: %s." % end) + # NOTE: We clobber out-of-range things here. There are legit uses for those, + # but none that we have seen in the wild. + endID = len(glyphOrder) + glyphs.extend(glyphOrder[glyphID] for glyphID in range(startID, endID)) else: assert 0, "unknown format: %s" % self.Format - + del self.Format # Don't need this anymore + def preWrite(self, font): glyphs = getattr(self, "glyphs", None) if glyphs is None: @@ -59,7 +83,8 @@ if glyphs: # find out whether Format 2 is more compact or not glyphIDs = [getGlyphID(glyphName) for glyphName in glyphs ] - + brokenOrder = sorted(glyphIDs) != glyphIDs + last = glyphIDs[0] ranges = [[last]] for glyphID in glyphIDs[1:]: @@ -68,31 +93,37 @@ ranges.append([glyphID]) last = glyphID ranges[-1].append(last) - - if len(ranges) * 3 < len(glyphs): # 3 words vs. 1 word + + if brokenOrder or len(ranges) * 3 < len(glyphs): # 3 words vs. 
1 word # Format 2 is more compact index = 0 for i in range(len(ranges)): start, end = ranges[i] r = RangeRecord() + r.StartID = start r.Start = font.getGlyphName(start) r.End = font.getGlyphName(end) r.StartCoverageIndex = index ranges[i] = r index = index + end - start + 1 + if brokenOrder: + warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") + ranges.sort(key=lambda a: a.StartID) + for r in ranges: + del r.StartID format = 2 rawTable = {"RangeRecord": ranges} #else: # fallthrough; Format 1 is more compact self.Format = format return rawTable - + def toXML2(self, xmlWriter, font): for glyphName in getattr(self, "glyphs", []): xmlWriter.simpletag("Glyph", value=glyphName) xmlWriter.newline() - - def fromXML(self, (name, attrs, content), font): + + def fromXML(self, name, attrs, content, font): glyphs = getattr(self, "glyphs", None) if glyphs is None: glyphs = [] @@ -100,60 +131,6 @@ glyphs.append(attrs["value"]) -class LookupList(BaseTable): - def preCompile(self): - """ This function is used to optimize writing out extension subtables. This is useful - when a font has been read in, modified, and we are now writing out a new version. If the - the extension subtables have not been touched (proof being that they have not been decompiled) - then we can write them out using the original data, and do not have to recompile them. This can save - 20-30% of the compile time for fonts with large extension tables, such as Japanese Pro fonts.""" - - if hasattr(self, 'LookupCount'): #not defined if loading from xml - lookupCount = self.LookupCount - else: - return # The optimization of not recompiling extension lookup subtables is not possible - # when reading from XML. 
- - liRange = range(lookupCount) - extTables = [] - for li in liRange: - lookup = self.Lookup[li] - if hasattr(lookup, 'SubTableCount'): #not defined if loading from xml - subtableCount = lookup.SubTableCount - else: - subtableCount = len(lookup.SubTable) - siRange = range(subtableCount) - for si in siRange: - subtable = lookup.SubTable[si] - if hasattr(subtable, 'ExtSubTable'): - extTable = subtable.ExtSubTable - extTables.append([extTable.start, extTable] ) - - # Since offsets in one subtable can and do point forward into later - # subtables, we can afford to simply copy data only for the last subtables - # which were not decompiled. So we start figuring out the - # data segments starting with the last subtTable, and work our way towards - # the first subtable, and then quit as soon as we see a subtable that was decompiled. - if extTables: - extTables.sort() - extTables.reverse() - lastTable = extTables[0][1] - if lastTable.compileStatus == 1: - lastTable.end = len(lastTable.reader.data) - lastTable.compileStatus = 3 - for i in range(1, len(extTables)): - extTable = extTables[i][1] - if extTable.compileStatus != 1: - break - extTable.end = lastTable.start - extTable.compileStatus = 3 - lastTable = extTable - -def doModulo(value): - if value < 0: - return value + 65536 - return value - class SingleSubst(FormatSwitchingBaseTable): def postRead(self, rawTable, font): @@ -163,38 +140,36 @@ if self.Format == 1: delta = rawTable["DeltaGlyphID"] inputGIDS = [ font.getGlyphID(name) for name in input ] - inputGIDS = map(doModulo, inputGIDS) - outGIDS = [ glyphID + delta for glyphID in inputGIDS ] - outGIDS = map(doModulo, outGIDS) + outGIDS = [ (glyphID + delta) % 65536 for glyphID in inputGIDS ] outNames = [ font.getGlyphName(glyphID) for glyphID in outGIDS ] - map(operator.setitem, [mapping]*lenMapping, input, outNames) + list(map(operator.setitem, [mapping]*lenMapping, input, outNames)) elif self.Format == 2: assert len(input) == rawTable["GlyphCount"], \ "invalid 
SingleSubstFormat2 table" subst = rawTable["Substitute"] - map(operator.setitem, [mapping]*lenMapping, input, subst) + list(map(operator.setitem, [mapping]*lenMapping, input, subst)) else: assert 0, "unknown format: %s" % self.Format self.mapping = mapping - + del self.Format # Don't need this anymore + def preWrite(self, font): mapping = getattr(self, "mapping", None) if mapping is None: mapping = self.mapping = {} - items = mapping.items() + items = list(mapping.items()) getGlyphID = font.getGlyphID - gidItems = [(getGlyphID(item[0]), getGlyphID(item[1])) for item in items] - sortableItems = zip(gidItems, items) - sortableItems.sort() + gidItems = [(getGlyphID(a), getGlyphID(b)) for a,b in items] + sortableItems = sorted(zip(gidItems, items)) # figure out format format = 2 delta = None for inID, outID in gidItems: if delta is None: - delta = outID - inID - else: - if delta != outID - inID: + delta = (outID - inID) % 65536 + + if (inID + delta) % 65536 != outID: break else: format = 1 @@ -212,16 +187,15 @@ else: rawTable["Substitute"] = subst return rawTable - + def toXML2(self, xmlWriter, font): - items = self.mapping.items() - items.sort() + items = sorted(self.mapping.items()) for inGlyph, outGlyph in items: xmlWriter.simpletag("Substitution", [("in", inGlyph), ("out", outGlyph)]) xmlWriter.newline() - - def fromXML(self, (name, attrs, content), font): + + def fromXML(self, name, attrs, content, font): mapping = getattr(self, "mapping", None) if mapping is None: mapping = {} @@ -230,20 +204,28 @@ class ClassDef(FormatSwitchingBaseTable): - + def postRead(self, rawTable, font): classDefs = {} - getGlyphName = font.getGlyphName + glyphOrder = font.getGlyphOrder() if self.Format == 1: start = rawTable["StartGlyph"] classList = rawTable["ClassValueArray"] - lenList = len(classList) - glyphID = font.getGlyphID(start) - gidList = range(glyphID, glyphID + len(classList)) - keyList = [getGlyphName(glyphID) for glyphID in gidList] + try: + startID = 
font.getGlyphID(start, requireReal=True) + except KeyError: + warnings.warn("ClassDef table has start glyph ID out of range: %s." % start) + startID = len(glyphOrder) + endID = startID + len(classList) + if endID > len(glyphOrder): + warnings.warn("ClassDef table has entries for out of range glyph IDs: %s,%s." % (start, len(classList))) + # NOTE: We clobber out-of-range things here. There are legit uses for those, + # but none that we have seen in the wild. + endID = len(glyphOrder) - map(operator.setitem, [classDefs]*lenList, keyList, classList) + for glyphID, cls in zip(range(startID, endID), classList): + classDefs[glyphOrder[glyphID]] = cls elif self.Format == 2: records = rawTable["ClassRangeRecord"] @@ -251,21 +233,34 @@ start = rec.Start end = rec.End cls = rec.Class - classDefs[start] = cls - glyphIDs = range(font.getGlyphID(start) + 1, font.getGlyphID(end)) - lenList = len(glyphIDs) - keyList = [getGlyphName(glyphID) for glyphID in glyphIDs] - map(operator.setitem, [classDefs]*lenList, keyList, [cls]*lenList) - classDefs[end] = cls + try: + startID = font.getGlyphID(start, requireReal=True) + except KeyError: + warnings.warn("ClassDef table has start glyph ID out of range: %s." % start) + continue + try: + endID = font.getGlyphID(end, requireReal=True) + 1 + except KeyError: + # Apparently some tools use 65535 to "match all" the range + if end != 'glyph65535': + warnings.warn("ClassDef table has end glyph ID out of range: %s." % end) + # NOTE: We clobber out-of-range things here. There are legit uses for those, + # but none that we have seen in the wild. 
+ endID = len(glyphOrder) + for glyphID in range(startID, endID): + classDefs[glyphOrder[glyphID]] = cls else: assert 0, "unknown format: %s" % self.Format self.classDefs = classDefs - + del self.Format # Don't need this anymore + def preWrite(self, font): classDefs = getattr(self, "classDefs", None) if classDefs is None: classDefs = self.classDefs = {} - items = classDefs.items() + items = list(classDefs.items()) + format = 2 + rawTable = {"ClassRangeRecord": []} getGlyphID = font.getGlyphID for i in range(len(items)): glyphName, cls = items[i] @@ -273,34 +268,49 @@ items.sort() if items: last, lastName, lastCls = items[0] - rec = ClassRangeRecord() - rec.Start = lastName - rec.Class = lastCls - ranges = [rec] + ranges = [[lastCls, last, lastName]] for glyphID, glyphName, cls in items[1:]: if glyphID != last + 1 or cls != lastCls: - rec.End = lastName - rec = ClassRangeRecord() - rec.Start = glyphName - rec.Class = cls - ranges.append(rec) + ranges[-1].extend([last, lastName]) + ranges.append([cls, glyphID, glyphName]) last = glyphID lastName = glyphName lastCls = cls - rec.End = lastName - else: - ranges = [] - self.Format = 2 # currently no support for Format 1 - return {"ClassRangeRecord": ranges} - + ranges[-1].extend([last, lastName]) + + startGlyph = ranges[0][1] + endGlyph = ranges[-1][3] + glyphCount = endGlyph - startGlyph + 1 + if len(ranges) * 3 < glyphCount + 1: + # Format 2 is more compact + for i in range(len(ranges)): + cls, start, startName, end, endName = ranges[i] + rec = ClassRangeRecord() + rec.Start = startName + rec.End = endName + rec.Class = cls + ranges[i] = rec + format = 2 + rawTable = {"ClassRangeRecord": ranges} + else: + # Format 1 is more compact + startGlyphName = ranges[0][2] + classes = [0] * glyphCount + for cls, start, startName, end, endName in ranges: + for g in range(start - startGlyph, end - startGlyph + 1): + classes[g] = cls + format = 1 + rawTable = {"StartGlyph": startGlyphName, "ClassValueArray": classes} + self.Format 
= format + return rawTable + def toXML2(self, xmlWriter, font): - items = self.classDefs.items() - items.sort() + items = sorted(self.classDefs.items()) for glyphName, cls in items: xmlWriter.simpletag("ClassDef", [("glyph", glyphName), ("class", cls)]) xmlWriter.newline() - - def fromXML(self, (name, attrs, content), font): + + def fromXML(self, name, attrs, content, font): classDefs = getattr(self, "classDefs", None) if classDefs is None: classDefs = {} @@ -309,7 +319,7 @@ class AlternateSubst(FormatSwitchingBaseTable): - + def postRead(self, rawTable, font): alternates = {} if self.Format == 1: @@ -322,13 +332,14 @@ else: assert 0, "unknown format: %s" % self.Format self.alternates = alternates - + del self.Format # Don't need this anymore + def preWrite(self, font): self.Format = 1 alternates = getattr(self, "alternates", None) if alternates is None: alternates = self.alternates = {} - items = alternates.items() + items = list(alternates.items()) for i in range(len(items)): glyphName, set = items[i] items[i] = font.getGlyphID(glyphName), glyphName, set @@ -346,12 +357,11 @@ # Also useful in that when splitting a sub-table because of an offset overflow # I don't need to calculate the change in the subtable offset due to the change in the coverage table size. # Allows packing more rules in subtable. 
- self.sortCoverageLast = 1 + self.sortCoverageLast = 1 return {"Coverage": cov, "AlternateSet": alternates} - + def toXML2(self, xmlWriter, font): - items = self.alternates.items() - items.sort() + items = sorted(self.alternates.items()) for glyphName, alternates in items: xmlWriter.begintag("AlternateSet", glyph=glyphName) xmlWriter.newline() @@ -360,8 +370,8 @@ xmlWriter.newline() xmlWriter.endtag("AlternateSet") xmlWriter.newline() - - def fromXML(self, (name, attrs, content), font): + + def fromXML(self, name, attrs, content, font): alternates = getattr(self, "alternates", None) if alternates is None: alternates = {} @@ -370,18 +380,18 @@ set = [] alternates[glyphName] = set for element in content: - if type(element) != TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element set.append(attrs["glyph"]) class LigatureSubst(FormatSwitchingBaseTable): - + def postRead(self, rawTable, font): ligatures = {} if self.Format == 1: - input = rawTable["Coverage"].glyphs + input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) ligSets = rawTable["LigatureSet"] assert len(input) == len(ligSets) for i in range(len(input)): @@ -389,12 +399,14 @@ else: assert 0, "unknown format: %s" % self.Format self.ligatures = ligatures - + del self.Format # Don't need this anymore + def preWrite(self, font): + self.Format = 1 ligatures = getattr(self, "ligatures", None) if ligatures is None: ligatures = self.ligatures = {} - items = ligatures.items() + items = list(ligatures.items()) for i in range(len(items)): glyphName, set = items[i] items[i] = font.getGlyphID(glyphName), glyphName, set @@ -413,12 +425,11 @@ # Useful in that when splitting a sub-table because of an offset overflow # I don't need to calculate the change in subtabl offset due to the coverage table size. # Allows packing more rules in subtable. 
- self.sortCoverageLast = 1 + self.sortCoverageLast = 1 return {"Coverage": cov, "LigatureSet": ligSets} - + def toXML2(self, xmlWriter, font): - items = self.ligatures.items() - items.sort() + items = sorted(self.ligatures.items()) for glyphName, ligSets in items: xmlWriter.begintag("LigatureSet", glyph=glyphName) xmlWriter.newline() @@ -428,8 +439,8 @@ xmlWriter.newline() xmlWriter.endtag("LigatureSet") xmlWriter.newline() - - def fromXML(self, (name, attrs, content), font): + + def fromXML(self, name, attrs, content, font): ligatures = getattr(self, "ligatures", None) if ligatures is None: ligatures = {} @@ -438,12 +449,13 @@ ligs = [] ligatures[glyphName] = ligs for element in content: - if type(element) != TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element lig = Ligature() lig.LigGlyph = attrs["glyph"] - lig.Component = attrs["components"].split(",") + components = attrs["components"] + lig.Component = components.split(",") if components else [] ligs.append(lig) @@ -459,7 +471,8 @@ 'LangSys': ('DefaultLangSys',), 'Coverage': ('MarkCoverage', 'BaseCoverage', 'LigatureCoverage', 'Mark1Coverage', 'Mark2Coverage', 'BacktrackCoverage', 'InputCoverage', - 'LookAheadCoverage'), + 'LookAheadCoverage', 'VertGlyphCoverage', 'HorizGlyphCoverage', + 'TopAccentCoverage', 'ExtendedShapeCoverage', 'MathKernCoverage'), 'ClassDef': ('ClassDef1', 'ClassDef2', 'BacktrackClassDef', 'InputClassDef', 'LookAheadClassDef', 'GlyphClassDef', 'MarkAttachClassDef'), 'Anchor': ('EntryAnchor', 'ExitAnchor', 'BaseAnchor', 'LigatureAnchor', @@ -475,6 +488,9 @@ 'JstfGPOSModList': ('ShrinkageEnableGPOS', 'ShrinkageDisableGPOS', 'ExtensionEnableGPOS', 'ExtensionDisableGPOS',), 'JstfMax': ('ShrinkageJstfMax', 'ExtensionJstfMax',), + 'MathKern': ('TopRightMathKern', 'TopLeftMathKern', 'BottomRightMathKern', + 'BottomLeftMathKern'), + 'MathGlyphConstruction': ('VertGlyphConstruction', 'HorizGlyphConstruction'), } # @@ -484,7 +500,7 @@ def 
fixLookupOverFlows(ttf, overflowRecord): """ Either the offset from the LookupList to a lookup overflowed, or - an offset from a lookup to a subtable overflowed. + an offset from a lookup to a subtable overflowed. The table layout is: GPSO/GUSB Script List @@ -501,14 +517,14 @@ SubTable[0] and contents ... SubTable[n] and contents - If the offset to a lookup overflowed (SubTableIndex == None) + If the offset to a lookup overflowed (SubTableIndex is None) we must promote the *previous* lookup to an Extension type. - If the offset from a lookup to subtable overflowed, then we must promote it + If the offset from a lookup to subtable overflowed, then we must promote it to an Extension Lookup type. """ ok = 0 lookupIndex = overflowRecord.LookupListIndex - if (overflowRecord.SubTableIndex == None): + if (overflowRecord.SubTableIndex is None): lookupIndex = lookupIndex - 1 if lookupIndex < 0: return ok @@ -520,21 +536,19 @@ lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup lookup = lookups[lookupIndex] # If the previous lookup is an extType, look further back. Very unlikely, but possible. 
- while lookup.LookupType == extType: + while lookup.SubTable[0].__class__.LookupType == extType: lookupIndex = lookupIndex -1 if lookupIndex < 0: return ok lookup = lookups[lookupIndex] - + for si in range(len(lookup.SubTable)): subTable = lookup.SubTable[si] extSubTableClass = lookupTypes[overflowRecord.tableType][extType] extSubTable = extSubTableClass() extSubTable.Format = 1 - extSubTable.ExtensionLookupType = lookup.LookupType extSubTable.ExtSubTable = subTable lookup.SubTable[si] = extSubTable - lookup.LookupType = extType ok = 1 return ok @@ -543,21 +557,20 @@ newSubTable.Format = oldSubTable.Format if hasattr(oldSubTable, 'sortCoverageLast'): newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast - - oldAlts = oldSubTable.alternates.items() - oldAlts.sort() + + oldAlts = sorted(oldSubTable.alternates.items()) oldLen = len(oldAlts) if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']: # Coverage table is written last. overflow is to or within the # the coverage table. We will just cut the subtable in half. - newLen = int(oldLen/2) + newLen = oldLen//2 elif overflowRecord.itemName == 'AlternateSet': - # We just need to back up by two items + # We just need to back up by two items # from the overflowed AlternateSet index to make sure the offset # to the Coverage table doesn't overflow. - newLen = overflowRecord.itemIndex - 1 + newLen = overflowRecord.itemIndex - 1 newSubTable.alternates = {} for i in range(newLen, oldLen): @@ -566,27 +579,25 @@ newSubTable.alternates[key] = item[1] del oldSubTable.alternates[key] - return ok def splitLigatureSubst(oldSubTable, newSubTable, overflowRecord): ok = 1 newSubTable.Format = oldSubTable.Format - oldLigs = oldSubTable.ligatures.items() - oldLigs.sort() + oldLigs = sorted(oldSubTable.ligatures.items()) oldLen = len(oldLigs) if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']: # Coverage table is written last. overflow is to or within the # the coverage table. We will just cut the subtable in half. 
- newLen = int(oldLen/2) + newLen = oldLen//2 elif overflowRecord.itemName == 'LigatureSet': - # We just need to back up by two items + # We just need to back up by two items # from the overflowed AlternateSet index to make sure the offset # to the Coverage table doesn't overflow. - newLen = overflowRecord.itemIndex - 1 + newLen = overflowRecord.itemIndex - 1 newSubTable.ligatures = {} for i in range(newLen, oldLen): @@ -623,7 +634,7 @@ } def fixSubTableOverFlows(ttf, overflowRecord): - """ + """ An offset has overflowed within a sub-table. We need to divide this subtable into smaller parts. """ ok = 0 @@ -636,20 +647,19 @@ # We split the subtable of the Extension table, and add a new Extension table # to contain the new subtable. - subTableType = subtable.ExtensionLookupType + subTableType = subtable.ExtSubTable.__class__.LookupType extSubTable = subtable subtable = extSubTable.ExtSubTable - newExtSubTableClass = lookupTypes[overflowRecord.tableType][lookup.LookupType] + newExtSubTableClass = lookupTypes[overflowRecord.tableType][subtable.__class__.LookupType] newExtSubTable = newExtSubTableClass() newExtSubTable.Format = extSubTable.Format - newExtSubTable.ExtensionLookupType = extSubTable.ExtensionLookupType lookup.SubTable.insert(subIndex + 1, newExtSubTable) newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType] newSubTable = newSubTableClass() newExtSubTable.ExtSubTable = newSubTable else: - subTableType = lookup.LookupType + subTableType = subtable.__class__.LookupType newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType] newSubTable = newSubTableClass() lookup.SubTable.insert(subIndex + 1, newSubTable) @@ -669,12 +679,12 @@ def _buildClasses(): - import new, re - from otData import otData - + import re + from .otData import otData + formatPat = re.compile("([A-Za-z0-9]+)Format(\d+)$") namespace = globals() - + # populate module with classes for name, table in otData: baseClass = BaseTable @@ -683,16 +693,16 @@ # XxxFormatN 
subtable, we only add the "base" table name = m.group(1) baseClass = FormatSwitchingBaseTable - if not namespace.has_key(name): + if name not in namespace: # the class doesn't exist yet, so the base implementation is used. - cls = new.classobj(name, (baseClass,), {}) + cls = type(name, (baseClass,), {}) namespace[name] = cls - + for base, alts in _equivalents.items(): base = namespace[base] for alt in alts: - namespace[alt] = new.classobj(alt, (base,), {}) - + namespace[alt] = type(alt, (base,), {}) + global lookupTypes lookupTypes = { 'GSUB': { @@ -721,9 +731,18 @@ for lookupEnum in lookupTypes.values(): for enum, cls in lookupEnum.items(): cls.LookupType = enum - + + global featureParamTypes + featureParamTypes = { + 'size': FeatureParamsSize, + } + for i in range(1, 20+1): + featureParamTypes['ss%02d' % i] = FeatureParamsStylisticSet + for i in range(1, 99+1): + featureParamTypes['cv%02d' % i] = FeatureParamsCharacterVariants + # add converters to classes - from otConverters import buildConverters + from .otConverters import buildConverters for name, table in otData: m = formatPat.match(name) if m: @@ -737,9 +756,11 @@ converters, convertersByName = buildConverters(table[1:], namespace) cls.converters[format] = converters cls.convertersByName[format] = convertersByName + # XXX Add staticSize? else: cls = namespace[name] cls.converters, cls.convertersByName = buildConverters(table, namespace) + # XXX Add staticSize? 
_buildClasses() diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_p_o_s_t.py fonttools-3.0/Lib/fontTools/ttLib/tables/_p_o_s_t.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_p_o_s_t.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_p_o_s_t.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,17 +1,19 @@ -import sys -from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder -import DefaultTable -import struct, sstruct -import array +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools import ttLib +from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder +from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval, readHex -from types import TupleType +from . import DefaultTable +import sys +import struct +import array postFormat = """ > formatType: 16.16F - italicAngle: 16.16F # italic angle in degrees + italicAngle: 16.16F # italic angle in degrees underlinePosition: h underlineThickness: h isFixedPitch: L @@ -25,7 +27,7 @@ class table__p_o_s_t(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): sstruct.unpack(postFormat, data[:postFormatSize], self) data = data[postFormatSize:] @@ -35,10 +37,12 @@ self.decode_format_2_0(data, ttFont) elif self.formatType == 3.0: self.decode_format_3_0(data, ttFont) + elif self.formatType == 4.0: + self.decode_format_4_0(data, ttFont) else: # supported format - raise ttLib.TTLibError, "'post' table format %f not supported" % self.formatType - + raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType) + def compile(self, ttFont): data = sstruct.pack(postFormat, self) if self.formatType == 1.0: @@ -47,25 +51,27 @@ data = data + self.encode_format_2_0(ttFont) elif self.formatType == 3.0: pass # we're done + elif self.formatType == 4.0: + data = data + self.encode_format_4_0(ttFont) else: # supported format - raise ttLib.TTLibError, "'post' table format %f not 
supported" % self.formatType + raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType) return data - + def getGlyphOrder(self): """This function will get called by a ttLib.TTFont instance. Do not call this function yourself, use TTFont().getGlyphOrder() or its relatives instead! """ if not hasattr(self, "glyphOrder"): - raise ttLib.TTLibError, "illegal use of getGlyphOrder()" + raise ttLib.TTLibError("illegal use of getGlyphOrder()") glyphOrder = self.glyphOrder del self.glyphOrder return glyphOrder - + def decode_format_1_0(self, data, ttFont): self.glyphOrder = standardGlyphOrder[:ttFont["maxp"].numGlyphs] - + def decode_format_2_0(self, data, ttFont): numGlyphs, = struct.unpack(">H", data[:2]) numGlyphs = int(numGlyphs) @@ -78,48 +84,72 @@ data = data[2:] indices = array.array("H") indices.fromstring(data[:2*numGlyphs]) - if sys.byteorder <> "big": + if sys.byteorder != "big": indices.byteswap() data = data[2*numGlyphs:] self.extraNames = extraNames = unpackPStrings(data) - self.glyphOrder = glyphOrder = [None] * int(ttFont['maxp'].numGlyphs) + self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs) for glyphID in range(numGlyphs): index = indices[glyphID] - if index > 257: - name = extraNames[index-258] + if index > 32767: # reserved for future use; ignore + name = "" + elif index > 257: + try: + name = extraNames[index-258] + except IndexError: + name = "" else: # fetch names from standard list name = standardGlyphOrder[index] glyphOrder[glyphID] = name - #AL990511: code added to handle the case of new glyphs without - # entries into the 'post' table - if numGlyphs < ttFont['maxp'].numGlyphs: - for i in range(numGlyphs, ttFont['maxp'].numGlyphs): - glyphOrder[i] = "glyph#%.5d" % i - self.extraNames.append(glyphOrder[i]) self.build_psNameMapping(ttFont) - + def build_psNameMapping(self, ttFont): mapping = {} allNames = {} for i in range(ttFont['maxp'].numGlyphs): glyphName = psName = self.glyphOrder[i] - if 
allNames.has_key(glyphName): + if glyphName == "": + glyphName = "glyph%.5d" % i + if glyphName in allNames: # make up a new glyphName that's unique - n = 1 - while allNames.has_key(glyphName + "#" + `n`): - n = n + 1 - glyphName = glyphName + "#" + `n` - self.glyphOrder[i] = glyphName + n = allNames[glyphName] + while (glyphName + "#" + str(n)) in allNames: + n += 1 + allNames[glyphName] = n + 1 + glyphName = glyphName + "#" + str(n) + + self.glyphOrder[i] = glyphName + allNames[glyphName] = 1 + if glyphName != psName: mapping[glyphName] = psName - allNames[glyphName] = psName + self.mapping = mapping - + def decode_format_3_0(self, data, ttFont): # Setting self.glyphOrder to None will cause the TTFont object # try and construct glyph names from a Unicode cmap table. self.glyphOrder = None - + + def decode_format_4_0(self, data, ttFont): + from fontTools import agl + numGlyphs = ttFont['maxp'].numGlyphs + indices = array.array("H") + indices.fromstring(data) + if sys.byteorder != "big": + indices.byteswap() + # In some older fonts, the size of the post table doesn't match + # the number of glyphs. Sometimes it's bigger, sometimes smaller. 
+ self.glyphOrder = glyphOrder = [''] * int(numGlyphs) + for i in range(min(len(indices),numGlyphs)): + if indices[i] == 0xFFFF: + self.glyphOrder[i] = '' + elif indices[i] in agl.UV2AGL: + self.glyphOrder[i] = agl.UV2AGL[indices[i]] + else: + self.glyphOrder[i] = "uni%04X" % indices[i] + self.build_psNameMapping(ttFont) + def encode_format_2_0(self, ttFont): numGlyphs = ttFont['maxp'].numGlyphs glyphOrder = ttFont.getGlyphOrder() @@ -131,23 +161,42 @@ extraDict[extraNames[i]] = i for glyphID in range(numGlyphs): glyphName = glyphOrder[glyphID] - if self.mapping.has_key(glyphName): + if glyphName in self.mapping: psName = self.mapping[glyphName] else: psName = glyphName - if extraDict.has_key(psName): + if psName in extraDict: index = 258 + extraDict[psName] elif psName in standardGlyphOrder: index = standardGlyphOrder.index(psName) else: index = 258 + len(extraNames) + assert index < 32768, "Too many glyph names for 'post' table format 2" extraDict[psName] = len(extraNames) extraNames.append(psName) indices.append(index) - if sys.byteorder <> "big": + if sys.byteorder != "big": indices.byteswap() return struct.pack(">H", numGlyphs) + indices.tostring() + packPStrings(extraNames) - + + def encode_format_4_0(self, ttFont): + from fontTools import agl + numGlyphs = ttFont['maxp'].numGlyphs + glyphOrder = ttFont.getGlyphOrder() + assert len(glyphOrder) == numGlyphs + indices = array.array("H") + for glyphID in glyphOrder: + glyphID = glyphID.split('#')[0] + if glyphID in agl.AGL2UV: + indices.append(agl.AGL2UV[glyphID]) + elif len(glyphID) == 7 and glyphID[:3] == 'uni': + indices.append(int(glyphID[3:],16)) + else: + indices.append(0xFFFF) + if sys.byteorder != "big": + indices.byteswap() + return indices.tostring() + def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(postFormat) for name in names: @@ -164,8 +213,7 @@ "ps name mapping for those cases where they differ. 
That's what\n" "you see below.\n") writer.newline() - items = self.mapping.items() - items.sort() + items = sorted(self.mapping.items()) for name, psName in items: writer.simpletag("psName", name=name, psName=psName) writer.newline() @@ -187,14 +235,14 @@ writer.dumphex(self.data) writer.endtag("hexdata") writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if name not in ("psNames", "extraNames", "hexdata"): setattr(self, name, safeEval(attrs["value"])) elif name == "psNames": self.mapping = {} for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element if name == "psName": @@ -202,7 +250,7 @@ elif name == "extraNames": self.extraNames = [] for element in content: - if type(element) <> TupleType: + if not isinstance(element, tuple): continue name, attrs, content = element if name == "psName": @@ -216,15 +264,14 @@ index = 0 dataLen = len(data) while index < dataLen: - length = ord(data[index]) - strings.append(data[index+1:index+1+length]) + length = byteord(data[index]) + strings.append(tostr(data[index+1:index+1+length], encoding="latin1")) index = index + 1 + length return strings def packPStrings(strings): - data = "" + data = b"" for s in strings: - data = data + chr(len(s)) + s + data = data + bytechr(len(s)) + tobytes(s, encoding="latin1") return data - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_p_r_e_p.py fonttools-3.0/Lib/fontTools/ttLib/tables/_p_r_e_p.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_p_r_e_p.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_p_r_e_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,7 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools import ttLib superclass = ttLib.getTableClass("fpgm") class table__p_r_e_p(superclass): pass - diff -Nru 
fonttools-2.4/Lib/fontTools/ttLib/tables/sbixGlyph.py fonttools-3.0/Lib/fontTools/ttLib/tables/sbixGlyph.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/sbixGlyph.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/sbixGlyph.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,119 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import readHex, safeEval +import struct + + +sbixGlyphHeaderFormat = """ + > + originOffsetX: h # The x-value of the point in the glyph relative to its + # lower-left corner which corresponds to the origin of + # the glyph on the screen, that is the point on the + # baseline at the left edge of the glyph. + originOffsetY: h # The y-value of the point in the glyph relative to its + # lower-left corner which corresponds to the origin of + # the glyph on the screen, that is the point on the + # baseline at the left edge of the glyph. + graphicType: 4s # e.g. 
"png " +""" + +sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat) + + +class Glyph(object): + def __init__(self, glyphName=None, referenceGlyphName=None, originOffsetX=0, originOffsetY=0, graphicType=None, imageData=None, rawdata=None, gid=0): + self.gid = gid + self.glyphName = glyphName + self.referenceGlyphName = referenceGlyphName + self.originOffsetX = originOffsetX + self.originOffsetY = originOffsetY + self.rawdata = rawdata + self.graphicType = graphicType + self.imageData = imageData + + # fix self.graphicType if it is null terminated or too short + if self.graphicType is not None: + if self.graphicType[-1] == "\0": + self.graphicType = self.graphicType[:-1] + if len(self.graphicType) > 4: + from fontTools import ttLib + raise ttLib.TTLibError("Glyph.graphicType must not be longer than 4 characters.") + elif len(self.graphicType) < 4: + # pad with spaces + self.graphicType += " "[:(4 - len(self.graphicType))] + + def decompile(self, ttFont): + self.glyphName = ttFont.getGlyphName(self.gid) + if self.rawdata is None: + from fontTools import ttLib + raise ttLib.TTLibError("No table data to decompile") + if len(self.rawdata) > 0: + if len(self.rawdata) < sbixGlyphHeaderFormatSize: + from fontTools import ttLib + #print "Glyph %i header too short: Expected %x, got %x." 
% (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata)) + raise ttLib.TTLibError("Glyph header too short.") + + sstruct.unpack(sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self) + + if self.graphicType == "dupe": + # this glyph is a reference to another glyph's image data + gid, = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:]) + self.referenceGlyphName = ttFont.getGlyphName(gid) + else: + self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:] + self.referenceGlyphName = None + # clean up + del self.rawdata + del self.gid + + def compile(self, ttFont): + if self.glyphName is None: + from fontTools import ttLib + raise ttLib.TTLibError("Can't compile Glyph without glyph name") + # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index? + # (needed if you just want to compile the sbix table on its own) + self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName)) + if self.graphicType is None: + self.rawdata = "" + else: + self.rawdata = sstruct.pack(sbixGlyphHeaderFormat, self) + self.imageData + + def toXML(self, xmlWriter, ttFont): + if self.graphicType == None: + # TODO: ignore empty glyphs? + # a glyph data entry is required for each glyph, + # but empty ones can be calculated at compile time + xmlWriter.simpletag("glyph", name=self.glyphName) + xmlWriter.newline() + return + xmlWriter.begintag("glyph", + graphicType=self.graphicType, + name=self.glyphName, + originOffsetX=self.originOffsetX, + originOffsetY=self.originOffsetY, + ) + xmlWriter.newline() + if self.graphicType == "dupe": + # graphicType == "dupe" is a reference to another glyph id. 
+ xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName) + else: + xmlWriter.begintag("hexdata") + xmlWriter.newline() + xmlWriter.dumphex(self.imageData) + xmlWriter.endtag("hexdata") + xmlWriter.newline() + xmlWriter.endtag("glyph") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "ref": + # glyph is a "dupe", i.e. a reference to another glyph's image data. + # in this case imageData contains the glyph id of the reference glyph + # get glyph id from glyphname + self.imageData = struct.pack(">H", ttFont.getGlyphID(safeEval("'''" + attrs["glyphname"] + "'''"))) + elif name == "hexdata": + self.imageData = readHex(content) + else: + from fontTools import ttLib + raise ttLib.TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_s_b_i_x.py fonttools-3.0/Lib/fontTools/ttLib/tables/_s_b_i_x.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_s_b_i_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_s_b_i_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,117 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, num2binary, binary2num +from . import DefaultTable +from .sbixGlyph import * +from .sbixStrike import * + + +sbixHeaderFormat = """ + > + version: H # Version number (set to 1) + flags: H # The only two bits used in the flags field are bits 0 + # and 1. For historical reasons, bit 0 must always be 1. 
+ # Bit 1 is a sbixDrawOutlines flag and is interpreted as + # follows: + # 0: Draw only 'sbix' bitmaps + # 1: Draw both 'sbix' bitmaps and outlines, in that + # order + numStrikes: L # Number of bitmap strikes to follow +""" +sbixHeaderFormatSize = sstruct.calcsize(sbixHeaderFormat) + + +sbixStrikeOffsetFormat = """ + > + strikeOffset: L # Offset from begining of table to data for the + # individual strike +""" +sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat) + + +class table__s_b_i_x(DefaultTable.DefaultTable): + def __init__(self, tag): + self.tableTag = tag + self.version = 1 + self.flags = 1 + self.numStrikes = 0 + self.strikes = {} + self.strikeOffsets = [] + + def decompile(self, data, ttFont): + # read table header + sstruct.unpack(sbixHeaderFormat, data[ : sbixHeaderFormatSize], self) + # collect offsets to individual strikes in self.strikeOffsets + for i in range(self.numStrikes): + current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize + offset_entry = sbixStrikeOffset() + sstruct.unpack(sbixStrikeOffsetFormat, \ + data[current_offset:current_offset+sbixStrikeOffsetFormatSize], \ + offset_entry) + self.strikeOffsets.append(offset_entry.strikeOffset) + + # decompile Strikes + for i in range(self.numStrikes-1, -1, -1): + current_strike = Strike(rawdata=data[self.strikeOffsets[i]:]) + data = data[:self.strikeOffsets[i]] + current_strike.decompile(ttFont) + #print " Strike length: %xh" % len(bitmapSetData) + #print "Number of Glyph entries:", len(current_strike.glyphs) + if current_strike.ppem in self.strikes: + from fontTools import ttLib + raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike") + self.strikes[current_strike.ppem] = current_strike + + # after the glyph data records have been extracted, we don't need the offsets anymore + del self.strikeOffsets + del self.numStrikes + + def compile(self, ttFont): + sbixData = "" + self.numStrikes = len(self.strikes) + sbixHeader = 
sstruct.pack(sbixHeaderFormat, self) + + # calculate offset to start of first strike + setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes + + for si in sorted(self.strikes.keys()): + current_strike = self.strikes[si] + current_strike.compile(ttFont) + # append offset to this strike to table header + current_strike.strikeOffset = setOffset + sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike) + setOffset += len(current_strike.data) + sbixData += current_strike.data + + return sbixHeader + sbixData + + def toXML(self, xmlWriter, ttFont): + xmlWriter.simpletag("version", value=self.version) + xmlWriter.newline() + xmlWriter.simpletag("flags", value=num2binary(self.flags, 16)) + xmlWriter.newline() + for i in sorted(self.strikes.keys()): + self.strikes[i].toXML(xmlWriter, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name =="version": + setattr(self, name, safeEval(attrs["value"])) + elif name == "flags": + setattr(self, name, binary2num(attrs["value"])) + elif name == "strike": + current_strike = Strike() + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + current_strike.fromXML(name, attrs, content, ttFont) + self.strikes[current_strike.ppem] = current_strike + else: + from fontTools import ttLib + raise ttLib.TTLibError("can't handle '%s' element" % name) + + +# Helper classes + +class sbixStrikeOffset(object): + pass diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/sbixStrike.py fonttools-3.0/Lib/fontTools/ttLib/tables/sbixStrike.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/sbixStrike.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/sbixStrike.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,150 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import readHex +from .sbixGlyph import * +import struct + 
+sbixStrikeHeaderFormat = """ + > + ppem: H # The PPEM for which this strike was designed (e.g., 9, + # 12, 24) + resolution: H # The screen resolution (in dpi) for which this strike + # was designed (e.g., 72) +""" + +sbixGlyphDataOffsetFormat = """ + > + glyphDataOffset: L # Offset from the beginning of the strike data record + # to data for the individual glyph +""" + +sbixStrikeHeaderFormatSize = sstruct.calcsize(sbixStrikeHeaderFormat) +sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat) + + +class Strike(object): + def __init__(self, rawdata=None, ppem=0, resolution=72): + self.data = rawdata + self.ppem = ppem + self.resolution = resolution + self.glyphs = {} + + def decompile(self, ttFont): + if self.data is None: + from fontTools import ttLib + raise ttLib.TTLibError + if len(self.data) < sbixStrikeHeaderFormatSize: + from fontTools import ttLib + raise(ttLib.TTLibError, "Strike header too short: Expected %x, got %x.") \ + % (sbixStrikeHeaderFormatSize, len(self.data)) + + # read Strike header from raw data + sstruct.unpack(sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self) + + # calculate number of glyphs + firstGlyphDataOffset, = struct.unpack(">L", \ + self.data[sbixStrikeHeaderFormatSize:sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize]) + self.numGlyphs = (firstGlyphDataOffset - sbixStrikeHeaderFormatSize) // sbixGlyphDataOffsetFormatSize - 1 + # ^ -1 because there's one more offset than glyphs + + # build offset list for single glyph data offsets + self.glyphDataOffsets = [] + for i in range(self.numGlyphs + 1): # + 1 because there's one more offset than glyphs + start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize + current_offset, = struct.unpack(">L", self.data[start:start + sbixGlyphDataOffsetFormatSize]) + self.glyphDataOffsets.append(current_offset) + + # iterate through offset list and slice raw data into glyph data records + for i in range(self.numGlyphs): + current_glyph 
= Glyph(rawdata=self.data[self.glyphDataOffsets[i]:self.glyphDataOffsets[i+1]], gid=i) + current_glyph.decompile(ttFont) + self.glyphs[current_glyph.glyphName] = current_glyph + del self.glyphDataOffsets + del self.numGlyphs + del self.data + + def compile(self, ttFont): + self.glyphDataOffsets = "" + self.bitmapData = "" + + glyphOrder = ttFont.getGlyphOrder() + + # first glyph starts right after the header + currentGlyphDataOffset = sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1) + for glyphName in glyphOrder: + if glyphName in self.glyphs: + # we have glyph data for this glyph + current_glyph = self.glyphs[glyphName] + else: + # must add empty glyph data record for this glyph + current_glyph = Glyph(glyphName=glyphName) + current_glyph.compile(ttFont) + current_glyph.glyphDataOffset = currentGlyphDataOffset + self.bitmapData += current_glyph.rawdata + currentGlyphDataOffset += len(current_glyph.rawdata) + self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, current_glyph) + + # add last "offset", really the end address of the last glyph data record + dummy = Glyph() + dummy.glyphDataOffset = currentGlyphDataOffset + self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy) + + # pack header + self.data = sstruct.pack(sbixStrikeHeaderFormat, self) + # add offsets and image data after header + self.data += self.glyphDataOffsets + self.bitmapData + + def toXML(self, xmlWriter, ttFont): + xmlWriter.begintag("strike") + xmlWriter.newline() + xmlWriter.simpletag("ppem", value=self.ppem) + xmlWriter.newline() + xmlWriter.simpletag("resolution", value=self.resolution) + xmlWriter.newline() + glyphOrder = ttFont.getGlyphOrder() + for i in range(len(glyphOrder)): + if glyphOrder[i] in self.glyphs: + self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont) + # TODO: what if there are more glyph data records than (glyf table) glyphs? 
+ xmlWriter.endtag("strike") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name in ["ppem", "resolution"]: + setattr(self, name, safeEval(attrs["value"])) + elif name == "glyph": + if "graphicType" in attrs: + myFormat = safeEval("'''" + attrs["graphicType"] + "'''") + else: + myFormat = None + if "glyphname" in attrs: + myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''") + elif "name" in attrs: + myGlyphName = safeEval("'''" + attrs["name"] + "'''") + else: + from fontTools import ttLib + raise ttLib.TTLibError("Glyph must have a glyph name.") + if "originOffsetX" in attrs: + myOffsetX = safeEval(attrs["originOffsetX"]) + else: + myOffsetX = 0 + if "originOffsetY" in attrs: + myOffsetY = safeEval(attrs["originOffsetY"]) + else: + myOffsetY = 0 + current_glyph = Glyph( + glyphName=myGlyphName, + graphicType=myFormat, + originOffsetX=myOffsetX, + originOffsetY=myOffsetY, + ) + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + current_glyph.fromXML(name, attrs, content, ttFont) + current_glyph.compile(ttFont) + self.glyphs[current_glyph.glyphName] = current_glyph + else: + from fontTools import ttLib + raise ttLib.TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/S_I_N_G_.py fonttools-3.0/Lib/fontTools/ttLib/tables/S_I_N_G_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/S_I_N_G_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/S_I_N_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,9 +1,8 @@ -import DefaultTable -import sstruct -import struct -import time -import string -from fontTools.misc.textTools import safeEval, num2binary, binary2num +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable SINGFormat = """ > # big endian @@ -20,65 +19,63 @@ nameLength: 1s """ # baseGlyphName is a byte string which follows the record above. - class table_S_I_N_G_(DefaultTable.DefaultTable): - + dependencies = [] - + def decompile(self, data, ttFont): dummy, rest = sstruct.unpack2(SINGFormat, data, self) self.uniqueName = self.decompileUniqueName(self.uniqueName) - self.nameLength = ord(self.nameLength) + self.nameLength = byteord(self.nameLength) assert len(rest) == self.nameLength - self.baseGlyphName = rest - + self.baseGlyphName = tostr(rest) + rawMETAMD5 = self.METAMD5 - self.METAMD5 = "[" + hex(ord(self.METAMD5[0])) + self.METAMD5 = "[" + hex(byteord(self.METAMD5[0])) for char in rawMETAMD5[1:]: - self.METAMD5 = self.METAMD5 + ", " + hex(ord(char)) + self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char)) self.METAMD5 = self.METAMD5 + "]" - + def decompileUniqueName(self, data): name = "" for char in data: - val = ord(char) + val = byteord(char) if val == 0: break if (val > 31) or (val < 128): - name = name + char + name += chr(val) else: octString = oct(val) if len(octString) > 3: octString = octString[1:] # chop off that leading zero. 
elif len(octString) < 3: octString.zfill(3) - name = name + "\\" + octString + name += "\\" + octString return name - - + def compile(self, ttFont): - self.nameLength = chr(len(self.baseGlyphName)) - self.uniqueName = self.compilecompileUniqueName(self.uniqueName, 28) + d = self.__dict__.copy() + d["nameLength"] = bytechr(len(self.baseGlyphName)) + d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28) METAMD5List = eval(self.METAMD5) - self.METAMD5 = "" + d["METAMD5"] = b"" for val in METAMD5List: - self.METAMD5 = self.METAMD5 + chr(val) - assert (len(self.METAMD5) == 16), "Failed to pack 16 byte MD5 hash in SING table" - data = sstruct.pack(SINGFormat, self) - data = data + self.baseGlyphName + d["METAMD5"] += bytechr(val) + assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table" + data = sstruct.pack(SINGFormat, d) + data = data + tobytes(self.baseGlyphName) return data - + def compilecompileUniqueName(self, name, length): nameLen = len(name) if length <= nameLen: - name[:length-1] + "\000" + name = name[:length-1] + "\000" else: - name.join( (nameLen - length)* "\000") + name += (nameLen - length) * "\000" return name - def toXML(self, writer, ttFont): writer.comment("Most of this table will be recalculated by the compiler") writer.newline() @@ -89,14 +86,10 @@ writer.newline() writer.simpletag("baseGlyphName", value=self.baseGlyphName) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): value = attrs["value"] if name in ["uniqueName", "METAMD5", "baseGlyphName"]: setattr(self, name, value) else: - try: - value = safeEval(value) - except OverflowError: - value = long(value) - setattr(self, name, value) + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/S_V_G_.py fonttools-3.0/Lib/fontTools/ttLib/tables/S_V_G_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/S_V_G_.py 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.0/Lib/fontTools/ttLib/tables/S_V_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,379 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . import DefaultTable +try: + import xml.etree.cElementTree as ET +except ImportError: + import xml.etree.ElementTree as ET +import struct +import re + +__doc__=""" +Compiles/decompiles version 0 and 1 SVG tables from/to XML. + +Version 1 is the first SVG definition, implemented in Mozilla before Aug 2013, now deprecated. +This module will decompile this correctly, but will compile a version 1 table +only if you add the secret element "" to the SVG element in the TTF file. + +Version 0 is the joint Adobe-Mozilla proposal, which supports color palettes. + +The XML format is: + + + <complete SVG doc> ]] + </svgDoc> +... + <svgDoc endGlyphID="n" startGlyphID="m"> + <![CDATA[ <complete SVG doc> ]] + </svgDoc> + + <colorPalettes> + <colorParamUINameID>n</colorParamUINameID> + ... + <colorParamUINameID>m</colorParamUINameID> + <colorPalette uiNameID="n"> + <colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" /> + ... + <colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" /> + </colorPalette> + ... + <colorPalette uiNameID="m"> + <colorRecord red="<int> green="<int>" blue="<int>" alpha="<int>" /> + ... + <colorRecord red=<int>" green="<int>" blue="<int>" alpha="<int>" /> + </colorPalette> + </colorPalettes> +</SVG> + +Color values must be less than 256. + +The number of color records in each </colorPalette> must be the same as +the number of <colorParamUINameID> elements. 
+ +""" + +XML = ET.XML +XMLElement = ET.Element +xmlToString = ET.tostring + +SVG_format_0 = """ + > # big endian + version: H + offsetToSVGDocIndex: L + offsetToColorPalettes: L +""" + +SVG_format_0Size = sstruct.calcsize(SVG_format_0) + +SVG_format_1 = """ + > # big endian + version: H + numIndicies: H +""" + +SVG_format_1Size = sstruct.calcsize(SVG_format_1) + +doc_index_entry_format_0 = """ + > # big endian + startGlyphID: H + endGlyphID: H + svgDocOffset: L + svgDocLength: L +""" + +doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0) + +colorRecord_format_0 = """ + red: B + green: B + blue: B + alpha: B +""" + + +class table_S_V_G_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.docList = None + self.colorPalettes = None + pos = 0 + self.version = struct.unpack(">H", data[pos:pos+2])[0] + + if self.version == 1: + self.decompile_format_1(data, ttFont) + else: + if self.version != 0: + print("Unknown SVG table version '%s'. Decompiling as version 0." % (self.version)) + self.decompile_format_0(data, ttFont) + + def decompile_format_0(self, data, ttFont): + dummy, data2 = sstruct.unpack2(SVG_format_0, data, self) + # read in SVG Documents Index + self.decompileEntryList(data) + + # read in colorPalettes table. 
+ self.colorPalettes = colorPalettes = ColorPalettes() + pos = self.offsetToColorPalettes + if pos > 0: + colorPalettes.numColorParams = numColorParams = struct.unpack(">H", data[pos:pos+2])[0] + if numColorParams > 0: + colorPalettes.colorParamUINameIDs = colorParamUINameIDs = [] + pos = pos + 2 + for i in range(numColorParams): + nameID = struct.unpack(">H", data[pos:pos+2])[0] + colorParamUINameIDs.append(nameID) + pos = pos + 2 + + colorPalettes.numColorPalettes = numColorPalettes = struct.unpack(">H", data[pos:pos+2])[0] + pos = pos + 2 + if numColorPalettes > 0: + colorPalettes.colorPaletteList = colorPaletteList = [] + for i in range(numColorPalettes): + colorPalette = ColorPalette() + colorPaletteList.append(colorPalette) + colorPalette.uiNameID = struct.unpack(">H", data[pos:pos+2])[0] + pos = pos + 2 + colorPalette.paletteColors = paletteColors = [] + for j in range(numColorParams): + colorRecord, colorPaletteData = sstruct.unpack2(colorRecord_format_0, data[pos:], ColorRecord()) + paletteColors.append(colorRecord) + pos += 4 + + def decompile_format_1(self, data, ttFont): + pos = 2 + self.numEntries = struct.unpack(">H", data[pos:pos+2])[0] + pos += 2 + self.decompileEntryList(data, pos) + + def decompileEntryList(self, data): + # data starts with the first entry of the entry list. 
+ pos = subTableStart = self.offsetToSVGDocIndex + self.numEntries = numEntries = struct.unpack(">H", data[pos:pos+2])[0] + pos += 2 + if self.numEntries > 0: + data2 = data[pos:] + self.docList = [] + self.entries = entries = [] + for i in range(self.numEntries): + docIndexEntry, data2 = sstruct.unpack2(doc_index_entry_format_0, data2, DocumentIndexEntry()) + entries.append(docIndexEntry) + + for entry in entries: + start = entry.svgDocOffset + subTableStart + end = start + entry.svgDocLength + doc = data[start:end] + if doc.startswith(b"\x1f\x8b"): + import gzip + bytesIO = BytesIO(doc) + with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper: + doc = gunzipper.read() + self.compressed = True + del bytesIO + doc = tostr(doc, "utf_8") + self.docList.append( [doc, entry.startGlyphID, entry.endGlyphID] ) + + def compile(self, ttFont): + if hasattr(self, "version1"): + data = self.compileFormat1(ttFont) + else: + data = self.compileFormat0(ttFont) + return data + + def compileFormat0(self, ttFont): + version = 0 + offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header. + # get SGVDoc info. 
+ docList = [] + entryList = [] + numEntries = len(self.docList) + datum = struct.pack(">H",numEntries) + entryList.append(datum) + curOffset = len(datum) + doc_index_entry_format_0Size*numEntries + for doc, startGlyphID, endGlyphID in self.docList: + docOffset = curOffset + docBytes = tobytes(doc, encoding="utf_8") + if getattr(self, "compressed", False) and not docBytes.startswith(b"\x1f\x8b"): + import gzip + bytesIO = BytesIO() + with gzip.GzipFile(None, "w", fileobj=bytesIO) as gzipper: + gzipper.write(docBytes) + gzipped = bytesIO.getvalue() + if len(gzipped) < len(docBytes): + docBytes = gzipped + del gzipped, bytesIO + docLength = len(docBytes) + curOffset += docLength + entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength) + entryList.append(entry) + docList.append(docBytes) + entryList.extend(docList) + svgDocData = bytesjoin(entryList) + + # get colorpalette info. + if self.colorPalettes is None: + offsetToColorPalettes = 0 + palettesData = "" + else: + offsetToColorPalettes = SVG_format_0Size + len(svgDocData) + dataList = [] + numColorParams = len(self.colorPalettes.colorParamUINameIDs) + datum = struct.pack(">H", numColorParams) + dataList.append(datum) + for uiNameId in self.colorPalettes.colorParamUINameIDs: + datum = struct.pack(">H", uiNameId) + dataList.append(datum) + numColorPalettes = len(self.colorPalettes.colorPaletteList) + datum = struct.pack(">H", numColorPalettes) + dataList.append(datum) + for colorPalette in self.colorPalettes.colorPaletteList: + datum = struct.pack(">H", colorPalette.uiNameID) + dataList.append(datum) + for colorRecord in colorPalette.paletteColors: + data = struct.pack(">BBBB", colorRecord.red, colorRecord.green, colorRecord.blue, colorRecord.alpha) + dataList.append(data) + palettesData = bytesjoin(dataList) + + header = struct.pack(">HLL", version, offsetToSVGDocIndex, offsetToColorPalettes) + data = [header, svgDocData, palettesData] + data = bytesjoin(data) + return data + + def 
compileFormat1(self, ttFont): + version = 1 + numEntries = len(self.docList) + header = struct.pack(">HH", version, numEntries) + dataList = [header] + docList = [] + curOffset = SVG_format_1Size + doc_index_entry_format_0Size*numEntries + for doc, startGlyphID, endGlyphID in self.docList: + docOffset = curOffset + docBytes = tobytes(doc, encoding="utf_8") + docLength = len(docBytes) + curOffset += docLength + entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength) + dataList.append(entry) + docList.append(docBytes) + dataList.extend(docList) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.newline() + for doc, startGID, endGID in self.docList: + writer.begintag("svgDoc", startGlyphID=startGID, endGlyphID=endGID) + writer.newline() + writer.writecdata(doc) + writer.newline() + writer.endtag("svgDoc") + writer.newline() + + if (self.colorPalettes is not None) and (self.colorPalettes.numColorParams is not None): + writer.begintag("colorPalettes") + writer.newline() + for uiNameID in self.colorPalettes.colorParamUINameIDs: + writer.begintag("colorParamUINameID") + writer.writeraw(str(uiNameID)) + writer.endtag("colorParamUINameID") + writer.newline() + for colorPalette in self.colorPalettes.colorPaletteList: + writer.begintag("colorPalette", [("uiNameID", str(colorPalette.uiNameID))]) + writer.newline() + for colorRecord in colorPalette.paletteColors: + colorAttributes = [ + ("red", hex(colorRecord.red)), + ("green", hex(colorRecord.green)), + ("blue", hex(colorRecord.blue)), + ("alpha", hex(colorRecord.alpha)), + ] + writer.begintag("colorRecord", colorAttributes) + writer.endtag("colorRecord") + writer.newline() + writer.endtag("colorPalette") + writer.newline() + + writer.endtag("colorPalettes") + writer.newline() + else: + writer.begintag("colorPalettes") + writer.endtag("colorPalettes") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "svgDoc": + if not hasattr(self, 
"docList"): + self.docList = [] + doc = strjoin(content) + doc = doc.strip() + startGID = int(attrs["startGlyphID"]) + endGID = int(attrs["endGlyphID"]) + self.docList.append( [doc, startGID, endGID] ) + elif name == "colorPalettes": + self.colorPalettes = ColorPalettes() + self.colorPalettes.fromXML(name, attrs, content, ttFont) + if self.colorPalettes.numColorParams == 0: + self.colorPalettes = None + else: + print("Unknown", name, content) + +class DocumentIndexEntry(object): + def __init__(self): + self.startGlyphID = None # USHORT + self.endGlyphID = None # USHORT + self.svgDocOffset = None # ULONG + self.svgDocLength = None # ULONG + + def __repr__(self): + return "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength) + +class ColorPalettes(object): + def __init__(self): + self.numColorParams = None # USHORT + self.colorParamUINameIDs = [] # list of name table name ID values that provide UI description of each color palette. + self.numColorPalettes = None # USHORT + self.colorPaletteList = [] # list of ColorPalette records + + def fromXML(self, name, attrs, content, ttFont): + for element in content: + if isinstance(element, type("")): + continue + name, attrib, content = element + if name == "colorParamUINameID": + uiNameID = int(content[0]) + self.colorParamUINameIDs.append(uiNameID) + elif name == "colorPalette": + colorPalette = ColorPalette() + self.colorPaletteList.append(colorPalette) + colorPalette.fromXML((name, attrib, content), ttFont) + + self.numColorParams = len(self.colorParamUINameIDs) + self.numColorPalettes = len(self.colorPaletteList) + for colorPalette in self.colorPaletteList: + if len(colorPalette.paletteColors) != self.numColorParams: + raise ValueError("Number of color records in a colorPalette ('%s') does not match the number of colorParamUINameIDs elements ('%s')." 
% (len(colorPalette.paletteColors), self.numColorParams)) + +class ColorPalette(object): + def __init__(self): + self.uiNameID = None # USHORT. name table ID that describes user interface strings associated with this color palette. + self.paletteColors = [] # list of ColorRecords + + def fromXML(self, name, attrs, content, ttFont): + self.uiNameID = int(attrs["uiNameID"]) + for element in content: + if isinstance(element, type("")): + continue + name, attrib, content = element + if name == "colorRecord": + colorRecord = ColorRecord() + self.paletteColors.append(colorRecord) + colorRecord.red = eval(attrib["red"]) + colorRecord.green = eval(attrib["green"]) + colorRecord.blue = eval(attrib["blue"]) + colorRecord.alpha = eval(attrib["alpha"]) + +class ColorRecord(object): + def __init__(self): + self.red = 255 # all are one byte values. + self.green = 255 + self.blue = 255 + self.alpha = 255 diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I__0.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__0.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I__0.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__0.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,48 +1,49 @@ -import DefaultTable +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import DefaultTable import struct tsi0Format = '>HHl' -def fixlongs((glyphID, textLength, textOffset)): - return int(glyphID), int(textLength), textOffset +def fixlongs(glyphID, textLength, textOffset): + return int(glyphID), int(textLength), textOffset class table_T_S_I__0(DefaultTable.DefaultTable): - + dependencies = ["TSI1"] - + def decompile(self, data, ttFont): numGlyphs = ttFont['maxp'].numGlyphs indices = [] size = struct.calcsize(tsi0Format) for i in range(numGlyphs + 5): - glyphID, textLength, textOffset = fixlongs(struct.unpack(tsi0Format, data[:size])) + glyphID, textLength, textOffset = fixlongs(*struct.unpack(tsi0Format, data[:size])) indices.append((glyphID, textLength, textOffset)) data = data[size:] assert len(data) == 0 assert indices[-5] == (0XFFFE, 0, -1409540300), "bad magic number" # 0xABFC1F34 self.indices = indices[:-5] self.extra_indices = indices[-4:] - + def compile(self, ttFont): if not hasattr(self, "indices"): - # We have no corresponging table (TSI1 or TSI3); let's return + # We have no corresponding table (TSI1 or TSI3); let's return # no data, which effectively means "ignore us". 
return "" - data = "" + data = b"" for index, textLength, textOffset in self.indices: data = data + struct.pack(tsi0Format, index, textLength, textOffset) data = data + struct.pack(tsi0Format, 0XFFFE, 0, -1409540300) # 0xABFC1F34 for index, textLength, textOffset in self.extra_indices: data = data + struct.pack(tsi0Format, index, textLength, textOffset) return data - + def set(self, indices, extra_indices): # gets called by 'TSI1' or 'TSI3' self.indices = indices self.extra_indices = extra_indices - + def toXML(self, writer, ttFont): writer.comment("This table will be calculated by the compiler") writer.newline() - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I__1.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__1.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I__1.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__1.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,12 +1,13 @@ -import DefaultTable -import string +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import DefaultTable class table_T_S_I__1(DefaultTable.DefaultTable): - + extras = {0xfffa: "ppgm", 0xfffb: "cvt", 0xfffc: "reserved", 0xfffd: "fpgm"} - + indextable = "TSI0" - + def decompile(self, data, ttFont): indextable = ttFont[self.indextable] self.glyphPrograms = {} @@ -21,7 +22,7 @@ assert len(text) == textLength if text: self.glyphPrograms[ttFont.getGlyphName(glyphID)] = text - + self.extraPrograms = {} for i in range(len(indextable.extra_indices)): extraCode, textLength, textOffset = indextable.extra_indices[i] @@ -34,41 +35,40 @@ assert len(text) == textLength if text: self.extraPrograms[self.extras[extraCode]] = text - + def compile(self, ttFont): if not hasattr(self, "glyphPrograms"): self.glyphPrograms = {} self.extraPrograms = {} - data = '' + data = b'' indextable = ttFont[self.indextable] glyphNames = ttFont.getGlyphOrder() - + indices = [] for i in range(len(glyphNames)): if len(data) % 2: - data = data + "\015" # align on 2-byte boundaries, fill with return chars. Yum. + data = data + b"\015" # align on 2-byte boundaries, fill with return chars. Yum. name = glyphNames[i] - if self.glyphPrograms.has_key(name): - text = self.glyphPrograms[name] + if name in self.glyphPrograms: + text = tobytes(self.glyphPrograms[name]) else: - text = "" + text = b"" textLength = len(text) if textLength >= 0x8000: textLength = 0x8000 # XXX ??? indices.append((i, textLength, len(data))) data = data + text - + extra_indices = [] - codes = self.extras.items() - codes.sort() + codes = sorted(self.extras.items()) for i in range(len(codes)): if len(data) % 2: - data = data + "\015" # align on 2-byte boundaries, fill with return chars. + data = data + b"\015" # align on 2-byte boundaries, fill with return chars. 
code, name = codes[i] - if self.extraPrograms.has_key(name): - text = self.extraPrograms[name] + if name in self.extraPrograms: + text = tobytes(self.extraPrograms[name]) else: - text = "" + text = b"" textLength = len(text) if textLength >= 0x8000: textLength = 0x8000 # XXX ??? @@ -76,10 +76,9 @@ data = data + text indextable.set(indices, extra_indices) return data - + def toXML(self, writer, ttFont): - names = self.glyphPrograms.keys() - names.sort() + names = sorted(self.glyphPrograms.keys()) writer.newline() for name in names: text = self.glyphPrograms[name] @@ -87,33 +86,31 @@ continue writer.begintag("glyphProgram", name=name) writer.newline() - writer.write_noindent(string.replace(text, "\r", "\n")) + writer.write_noindent(text.replace(b"\r", b"\n")) writer.newline() writer.endtag("glyphProgram") writer.newline() writer.newline() - extra_names = self.extraPrograms.keys() - extra_names.sort() + extra_names = sorted(self.extraPrograms.keys()) for name in extra_names: text = self.extraPrograms[name] if not text: continue writer.begintag("extraProgram", name=name) writer.newline() - writer.write_noindent(string.replace(text, "\r", "\n")) + writer.write_noindent(text.replace(b"\r", b"\n")) writer.newline() writer.endtag("extraProgram") writer.newline() writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "glyphPrograms"): self.glyphPrograms = {} self.extraPrograms = {} - lines = string.split(string.replace(string.join(content, ""), "\r", "\n"), "\n") - text = string.join(lines[1:-1], "\r") + lines = strjoin(content).replace("\r", "\n").split("\n") + text = '\r'.join(lines[1:-1]) if name == "glyphProgram": self.glyphPrograms[attrs["name"]] = text elif name == "extraProgram": self.extraPrograms[attrs["name"]] = text - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I__2.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__2.py --- 
fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I__2.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__2.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,8 +1,9 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools import ttLib superclass = ttLib.getTableClass("TSI0") class table_T_S_I__2(superclass): - - dependencies = ["TSI3"] + dependencies = ["TSI3"] diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I__3.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__3.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I__3.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__3.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,11 +1,11 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools import ttLib superclass = ttLib.getTableClass("TSI1") class table_T_S_I__3(superclass): - - extras = {0xfffa: "reserved0", 0xfffb: "reserved1", 0xfffc: "reserved2", 0xfffd: "reserved3"} - - indextable = "TSI2" + extras = {0xfffa: "reserved0", 0xfffb: "reserved1", 0xfffc: "reserved2", 0xfffd: "reserved3"} + indextable = "TSI2" diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I__5.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__5.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I__5.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__5.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,43 +1,42 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable import sys -import DefaultTable import array -from fontTools import ttLib -from fontTools.misc.textTools import safeEval class table_T_S_I__5(DefaultTable.DefaultTable): - + def decompile(self, data, ttFont): numGlyphs = ttFont['maxp'].numGlyphs assert len(data) == 2 * numGlyphs a = array.array("H") a.fromstring(data) - if sys.byteorder <> "big": + if sys.byteorder != "big": a.byteswap() self.glyphGrouping = {} for i in range(numGlyphs): self.glyphGrouping[ttFont.getGlyphName(i)] = a[i] - + def compile(self, ttFont): glyphNames = ttFont.getGlyphOrder() a = array.array("H") for i in range(len(glyphNames)): a.append(self.glyphGrouping[glyphNames[i]]) - if sys.byteorder <> "big": + if sys.byteorder != "big": a.byteswap() return a.tostring() - + def toXML(self, writer, ttFont): - names = self.glyphGrouping.keys() - names.sort() + names = sorted(self.glyphGrouping.keys()) for glyphName in names: writer.simpletag("glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName]) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "glyphGrouping"): self.glyphGrouping = {} - if name <> "glyphgroup": + if name != "glyphgroup": return self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"]) - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_B_.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_B_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_B_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_B_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,5 +1,6 @@ -import asciiTable +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import asciiTable class table_T_S_I_B_(asciiTable.asciiTable): pass - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_D_.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_D_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_D_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_D_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,5 +1,6 @@ -import asciiTable +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable class table_T_S_I_D_(asciiTable.asciiTable): pass - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_J_.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_J_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_J_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_J_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,5 +1,6 @@ -import asciiTable +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable class table_T_S_I_J_(asciiTable.asciiTable): pass - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_P_.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_P_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_P_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_P_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,5 +1,6 @@ -import asciiTable +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import asciiTable class table_T_S_I_P_(asciiTable.asciiTable): pass - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_S_.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_S_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_S_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_S_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,5 +1,6 @@ -import asciiTable +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable class table_T_S_I_S_(asciiTable.asciiTable): pass - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_V_.py fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_V_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/T_S_I_V_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_V_.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,5 +1,6 @@ -import asciiTable +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import asciiTable class table_T_S_I_V_(asciiTable.asciiTable): pass - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/ttProgram.py fonttools-3.0/Lib/fontTools/ttLib/tables/ttProgram.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/ttProgram.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/ttProgram.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,148 +1,148 @@ """ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs.""" -import array -import re, string +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools.misc.textTools import num2binary, binary2num, readHex +import array +import re # first, the list of instructions that eat bytes or words from the instruction stream streamInstructions = [ -# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- -------------- -# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes -# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- -------------- - (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn - (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn - (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn - (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn -# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- -------------- +# +# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes +# + (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn + (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn + (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn + (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn ] -# next, the list of "normal" instructions +# next, the list of "normal" 
instructions instructions = [ -# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- -------------- -# opcode mnemonic argBits descriptive name pops pushes pops pushes -# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- -------------- - (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p - - (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n| - (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2) - (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 - - (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... , ploopvalue - - (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b - (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f - - (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n) - (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek - (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack - - (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n - - (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - - (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - - (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - - (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - - (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - - (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - - (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n - (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2 - (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e - (0x59, 'EIF', 0, 'EndIf', 0, 0), # - - - (0x1b, 'ELSE', 0, 'Else', 0, 0), # - - - (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - - - (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b - (0x57, 'EVEN', 0, 'Even', 1, 1), # e b - (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f - - (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - - - (0x4d, 'FLIPON', 0, 
'SetAutoFlipOn', 0, 0), # - - - (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue - - (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l - - (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l - - (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n) - (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c - (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result - (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py - (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py - (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b - (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b - (0x89, 'IDEF', 0, 'InstructionDefinition', 1, 0), # f - - (0x58, 'IF', 0, 'If', 1, 0), # e - - (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v - - (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue - - (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p - - (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - - - (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset - - (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset - - (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset - - (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count - - (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b - (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b - (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2) - (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d - (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p - - (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p - - (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p - - (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2) - (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek - (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p - - (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem - (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize - (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p - - (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64 - (0x65, 'NEG', 0, 'Negate', 1, 
1), # n -n - (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b - (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e ) - (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2 - (0x56, 'ODD', 0, 'Odd', 1, 1), # e b - (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b - (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e - - (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value - (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - - - (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - - - (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c - (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2 - (0x43, 'RS', 0, 'ReadStore', 1, 1), # n v - (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - - - (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - - - (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - - - (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - - - (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n - - (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight - - (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n - - (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n - - (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p - - (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n - - (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n - - (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 - - (0x5f, 'SDS', 0, 'SetDeltaShiftInGState', 1, 0), # n - - (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x - - (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - - - (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 - - (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - - - (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c - - (0x32, 'SHP', 1, 'ShiftPointByLastPoint', -1, 0), # p1, p2, ..., ploopvalue - - (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue - - (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e - - (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n - - (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance - - (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x - - 
(0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - - - (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 - - (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n - - (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p - - (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p - - (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p - - (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n - - (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n - - (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2) - (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - - - (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2 - (0x13, 'SZP0', 0, 'SetZonePointer0', 1, 0), # n - - (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n - - (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n - - (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n - - (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p - - (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l - - (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l - - (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l - -# ------ ----------- ----- ------------------------ --- ------ ---------------------------------- -------------- +# +#, opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes +# + (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p - + (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n| + (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2) + (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 - + (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... 
, ploopvalue - + (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b + (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f - + (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n) + (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek + (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack - + (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n - + (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n + (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2 + (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e + (0x59, 'EIF', 0, 'EndIf', 0, 0), # - - + (0x1b, 'ELSE', 0, 'Else', 0, 0), # - - + (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - - + (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b + (0x57, 'EVEN', 0, 'Even', 1, 1), # e b + (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f - + (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - - + (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - - + (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue - + (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l - + (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l - + (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n) + (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c + (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result + (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py + (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py + (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b + (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b + (0x89, 
'IDEF', 0, 'InstructionDefinition', 1, 0), # f - + (0x58, 'IF', 0, 'If', 1, 0), # e - + (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v - + (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue - + (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p - + (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - - + (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset - + (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset - + (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset - + (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count - + (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b + (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b + (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2) + (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d + (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p - + (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p - + (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p - + (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2) + (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek + (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p - + (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem + (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize + (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p - + (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64 + (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n + (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b + (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e ) + (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2 + (0x56, 'ODD', 0, 'Odd', 1, 1), # e b + (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b + (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e - + (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value + (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - - + (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - - + (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c + (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2 + (0x43, 'RS', 0, 'ReadStore', 1, 1), # 
n v + (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - - + (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - - + (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - - + (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - - + (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n - + (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight - + (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n - + (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n - + (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p - + (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n - + (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n - + (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 - + (0x5f, 'SDS', 0, 'SetDeltaShiftInGState',1, 0), # n - + (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x - + (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - - + (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 - + (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - - + (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c - + (0x32, 'SHP', 1, 'ShiftPointByLastPoint',-1, 0), # p1, p2, ..., ploopvalue - + (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue - + (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e - + (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n - + (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance - + (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x - + (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - - + (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 - + (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n - + (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p - + (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p - + (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p - + (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n - + (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n - + (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2) + (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - - + (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2 + (0x13, 
'SZP0', 0, 'SetZonePointer0', 1, 0), # n - + (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n - + (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n - + (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n - + (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p - + (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l - + (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l - + (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l - ] @@ -188,43 +188,43 @@ _tokenRE = re.compile(_token) _whiteRE = re.compile(r"\s*") -_pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]*).*?\*/") +_pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/") -def _skipWhite(data, pos, _whiteRE=_whiteRE): +def _skipWhite(data, pos): m = _whiteRE.match(data, pos) newPos = m.regs[0][1] assert newPos >= pos return newPos -class Program: - +class Program(object): + def __init__(self): pass - + def fromBytecode(self, bytecode): self.bytecode = array.array("B", bytecode) if hasattr(self, "assembly"): del self.assembly - + def fromAssembly(self, assembly): self.assembly = assembly if hasattr(self, "bytecode"): del self.bytecode - + def getBytecode(self): if not hasattr(self, "bytecode"): self._assemble() return self.bytecode.tostring() - - def getAssembly(self): + + def getAssembly(self, preserve=False): if not hasattr(self, "assembly"): - self._disassemble() + self._disassemble(preserve=preserve) return self.assembly - + def toXML(self, writer, ttFont): - if ttFont.disassembleInstructions: + if not hasattr (ttFont, "disassembleInstructions") or ttFont.disassembleInstructions: assembly = self.getAssembly() writer.begintag("assembly") writer.newline() @@ -242,11 +242,11 @@ j = 0 for j in range(nValues): if j and not (j % 25): - writer.write(string.join(line, " ")) + writer.write(' '.join(line)) writer.newline() line = [] line.append(assembly[i+j]) - writer.write(string.join(line, " ")) + writer.write(' '.join(line)) writer.newline() i = i + j + 1 writer.endtag("assembly") @@ -255,40 +255,43 @@ 
writer.newline() writer.dumphex(self.getBytecode()) writer.endtag("bytecode") - - def fromXML(self, (name, attrs, content), ttFont): + + def fromXML(self, name, attrs, content, ttFont): if name == "assembly": - self.fromAssembly(string.join(content, "")) + self.fromAssembly(strjoin(content)) self._assemble() del self.assembly else: assert name == "bytecode" self.fromBytecode(readHex(content)) - - def _assemble(self, - skipWhite=_skipWhite, mnemonicDict=mnemonicDict, strip=string.strip, - binary2num=binary2num): + + def _assemble(self): assembly = self.assembly - if type(assembly) == type([]): - assembly = string.join(assembly, " ") + if isinstance(assembly, type([])): + assembly = ' '.join(assembly) bytecode = [] push = bytecode.append lenAssembly = len(assembly) - pos = skipWhite(assembly, 0) + pos = _skipWhite(assembly, 0) while pos < lenAssembly: m = _tokenRE.match(assembly, pos) if m is None: - raise tt_instructions_error, "Syntax error in TT program (%s)" % assembly[pos-5:pos+15] + raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos-5:pos+15]) dummy, mnemonic, arg, number, comment = m.groups() pos = m.regs[0][1] if comment: + pos = _skipWhite(assembly, pos) continue - - arg = strip(arg) - if mnemonic not in ("NPUSHB", "NPUSHW", "PUSHB", "PUSHW"): + + arg = arg.strip() + if mnemonic.startswith("INSTR"): + # Unknown instruction + op = int(mnemonic[5:]) + push(op) + elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"): op, argBits, name = mnemonicDict[mnemonic] - if len(arg) <> argBits: - raise tt_instructions_error, "Incorrect number of argument bits (%s[%s])" % (mnemonic, arg) + if len(arg) != argBits: + raise tt_instructions_error("Incorrect number of argument bits (%s[%s])" % (mnemonic, arg)) if arg: arg = binary2num(arg) push(op + arg) @@ -296,109 +299,200 @@ push(op) else: args = [] + pos = _skipWhite(assembly, pos) while pos < lenAssembly: - pos = skipWhite(assembly, pos) m = _tokenRE.match(assembly, pos) if m is 
None: - raise tt_instructions_error, "Syntax error in TT program (%s)" % assembly[pos:pos+15] - dummy, mnemonic, arg, number, comment = m.groups() + raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos:pos+15]) + dummy, _mnemonic, arg, number, comment = m.groups() if number is None and comment is None: break pos = m.regs[0][1] + pos = _skipWhite(assembly, pos) if comment is not None: continue args.append(int(number)) - if max(args) > 255 or min(args) < 0: - words = 1 - mnemonic = "PUSHW" - else: - words = 0 - mnemonic = "PUSHB" nArgs = len(args) - if nArgs <= 8: - op, argBits, name = streamMnemonicDict[mnemonic] - op = op + nArgs - 1 - push(op) - elif nArgs < 256: - mnemonic = "N" + mnemonic - op, argBits, name = streamMnemonicDict[mnemonic] - push(op) - push(nArgs) + if mnemonic == "PUSH": + # Automatically choose the most compact representation + nWords = 0 + while nArgs: + while nWords < nArgs and nWords < 255 and not (0 <= args[nWords] <= 255): + nWords += 1 + nBytes = 0 + while nWords+nBytes < nArgs and nBytes < 255 and 0 <= args[nWords+nBytes] <= 255: + nBytes += 1 + if nBytes < 2 and nWords + nBytes < 255 and nWords + nBytes != nArgs: + # Will write bytes as words + nWords += nBytes + continue + + # Write words + if nWords: + if nWords <= 8: + op, argBits, name = streamMnemonicDict["PUSHW"] + op = op + nWords - 1 + push(op) + else: + op, argBits, name = streamMnemonicDict["NPUSHW"] + push(op) + push(nWords) + for value in args[:nWords]: + assert -32768 <= value < 32768, "PUSH value out of range %d" % value + push((value >> 8) & 0xff) + push(value & 0xff) + + # Write bytes + if nBytes: + pass + if nBytes <= 8: + op, argBits, name = streamMnemonicDict["PUSHB"] + op = op + nBytes - 1 + push(op) + else: + op, argBits, name = streamMnemonicDict["NPUSHB"] + push(op) + push(nBytes) + for value in args[nWords:nWords+nBytes]: + push(value) + + nTotal = nWords + nBytes + args = args[nTotal:] + nArgs -= nTotal + nWords = 0 else: - raise 
tt_instructions_error, "More than 255 push arguments (%s)" % nArgs - if words: - for value in args: - push((value >> 8) & 0xff) - push(value & 0xff) - else: - for value in args: - push(value) - pos = skipWhite(assembly, pos) - + # Write exactly what we've been asked to + words = mnemonic[-1] == "W" + op, argBits, name = streamMnemonicDict[mnemonic] + if mnemonic[0] != "N": + assert nArgs <= 8, nArgs + op = op + nArgs - 1 + push(op) + else: + assert nArgs < 256 + push(op) + push(nArgs) + if words: + for value in args: + assert -32768 <= value < 32768, "PUSHW value out of range %d" % value + push((value >> 8) & 0xff) + push(value & 0xff) + else: + for value in args: + assert 0 <= value < 256, "PUSHB value out of range %d" % value + push(value) + + pos = _skipWhite(assembly, pos) + if bytecode: assert max(bytecode) < 256 and min(bytecode) >= 0 self.bytecode = array.array("B", bytecode) - - def _disassemble(self): + + def _disassemble(self, preserve=False): assembly = [] i = 0 bytecode = self.bytecode numBytecode = len(bytecode) while i < numBytecode: op = bytecode[i] - arg = 0 try: mnemonic, argBits, argoffset, name = opcodeDict[op] except KeyError: - try: - mnemonic, argBits, argoffset, name = streamOpcodeDict[op] - except KeyError: - raise tt_instructions_error, "illegal opcode: 0x%.2x" % op - pushBytes = pushWords = 0 - if argBits: - if mnemonic == "PUSHB": - pushBytes = op - argoffset + 1 - else: - pushWords = op - argoffset + 1 - else: - i = i + 1 - if mnemonic == "NPUSHB": - pushBytes = bytecode[i] + if op in streamOpcodeDict: + values = [] + + # Merge consecutive PUSH operations + while bytecode[i] in streamOpcodeDict: + op = bytecode[i] + mnemonic, argBits, argoffset, name = streamOpcodeDict[op] + words = mnemonic[-1] == "W" + if argBits: + nValues = op - argoffset + 1 + else: + i = i + 1 + nValues = bytecode[i] + i = i + 1 + assert nValues > 0 + if not words: + for j in range(nValues): + value = bytecode[i] + values.append(repr(value)) + i = i + 1 + else: + 
for j in range(nValues): + # cast to signed int16 + value = (bytecode[i] << 8) | bytecode[i+1] + if value >= 0x8000: + value = value - 0x10000 + values.append(repr(value)) + i = i + 2 + if preserve: + break + + if not preserve: + mnemonic = "PUSH" + nValues = len(values) + if nValues == 1: + assembly.append("%s[ ] /* 1 value pushed */" % mnemonic) else: - pushWords = bytecode[i] - i = i + 1 - nValues = pushBytes or pushWords - assert nValues > 0 - if nValues == 1: - assembly.append("%s[ ] /* %s value pushed */" % (mnemonic, nValues)) + assembly.append("%s[ ] /* %s values pushed */" % (mnemonic, nValues)) + assembly.extend(values) else: - assembly.append("%s[ ] /* %s values pushed */" % (mnemonic, nValues)) - for j in range(pushBytes): - value = bytecode[i] - assembly.append(`value`) + assembly.append("INSTR%d[ ]" % op) i = i + 1 - for j in range(pushWords): - # cast to signed int16 - value = (bytecode[i] << 8) | bytecode[i+1] - if value >= 0x8000: - value = value - 0x10000 - assembly.append(`value`) - i = i + 2 else: if argBits: - assembly.append(mnemonic + "[%s]" % num2binary(op - argoffset, argBits)) + assembly.append(mnemonic + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name)) else: - assembly.append(mnemonic + "[ ]") + assembly.append(mnemonic + "[ ] /* %s */" % name) i = i + 1 self.assembly = assembly + def __bool__(self): + """ + >>> p = Program() + >>> bool(p) + False + >>> bc = array.array("B", [0]) + >>> p.fromBytecode(bc) + >>> bool(p) + True + >>> p.bytecode.pop() + 0 + >>> bool(p) + False + + >>> p = Program() + >>> asm = ['SVTCA[0]'] + >>> p.fromAssembly(asm) + >>> bool(p) + True + >>> p.assembly.pop() + 'SVTCA[0]' + >>> bool(p) + False + """ + return ((hasattr(self, 'assembly') and len(self.assembly) > 0) or + (hasattr(self, 'bytecode') and len(self.bytecode) > 0)) + + __nonzero__ = __bool__ + + +def _test(): + """ + >>> _test() + True + """ + + bc = b"""@;:9876543210/.-,+*)(\'&%$#"! 
\037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
\212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 
F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-""" -if __name__ == "__main__": - bc = """@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 
\260F\360/\260\000\022\033!! \212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F 
ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-""" - p = Program() p.fromBytecode(bc) - asm = p.getAssembly() + asm = p.getAssembly(preserve=True) p.fromAssembly(asm) - print bc == p.getBytecode() + print(bc == p.getBytecode()) +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/V_D_M_X_.py fonttools-3.0/Lib/fontTools/ttLib/tables/V_D_M_X_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/V_D_M_X_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/V_D_M_X_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,234 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import DefaultTable +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +import struct + +VDMX_HeaderFmt = """ + > # big endian + version: H # Version number (0 or 1) + numRecs: H # Number of VDMX groups present + numRatios: H # Number of aspect ratio groupings +""" +# the VMDX header is followed by an array of RatRange[numRatios] (i.e. aspect +# ratio ranges); +VDMX_RatRangeFmt = """ + > # big endian + bCharSet: B # Character set + xRatio: B # Value to use for x-Ratio + yStartRatio: B # Starting y-Ratio value + yEndRatio: B # Ending y-Ratio value +""" +# followed by an array of offset[numRatios] from start of VDMX table to the +# VDMX Group for this ratio range (offsets will be re-calculated on compile); +# followed by an array of Group[numRecs] records; +VDMX_GroupFmt = """ + > # big endian + recs: H # Number of height records in this group + startsz: B # Starting yPelHeight + endsz: B # Ending yPelHeight +""" +# followed by an array of vTable[recs] records. 
+VDMX_vTableFmt = """ + > # big endian + yPelHeight: H # yPelHeight to which values apply + yMax: h # Maximum value (in pels) for this yPelHeight + yMin: h # Minimum value (in pels) for this yPelHeight +""" + + +class table_V_D_M_X_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + pos = 0 # track current position from to start of VDMX table + dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self) + pos += sstruct.calcsize(VDMX_HeaderFmt) + self.ratRanges = [] + for i in range(self.numRatios): + ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data) + pos += sstruct.calcsize(VDMX_RatRangeFmt) + # the mapping between a ratio and a group is defined further below + ratio['groupIndex'] = None + self.ratRanges.append(ratio) + lenOffset = struct.calcsize('>H') + _offsets = [] # temporarily store offsets to groups + for i in range(self.numRatios): + offset = struct.unpack('>H', data[0:lenOffset])[0] + data = data[lenOffset:] + pos += lenOffset + _offsets.append(offset) + self.groups = [] + for groupIndex in range(self.numRecs): + # the offset to this group from beginning of the VDMX table + currOffset = pos + group, data = sstruct.unpack2(VDMX_GroupFmt, data) + # the group lenght and bounding sizes are re-calculated on compile + recs = group.pop('recs') + startsz = group.pop('startsz') + endsz = group.pop('endsz') + pos += sstruct.calcsize(VDMX_GroupFmt) + for j in range(recs): + vTable, data = sstruct.unpack2(VDMX_vTableFmt, data) + vTableLength = sstruct.calcsize(VDMX_vTableFmt) + pos += vTableLength + # group is a dict of (yMax, yMin) tuples keyed by yPelHeight + group[vTable['yPelHeight']] = (vTable['yMax'], vTable['yMin']) + # make sure startsz and endsz match the calculated values + minSize = min(group.keys()) + maxSize = max(group.keys()) + assert startsz == minSize, \ + "startsz (%s) must equal min yPelHeight (%s): group %d" % \ + (group.startsz, minSize, groupIndex) + assert endsz == maxSize, \ + "endsz (%s) must equal max yPelHeight (%s): 
group %d" % \ + (group.endsz, maxSize, groupIndex) + self.groups.append(group) + # match the defined offsets with the current group's offset + for offsetIndex, offsetValue in enumerate(_offsets): + # when numRecs < numRatios there can more than one ratio range + # sharing the same VDMX group + if currOffset == offsetValue: + # map the group with the ratio range thas has the same + # index as the offset to that group (it took me a while..) + self.ratRanges[offsetIndex]['groupIndex'] = groupIndex + # check that all ratio ranges have a group + for i in range(self.numRatios): + ratio = self.ratRanges[i] + if ratio['groupIndex'] is None: + from fontTools import ttLib + raise ttLib.TTLibError( + "no group defined for ratRange %d" % i) + + def _getOffsets(self): + """ + Calculate offsets to VDMX_Group records. + For each ratRange return a list of offset values from the beginning of + the VDMX table to a VDMX_Group. + """ + lenHeader = sstruct.calcsize(VDMX_HeaderFmt) + lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt) + lenOffset = struct.calcsize('>H') + lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt) + lenVTable = sstruct.calcsize(VDMX_vTableFmt) + # offset to the first group + pos = lenHeader + self.numRatios*lenRatRange + self.numRatios*lenOffset + groupOffsets = [] + for group in self.groups: + groupOffsets.append(pos) + lenGroup = lenGroupHeader + len(group) * lenVTable + pos += lenGroup # offset to next group + offsets = [] + for ratio in self.ratRanges: + groupIndex = ratio['groupIndex'] + offsets.append(groupOffsets[groupIndex]) + return offsets + + def compile(self, ttFont): + if not(self.version == 0 or self.version == 1): + from fontTools import ttLib + raise ttLib.TTLibError( + "unknown format for VDMX table: version %s" % self.version) + data = sstruct.pack(VDMX_HeaderFmt, self) + for ratio in self.ratRanges: + data += sstruct.pack(VDMX_RatRangeFmt, ratio) + # recalculate offsets to VDMX groups + for offset in self._getOffsets(): + data += struct.pack('>H', 
offset) + for group in self.groups: + recs = len(group) + startsz = min(group.keys()) + endsz = max(group.keys()) + gHeader = {'recs': recs, 'startsz': startsz, 'endsz': endsz} + data += sstruct.pack(VDMX_GroupFmt, gHeader) + for yPelHeight, (yMax, yMin) in sorted(group.items()): + vTable = {'yPelHeight': yPelHeight, 'yMax': yMax, 'yMin': yMin} + data += sstruct.pack(VDMX_vTableFmt, vTable) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + writer.begintag("ratRanges") + writer.newline() + for ratio in self.ratRanges: + groupIndex = ratio['groupIndex'] + writer.simpletag( + "ratRange", + bCharSet=ratio['bCharSet'], + xRatio=ratio['xRatio'], + yStartRatio=ratio['yStartRatio'], + yEndRatio=ratio['yEndRatio'], + groupIndex=groupIndex + ) + writer.newline() + writer.endtag("ratRanges") + writer.newline() + writer.begintag("groups") + writer.newline() + for groupIndex in range(self.numRecs): + group = self.groups[groupIndex] + recs = len(group) + startsz = min(group.keys()) + endsz = max(group.keys()) + writer.begintag("group", index=groupIndex) + writer.newline() + writer.comment("recs=%d, startsz=%d, endsz=%d" % + (recs, startsz, endsz)) + writer.newline() + for yPelHeight in group.keys(): + yMax, yMin = group[yPelHeight] + writer.simpletag( + "record", yPelHeight=yPelHeight, yMax=yMax, yMin=yMin) + writer.newline() + writer.endtag("group") + writer.newline() + writer.endtag("groups") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.version = safeEval(attrs["value"]) + elif name == "ratRanges": + if not hasattr(self, "ratRanges"): + self.ratRanges = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "ratRange": + if not hasattr(self, "numRatios"): + self.numRatios = 1 + else: + self.numRatios += 1 + ratio = { + "bCharSet": safeEval(attrs["bCharSet"]), + "xRatio": 
safeEval(attrs["xRatio"]), + "yStartRatio": safeEval(attrs["yStartRatio"]), + "yEndRatio": safeEval(attrs["yEndRatio"]), + "groupIndex": safeEval(attrs["groupIndex"]) + } + self.ratRanges.append(ratio) + elif name == "groups": + if not hasattr(self, "groups"): + self.groups = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "group": + if not hasattr(self, "numRecs"): + self.numRecs = 1 + else: + self.numRecs += 1 + group = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "record": + yPelHeight = safeEval(attrs["yPelHeight"]) + yMax = safeEval(attrs["yMax"]) + yMin = safeEval(attrs["yMin"]) + group[yPelHeight] = (yMax, yMin) + self.groups.append(group) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_v_h_e_a.py fonttools-3.0/Lib/fontTools/ttLib/tables/_v_h_e_a.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_v_h_e_a.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_v_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,61 +1,76 @@ -import DefaultTable -import sstruct +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval +from . 
import DefaultTable vheaFormat = """ > # big endian - tableVersion: 16.16F - ascent: h - descent: h - lineGap: h - advanceHeightMax: H - minTopSideBearing: h + tableVersion: 16.16F + ascent: h + descent: h + lineGap: h + advanceHeightMax: H + minTopSideBearing: h minBottomSideBearing: h - yMaxExtent: h - caretSlopeRise: h - caretSlopeRun: h - reserved0: h - reserved1: h - reserved2: h - reserved3: h - reserved4: h - metricDataFormat: h - numberOfVMetrics: H + yMaxExtent: h + caretSlopeRise: h + caretSlopeRun: h + reserved0: h + reserved1: h + reserved2: h + reserved3: h + reserved4: h + metricDataFormat: h + numberOfVMetrics: H """ class table__v_h_e_a(DefaultTable.DefaultTable): - + + # Note: Keep in sync with table__h_h_e_a + dependencies = ['vmtx', 'glyf'] - + def decompile(self, data, ttFont): sstruct.unpack(vheaFormat, data, self) - + def compile(self, ttFont): - self.recalc(ttFont) + if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + self.recalc(ttFont) return sstruct.pack(vheaFormat, self) - + def recalc(self, ttFont): vtmxTable = ttFont['vmtx'] - if ttFont.has_key('glyf'): - if not ttFont.isLoaded('glyf'): - return + if 'glyf' in ttFont: glyfTable = ttFont['glyf'] - advanceHeightMax = -100000 # arbitrary big negative number - minTopSideBearing = 100000 # arbitrary big number - minBottomSideBearing = 100000 # arbitrary big number - yMaxExtent = -100000 # arbitrary big negative number - + INFINITY = 100000 + advanceHeightMax = 0 + minTopSideBearing = +INFINITY # arbitrary big number + minBottomSideBearing = +INFINITY # arbitrary big number + yMaxExtent = -INFINITY # arbitrary big negative number + for name in ttFont.getGlyphOrder(): height, tsb = vtmxTable[name] + advanceHeightMax = max(advanceHeightMax, height) g = glyfTable[name] - if g.numberOfContours <= 0: + if g.numberOfContours == 0: continue - advanceHeightMax = max(advanceHeightMax, height) + if g.numberOfContours < 0 and not hasattr(g, "yMax"): + # Composite glyph without extents set. 
+ # Calculate those. + g.recalcBounds(glyfTable) minTopSideBearing = min(minTopSideBearing, tsb) - rsb = height - tsb - (g.yMax - g.yMin) - minBottomSideBearing = min(minBottomSideBearing, rsb) + bsb = height - tsb - (g.yMax - g.yMin) + minBottomSideBearing = min(minBottomSideBearing, bsb) extent = tsb + (g.yMax - g.yMin) yMaxExtent = max(yMaxExtent, extent) + + if yMaxExtent == -INFINITY: + # No glyph has outlines. + minTopSideBearing = 0 + minBottomSideBearing = 0 + yMaxExtent = 0 + self.advanceHeightMax = advanceHeightMax self.minTopSideBearing = minTopSideBearing self.minBottomSideBearing = minBottomSideBearing @@ -63,16 +78,13 @@ else: # XXX CFF recalc... pass - + def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(vheaFormat) for name in names: value = getattr(self, name) - if type(value) == type(0L): - value = int(value) writer.simpletag(name, value=value) writer.newline() - - def fromXML(self, (name, attrs, content), ttFont): - setattr(self, name, safeEval(attrs["value"])) + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/_v_m_t_x.py fonttools-3.0/Lib/fontTools/ttLib/tables/_v_m_t_x.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/_v_m_t_x.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/_v_m_t_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,9 +1,11 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * from fontTools import ttLib superclass = ttLib.getTableClass("hmtx") class table__v_m_t_x(superclass): - + headerTag = 'vhea' advanceName = 'height' sideBearingName = 'tsb' diff -Nru fonttools-2.4/Lib/fontTools/ttLib/tables/V_O_R_G_.py fonttools-3.0/Lib/fontTools/ttLib/tables/V_O_R_G_.py --- fonttools-2.4/Lib/fontTools/ttLib/tables/V_O_R_G_.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/tables/V_O_R_G_.py 2015-08-31 
17:57:15.000000000 +0000 @@ -1,9 +1,9 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . import DefaultTable import operator -import DefaultTable import struct -from fontTools.ttLib import sfnt -from fontTools.misc.textTools import safeEval, readHex -from types import IntType, StringType class table_V_O_R_G_(DefaultTable.DefaultTable): @@ -30,31 +30,30 @@ self.VOriginRecords = vOrig = {} glyphOrder = ttFont.getGlyphOrder() try: - names = map(operator.getitem, [glyphOrder]*self.numVertOriginYMetrics, gids ) + names = map(operator.getitem, [glyphOrder]*self.numVertOriginYMetrics, gids) except IndexError: getGlyphName = self.getGlyphName names = map(getGlyphName, gids ) - map(operator.setitem, [vOrig]*self.numVertOriginYMetrics, names, vids) - + list(map(operator.setitem, [vOrig]*self.numVertOriginYMetrics, names, vids)) def compile(self, ttFont): - vorgs = self.VOriginRecords.values() - names = self.VOriginRecords.keys() + vorgs = list(self.VOriginRecords.values()) + names = list(self.VOriginRecords.keys()) nameMap = ttFont.getReverseGlyphMap() - lenRecords = len(vorgs) + lenRecords = len(vorgs) try: gids = map(operator.getitem, [nameMap]*lenRecords, names) except KeyError: - nameMap = ttFont.getReverseGlyphMap(rebuild=1) + nameMap = ttFont.getReverseGlyphMap(rebuild=True) gids = map(operator.getitem, [nameMap]*lenRecords, names) - vOriginTable = map(None, gids, vorgs) + vOriginTable = list(zip(gids, vorgs)) self.numVertOriginYMetrics = lenRecords vOriginTable.sort() # must be in ascending GID order dataList = [ struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable] header = struct.pack(">HHhH", self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics) dataList.insert(0, header) - data = "".join(dataList) + data = bytesjoin(dataList) return data def toXML(self, writer, ttFont): @@ -79,48 +78,47 @@ vOriginRec = 
VOriginRecord(entry[1], entry[2]) vOriginRec.toXML(writer, ttFont) - def fromXML(self, (name, attrs, content), ttFont): + def fromXML(self, name, attrs, content, ttFont): if not hasattr(self, "VOriginRecords"): self.VOriginRecords = {} self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID if name == "VOriginRecord": - for element in content: - if isinstance(element, StringType): - continue vOriginRec = VOriginRecord() for element in content: - if isinstance(element, StringType): + if isinstance(element, basestring): continue - vOriginRec.fromXML(element, ttFont) + name, attrs, content = element + vOriginRec.fromXML(name, attrs, content, ttFont) self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin - elif attrs.has_key("value"): - value = safeEval(attrs["value"]) - setattr(self, name, value) - + elif "value" in attrs: + setattr(self, name, safeEval(attrs["value"])) def __getitem__(self, glyphSelector): - if type(glyphSelector) == IntType: + if isinstance(glyphSelector, int): # its a gid, convert to glyph name glyphSelector = self.getGlyphName(glyphSelector) - if not self.VOriginRecords.has_key(glyphSelector): + if glyphSelector not in self.VOriginRecords: return self.defaultVertOriginY - + return self.VOriginRecords[glyphSelector] def __setitem__(self, glyphSelector, value): - if type(glyphSelector) == IntType: + if isinstance(glyphSelector, int): # its a gid, convert to glyph name glyphSelector = self.getGlyphName(glyphSelector) if value != self.defaultVertOriginY: self.VOriginRecords[glyphSelector] = value - elif self.VOriginRecords.has_key(glyphSelector): + elif glyphSelector in self.VOriginRecords: del self.VOriginRecords[glyphSelector] -class VOriginRecord: + def __delitem__(self, glyphSelector): + del self.VOriginRecords[glyphSelector] + +class VOriginRecord(object): - def __init__(self, name = None, vOrigin = None): + def __init__(self, name=None, vOrigin=None): self.glyphName = name self.vOrigin = vOrigin @@ 
-134,13 +132,9 @@ writer.endtag("VOriginRecord") writer.newline() - def fromXML(self, (name, attrs, content), ttFont): + def fromXML(self, name, attrs, content, ttFont): value = attrs["value"] if name == "glyphName": setattr(self, name, value) else: - try: - value = safeEval(value) - except OverflowError: - value = long(value) - setattr(self, name, value) + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/test/__init__.py fonttools-3.0/Lib/fontTools/ttLib/test/__init__.py --- fonttools-2.4/Lib/fontTools/ttLib/test/__init__.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/test/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -"""Empty __init__.py file to signal Python this directory is a package. -(It can't be completely empty since WinZip seems to skip empty files.) -""" diff -Nru fonttools-2.4/Lib/fontTools/ttLib/test/ttBrowser.py fonttools-3.0/Lib/fontTools/ttLib/test/ttBrowser.py --- fonttools-2.4/Lib/fontTools/ttLib/test/ttBrowser.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/test/ttBrowser.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,351 +0,0 @@ -"""Mac-OS9-only TrueType browser window, deprecated and no longer maintained.""" - -from fontTools import ttLib -from fontTools.ttLib import macUtils -import macfs -import PyBrowser -import W, Lists -import os -import ATM -import numpy -import Qd -from rf.views.wGlyphList import GlyphList - - -class TableBrowser: - - def __init__(self, path=None, ttFont=None, res_index=None): - W.SetCursor('watch') - if path is None: - self.ttFont = ttFont - self.filename = "????" 
- else: - self.ttFont = ttLib.TTFont(path, res_index) - if res_index is None: - self.filename = os.path.basename(path) - else: - self.filename = os.path.basename(path) + " - " + str(res_index) - self.currentglyph = None - self.glyphs = {} - self.buildinterface() - - def buildinterface(self): - buttonwidth = 120 - glyphlistwidth = 150 - hmargin = 10 - vmargin = 8 - title = self.filename - tables = self.ttFont.keys() - tables.sort() - self.w = w = W.Window((500, 300), title, minsize = (400, 200)) - w.browsetablebutton = W.Button((hmargin, 32, buttonwidth, 16), "Browse tableŠ", - self.browsetable) - w.browsefontbutton = W.Button((hmargin, vmargin, buttonwidth, 16), "Browse fontŠ", - self.browsefont) - w.tablelist = W.List((hmargin, 56, buttonwidth, -128), tables, self.tablelisthit) - - w.divline1 = W.VerticalLine((buttonwidth + 2 * hmargin, vmargin, 1, -vmargin)) - - gleft = buttonwidth + 3 * hmargin + 1 - - hasGlyfTable = self.ttFont.has_key('glyf') - - glyphnames = self.ttFont.getGlyphNames2() # caselessly sorted glyph names - - if hasGlyfTable: - w.glyphlist = GlyphList((gleft, 56, glyphlistwidth, -vmargin), - glyphnames, self.glyphlisthit) - - w.divline2 = W.VerticalLine((buttonwidth + glyphlistwidth + 4 * hmargin + 2, - vmargin, 1, -vmargin)) - - yMin = self.ttFont['head'].yMin - yMax = self.ttFont['head'].yMax - w.gviewer = GlyphViewer((buttonwidth + glyphlistwidth + 5 * hmargin + 3, - vmargin, -hmargin, -vmargin), yMin, yMax) - - w.showpoints = W.CheckBox((gleft, vmargin, glyphlistwidth, 16), "Show points", - self.w.gviewer.toggleshowpoints) - w.showpoints.set(self.w.gviewer.showpoints) - w.showlines = W.CheckBox((gleft, vmargin + 24, glyphlistwidth, 16), "Show lines", - self.w.gviewer.toggleshowlines) - w.showlines.set(self.w.gviewer.showlines) - else: - w.glyphlist = GlyphList((gleft, 56, glyphlistwidth, -vmargin), - glyphnames) - w.noGlyphTable = W.TextBox((gleft, vmargin, -20, 20), "no 'glyf' table found") - - - w.setdefaultbutton(w.browsetablebutton) - - 
w.tocurrentfont = W.Button((hmargin, -120, buttonwidth, 16), "Copy to current font", self.copytocurrentfont) - w.fromcurrentfont = W.Button((hmargin, -96, buttonwidth, 16), "Copy from current font", self.copyfromcurrentfont) - w.saveflat = W.Button((hmargin, -72, buttonwidth, 16), "Save as flat fileŠ", self.saveflat) - w.savesuitcasebutton = W.Button((hmargin, -48, buttonwidth, 16), "Save as suitcaseŠ", self.savesuitcase) - w.savexmlbutton = W.Button((hmargin, -24, buttonwidth, 16), "Save as XMLŠ", self.saveXML) - - w.open() - w.browsetablebutton.enable(0) - - def browsetable(self): - self.tablelisthit(1) - - def browsefont(self): - PyBrowser.Browser(self.ttFont) - - def copytocurrentfont(self): - pass - - def copyfromcurrentfont(self): - pass - - def saveflat(self): - path = putfile("Save font as flat file:", self.filename, ".TTF") - if path: - W.SetCursor('watch') - self.ttFont.save(path) - - def savesuitcase(self): - path = putfile("Save font as suitcase:", self.filename, ".suit") - if path: - W.SetCursor('watch') - self.ttFont.save(path, 1) - - def saveXML(self): - path = putfile("Save font as XML text file:", self.filename, ".ttx") - if path: - W.SetCursor('watch') - pb = macUtils.ProgressBar("Saving %s as XMLŠ" % self.filename) - try: - self.ttFont.saveXML(path, pb) - finally: - pb.close() - - def glyphlisthit(self, isDbl): - sel = self.w.glyphlist.getselectedobjects() - if not sel or sel[0] == self.currentglyph: - return - self.currentglyph = sel[0] - if self.glyphs.has_key(self.currentglyph): - g = self.glyphs[self.currentglyph] - else: - g = Glyph(self.ttFont, self.currentglyph) - self.glyphs[self.currentglyph] = g - self.w.gviewer.setglyph(g) - - def tablelisthit(self, isdbl): - if isdbl: - for tag in self.w.tablelist.getselectedobjects(): - table = self.ttFont[tag] - if tag == 'glyf': - W.SetCursor('watch') - for glyphname in self.ttFont.getGlyphOrder(): - try: - glyph = table[glyphname] - except KeyError: - pass # incomplete font, oh well. 
- PyBrowser.Browser(table) - else: - sel = self.w.tablelist.getselection() - if sel: - self.w.browsetablebutton.enable(1) - else: - self.w.browsetablebutton.enable(0) - - -class Glyph: - - def __init__(self, ttFont, glyphName): - ttglyph = ttFont['glyf'][glyphName] - self.iscomposite = ttglyph.numberOfContours == -1 - self.width, self.lsb = ttFont['hmtx'][glyphName] - if ttglyph.numberOfContours == 0: - self.xMin = 0 - self.contours = [] - return - self.xMin = ttglyph.xMin - coordinates, endPts, flags = ttglyph.getCoordinates(ttFont['glyf']) - self.contours = [] - self.flags = [] - startpt = 0 - for endpt in endPts: - self.contours.append(numpy.array(coordinates[startpt:endpt+1])) - self.flags.append(flags[startpt:endpt+1]) - startpt = endpt + 1 - - def getcontours(self, scale, move): - contours = [] - for i in range(len(self.contours)): - contours.append(((self.contours[i] * numpy.array(scale) + move), self.flags[i])) - return contours - - -class GlyphViewer(W.Widget): - - def __init__(self, possize, yMin, yMax): - W.Widget.__init__(self, possize) - self.glyph = None - extra = 0.02 * (yMax-yMin) - self.yMin, self.yMax = yMin - extra, yMax + extra - self.showpoints = 1 - self.showlines = 1 - - def toggleshowpoints(self, onoff): - self.showpoints = onoff - self.SetPort() - self.draw() - - def toggleshowlines(self, onoff): - self.showlines = onoff - self.SetPort() - self.draw() - - def setglyph(self, glyph): - self.glyph = glyph - self.SetPort() - self.draw() - - def draw(self, visRgn=None): - # This a HELL of a routine, but it's pretty damn fast... 
- import Qd - if not self._visible: - return - Qd.EraseRect(Qd.InsetRect(self._bounds, 1, 1)) - cliprgn = Qd.NewRgn() - savergn = Qd.NewRgn() - Qd.RectRgn(cliprgn, self._bounds) - Qd.GetClip(savergn) - Qd.SetClip(cliprgn) - try: - if self.glyph: - l, t, r, b = Qd.InsetRect(self._bounds, 1, 1) - height = b - t - scale = float(height) / (self.yMax - self.yMin) - topoffset = t + scale * self.yMax - width = scale * self.glyph.width - lsb = scale * self.glyph.lsb - xMin = scale * self.glyph.xMin - # XXXX this is not correct when USE_MY_METRICS is set in component! - leftoffset = l + 0.5 * (r - l - width) - gleftoffset = leftoffset - xMin + lsb - if self.showlines: - Qd.RGBForeColor((0xafff, 0xafff, 0xafff)) - # left sidebearing - Qd.MoveTo(leftoffset, t) - Qd.LineTo(leftoffset, b - 1) - # right sidebearing - Qd.MoveTo(leftoffset + width, t) - Qd.LineTo(leftoffset + width, b - 1) - # baseline - Qd.MoveTo(l, topoffset) - Qd.LineTo(r - 1, topoffset) - - # origin - Qd.RGBForeColor((0x5fff, 0, 0)) - Qd.MoveTo(gleftoffset, topoffset - 16) - Qd.LineTo(gleftoffset, topoffset + 16) - # reset color - Qd.RGBForeColor((0, 0, 0)) - - if self.glyph.iscomposite: - Qd.RGBForeColor((0x7fff, 0x7fff, 0x7fff)) - - ATM.startFillATM() - contours = self.glyph.getcontours((scale, -scale), (gleftoffset, topoffset)) - for contour, flags in contours: - currentpoint = None - done_moveto = 0 - i = 0 - nPoints = len(contour) - while i < nPoints: - pt = contour[i] - if flags[i]: - # onCurve - currentpoint = lineto(pt, done_moveto) - else: - if not currentpoint: - if not flags[i-1]: - currentpoint = 0.5 * (contour[i-1] + pt) - else: - currentpoint = contour[i-1] - if not flags[(i+1) % nPoints]: - endPt = 0.5 * (pt + contour[(i+1) % nPoints]) - else: - endPt = contour[(i+1) % nPoints] - i = i + 1 - # offCurve - currentpoint = qcurveto(currentpoint, - pt, endPt, done_moveto) - done_moveto = 1 - i = i + 1 - ATM.fillClosePathATM() - ATM.endFillATM() - # draw point markers - if self.showpoints: - for 
contour, flags in contours: - Qd.RGBForeColor((0, 0xffff, 0)) - for i in range(len(contour)): - (x, y) = contour[i] - onCurve = flags[i] & 0x1 - if onCurve: - Qd.PaintRect(Qd.InsetRect((x, y, x, y), -2, -2)) - else: - Qd.PaintOval(Qd.InsetRect((x, y, x, y), -2, -2)) - Qd.RGBForeColor((0xffff, 0, 0)) - Qd.RGBForeColor((0, 0, 0)) - Qd.FrameRect(self._bounds) - finally: - Qd.SetClip(savergn) - Qd.DisposeRgn(cliprgn) - Qd.DisposeRgn(savergn) - - -extensions = [".suit", ".xml", ".ttx", ".TTF", ".ttf"] - -def putfile(prompt, filename, newextension): - for ext in extensions: - if filename[-len(ext):] == ext: - filename = filename[:-len(ext)] + newextension - break - else: - filename = filename + newextension - fss, ok = macfs.StandardPutFile(prompt, filename) - if ok: - return fss.as_pathname() - - -def lineto(pt, done_moveto): - x, y = pt - if done_moveto: - ATM.fillLineToATM((x, y)) - else: - ATM.fillMoveToATM((x, y)) - return pt - -def qcurveto(pt0, pt1, pt2, done_moveto): - if not done_moveto: - x0, y0 = pt0 - ATM.fillMoveToATM((x0, y0)) - x1a, y1a = pt0 + 0.6666666666667 * (pt1 - pt0) - x1b, y1b = pt2 + 0.6666666666667 * (pt1 - pt2) - x2, y2 = pt2 - ATM.fillCurveToATM((x1a, y1a), (x1b, y1b), (x2, y2)) - return pt2 - - -def browseTTFont(): - fss, ok = macfs.StandardGetFile() - if not ok: - return - path = fss.as_pathname() - indices = macUtils.getSFNTResIndices(path) - if indices: - for i in indices: - TableBrowser(path, res_index=i) - else: - TableBrowser(path) - - -if __name__ == "__main__": - browseTTFont() - diff -Nru fonttools-2.4/Lib/fontTools/ttLib/testdata/TestOTF-Regular.otx fonttools-3.0/Lib/fontTools/ttLib/testdata/TestOTF-Regular.otx --- fonttools-2.4/Lib/fontTools/ttLib/testdata/TestOTF-Regular.otx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/testdata/TestOTF-Regular.otx 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,519 @@ +<?xml version="1.0" encoding="UTF-8"?> +<ttFont sfntVersion="OTTO" ttLibVersion="2.5"> + + <GlyphOrder> 
+ <!-- The 'id' attribute is only for humans; it is ignored when parsed. --> + <GlyphID id="0" name=".notdef"/> + <GlyphID id="1" name=".null"/> + <GlyphID id="2" name="CR"/> + <GlyphID id="3" name="space"/> + <GlyphID id="4" name="period"/> + <GlyphID id="5" name="ellipsis"/> + </GlyphOrder> + + <head> + <!-- Most of this table will be recalculated by the compiler --> + <tableVersion value="1.0"/> + <fontRevision value="1.0"/> + <checkSumAdjustment value="0x34034793"/> + <magicNumber value="0x5f0f3cf5"/> + <flags value="00000000 00000011"/> + <unitsPerEm value="1000"/> + <created value="Thu Jun 4 14:29:11 2015"/> + <modified value="Sat Aug 1 10:07:17 2015"/> + <xMin value="50"/> + <yMin value="0"/> + <xMax value="668"/> + <yMax value="750"/> + <macStyle value="00000000 00000000"/> + <lowestRecPPEM value="9"/> + <fontDirectionHint value="2"/> + <indexToLocFormat value="0"/> + <glyphDataFormat value="0"/> + </head> + + <hhea> + <tableVersion value="1.0"/> + <ascent value="900"/> + <descent value="-300"/> + <lineGap value="0"/> + <advanceWidthMax value="723"/> + <minLeftSideBearing value="50"/> + <minRightSideBearing value="50"/> + <xMaxExtent value="668"/> + <caretSlopeRise value="1"/> + <caretSlopeRun value="0"/> + <caretOffset value="0"/> + <reserved0 value="0"/> + <reserved1 value="0"/> + <reserved2 value="0"/> + <reserved3 value="0"/> + <metricDataFormat value="0"/> + <numberOfHMetrics value="6"/> + </hhea> + + <maxp> + <tableVersion value="0x5000"/> + <numGlyphs value="6"/> + </maxp> + + <OS_2> + <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex' + will be recalculated by the compiler --> + <version value="4"/> + <xAvgCharWidth value="392"/> + <usWeightClass value="400"/> + <usWidthClass value="5"/> + <fsType value="00000000 00000000"/> + <ySubscriptXSize value="700"/> + <ySubscriptYSize value="650"/> + <ySubscriptXOffset value="0"/> + <ySubscriptYOffset value="140"/> + <ySuperscriptXSize value="700"/> + <ySuperscriptYSize value="650"/> + 
<ySuperscriptXOffset value="0"/> + <ySuperscriptYOffset value="477"/> + <yStrikeoutSize value="50"/> + <yStrikeoutPosition value="250"/> + <sFamilyClass value="2050"/> + <panose> + <bFamilyType value="2"/> + <bSerifStyle value="11"/> + <bWeight value="6"/> + <bProportion value="4"/> + <bContrast value="4"/> + <bStrokeVariation value="2"/> + <bArmStyle value="7"/> + <bLetterForm value="8"/> + <bMidline value="1"/> + <bXHeight value="4"/> + </panose> + <ulUnicodeRange1 value="10000000 00000000 00000000 00000001"/> + <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/> + <achVendID value="NONE"/> + <fsSelection value="00000000 11000000"/> + <usFirstCharIndex value="0"/> + <usLastCharIndex value="8230"/> + <sTypoAscender value="750"/> + <sTypoDescender value="-250"/> + <sTypoLineGap value="200"/> + <usWinAscent value="900"/> + <usWinDescent value="300"/> + <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/> + <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/> + <sxHeight value="500"/> + <sCapHeight value="700"/> + <usDefaultChar value="0"/> + <usBreakChar value="32"/> + <usMaxContext value="0"/> + </OS_2> + + <name> + <namerecord nameID="0" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Copyright (c) 2015 by FontTools. No rights reserved. 
+ </namerecord> + <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test OTF + </namerecord> + <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Regular + </namerecord> + <namerecord nameID="3" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools: Test OTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test OTF + </namerecord> + <namerecord nameID="5" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True"> + TestOTF-Regular + </namerecord> + <namerecord nameID="7" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test OTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + <namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="0" platformID="3" platEncID="1" langID="0x409"> + Copyright (c) 2015 by FontTools. No rights reserved. 
+ </namerecord> + <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409"> + Test OTF + </namerecord> + <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409"> + Regular + </namerecord> + <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409"> + FontTools: Test OTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409"> + Test OTF + </namerecord> + <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409"> + TestOTF-Regular + </namerecord> + <namerecord nameID="7" platformID="3" platEncID="1" langID="0x409"> + Test OTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + </name> + + <cmap> + <tableVersion version="0"/> + <cmap_format_4 platformID="0" platEncID="3" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? 
--> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + <cmap_format_6 platformID="1" platEncID="0" language="0"> + <map code="0x0" name=".null"/> + <map code="0x1" name=".notdef"/> + <map code="0x2" name=".notdef"/> + <map code="0x3" name=".notdef"/> + <map code="0x4" name=".notdef"/> + <map code="0x5" name=".notdef"/> + <map code="0x6" name=".notdef"/> + <map code="0x7" name=".notdef"/> + <map code="0x8" name=".notdef"/> + <map code="0x9" name=".notdef"/> + <map code="0xa" name=".notdef"/> + <map code="0xb" name=".notdef"/> + <map code="0xc" name=".notdef"/> + <map code="0xd" name="CR"/> + <map code="0xe" name=".notdef"/> + <map code="0xf" name=".notdef"/> + <map code="0x10" name=".notdef"/> + <map code="0x11" name=".notdef"/> + <map code="0x12" name=".notdef"/> + <map code="0x13" name=".notdef"/> + <map code="0x14" name=".notdef"/> + <map code="0x15" name=".notdef"/> + <map code="0x16" name=".notdef"/> + <map code="0x17" name=".notdef"/> + <map code="0x18" name=".notdef"/> + <map code="0x19" name=".notdef"/> + <map code="0x1a" name=".notdef"/> + <map code="0x1b" name=".notdef"/> + <map code="0x1c" name=".notdef"/> + <map code="0x1d" name=".notdef"/> + <map code="0x1e" name=".notdef"/> + <map code="0x1f" name=".notdef"/> + <map code="0x20" name="space"/> + <map code="0x21" name=".notdef"/> + <map code="0x22" name=".notdef"/> + <map code="0x23" name=".notdef"/> + <map code="0x24" name=".notdef"/> + <map code="0x25" name=".notdef"/> + <map code="0x26" name=".notdef"/> + <map code="0x27" name=".notdef"/> + <map code="0x28" name=".notdef"/> + <map code="0x29" name=".notdef"/> + <map code="0x2a" name=".notdef"/> + <map code="0x2b" name=".notdef"/> + <map code="0x2c" name=".notdef"/> + <map code="0x2d" name=".notdef"/> + <map code="0x2e" name="period"/> + <map code="0x2f" name=".notdef"/> + <map code="0x30" name=".notdef"/> + <map 
code="0x31" name=".notdef"/> + <map code="0x32" name=".notdef"/> + <map code="0x33" name=".notdef"/> + <map code="0x34" name=".notdef"/> + <map code="0x35" name=".notdef"/> + <map code="0x36" name=".notdef"/> + <map code="0x37" name=".notdef"/> + <map code="0x38" name=".notdef"/> + <map code="0x39" name=".notdef"/> + <map code="0x3a" name=".notdef"/> + <map code="0x3b" name=".notdef"/> + <map code="0x3c" name=".notdef"/> + <map code="0x3d" name=".notdef"/> + <map code="0x3e" name=".notdef"/> + <map code="0x3f" name=".notdef"/> + <map code="0x40" name=".notdef"/> + <map code="0x41" name=".notdef"/> + <map code="0x42" name=".notdef"/> + <map code="0x43" name=".notdef"/> + <map code="0x44" name=".notdef"/> + <map code="0x45" name=".notdef"/> + <map code="0x46" name=".notdef"/> + <map code="0x47" name=".notdef"/> + <map code="0x48" name=".notdef"/> + <map code="0x49" name=".notdef"/> + <map code="0x4a" name=".notdef"/> + <map code="0x4b" name=".notdef"/> + <map code="0x4c" name=".notdef"/> + <map code="0x4d" name=".notdef"/> + <map code="0x4e" name=".notdef"/> + <map code="0x4f" name=".notdef"/> + <map code="0x50" name=".notdef"/> + <map code="0x51" name=".notdef"/> + <map code="0x52" name=".notdef"/> + <map code="0x53" name=".notdef"/> + <map code="0x54" name=".notdef"/> + <map code="0x55" name=".notdef"/> + <map code="0x56" name=".notdef"/> + <map code="0x57" name=".notdef"/> + <map code="0x58" name=".notdef"/> + <map code="0x59" name=".notdef"/> + <map code="0x5a" name=".notdef"/> + <map code="0x5b" name=".notdef"/> + <map code="0x5c" name=".notdef"/> + <map code="0x5d" name=".notdef"/> + <map code="0x5e" name=".notdef"/> + <map code="0x5f" name=".notdef"/> + <map code="0x60" name=".notdef"/> + <map code="0x61" name=".notdef"/> + <map code="0x62" name=".notdef"/> + <map code="0x63" name=".notdef"/> + <map code="0x64" name=".notdef"/> + <map code="0x65" name=".notdef"/> + <map code="0x66" name=".notdef"/> + <map code="0x67" name=".notdef"/> + <map code="0x68" 
name=".notdef"/> + <map code="0x69" name=".notdef"/> + <map code="0x6a" name=".notdef"/> + <map code="0x6b" name=".notdef"/> + <map code="0x6c" name=".notdef"/> + <map code="0x6d" name=".notdef"/> + <map code="0x6e" name=".notdef"/> + <map code="0x6f" name=".notdef"/> + <map code="0x70" name=".notdef"/> + <map code="0x71" name=".notdef"/> + <map code="0x72" name=".notdef"/> + <map code="0x73" name=".notdef"/> + <map code="0x74" name=".notdef"/> + <map code="0x75" name=".notdef"/> + <map code="0x76" name=".notdef"/> + <map code="0x77" name=".notdef"/> + <map code="0x78" name=".notdef"/> + <map code="0x79" name=".notdef"/> + <map code="0x7a" name=".notdef"/> + <map code="0x7b" name=".notdef"/> + <map code="0x7c" name=".notdef"/> + <map code="0x7d" name=".notdef"/> + <map code="0x7e" name=".notdef"/> + <map code="0x7f" name=".notdef"/> + <map code="0x80" name=".notdef"/> + <map code="0x81" name=".notdef"/> + <map code="0x82" name=".notdef"/> + <map code="0x83" name=".notdef"/> + <map code="0x84" name=".notdef"/> + <map code="0x85" name=".notdef"/> + <map code="0x86" name=".notdef"/> + <map code="0x87" name=".notdef"/> + <map code="0x88" name=".notdef"/> + <map code="0x89" name=".notdef"/> + <map code="0x8a" name=".notdef"/> + <map code="0x8b" name=".notdef"/> + <map code="0x8c" name=".notdef"/> + <map code="0x8d" name=".notdef"/> + <map code="0x8e" name=".notdef"/> + <map code="0x8f" name=".notdef"/> + <map code="0x90" name=".notdef"/> + <map code="0x91" name=".notdef"/> + <map code="0x92" name=".notdef"/> + <map code="0x93" name=".notdef"/> + <map code="0x94" name=".notdef"/> + <map code="0x95" name=".notdef"/> + <map code="0x96" name=".notdef"/> + <map code="0x97" name=".notdef"/> + <map code="0x98" name=".notdef"/> + <map code="0x99" name=".notdef"/> + <map code="0x9a" name=".notdef"/> + <map code="0x9b" name=".notdef"/> + <map code="0x9c" name=".notdef"/> + <map code="0x9d" name=".notdef"/> + <map code="0x9e" name=".notdef"/> + <map code="0x9f" name=".notdef"/> + 
<map code="0xa0" name=".notdef"/> + <map code="0xa1" name=".notdef"/> + <map code="0xa2" name=".notdef"/> + <map code="0xa3" name=".notdef"/> + <map code="0xa4" name=".notdef"/> + <map code="0xa5" name=".notdef"/> + <map code="0xa6" name=".notdef"/> + <map code="0xa7" name=".notdef"/> + <map code="0xa8" name=".notdef"/> + <map code="0xa9" name=".notdef"/> + <map code="0xaa" name=".notdef"/> + <map code="0xab" name=".notdef"/> + <map code="0xac" name=".notdef"/> + <map code="0xad" name=".notdef"/> + <map code="0xae" name=".notdef"/> + <map code="0xaf" name=".notdef"/> + <map code="0xb0" name=".notdef"/> + <map code="0xb1" name=".notdef"/> + <map code="0xb2" name=".notdef"/> + <map code="0xb3" name=".notdef"/> + <map code="0xb4" name=".notdef"/> + <map code="0xb5" name=".notdef"/> + <map code="0xb6" name=".notdef"/> + <map code="0xb7" name=".notdef"/> + <map code="0xb8" name=".notdef"/> + <map code="0xb9" name=".notdef"/> + <map code="0xba" name=".notdef"/> + <map code="0xbb" name=".notdef"/> + <map code="0xbc" name=".notdef"/> + <map code="0xbd" name=".notdef"/> + <map code="0xbe" name=".notdef"/> + <map code="0xbf" name=".notdef"/> + <map code="0xc0" name=".notdef"/> + <map code="0xc1" name=".notdef"/> + <map code="0xc2" name=".notdef"/> + <map code="0xc3" name=".notdef"/> + <map code="0xc4" name=".notdef"/> + <map code="0xc5" name=".notdef"/> + <map code="0xc6" name=".notdef"/> + <map code="0xc7" name=".notdef"/> + <map code="0xc8" name=".notdef"/> + <map code="0xc9" name="ellipsis"/> + </cmap_format_6> + <cmap_format_4 platformID="3" platEncID="1" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? 
--> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + </cmap> + + <post> + <formatType value="3.0"/> + <italicAngle value="0.0"/> + <underlinePosition value="-75"/> + <underlineThickness value="50"/> + <isFixedPitch value="0"/> + <minMemType42 value="0"/> + <maxMemType42 value="0"/> + <minMemType1 value="0"/> + <maxMemType1 value="0"/> + </post> + + <CFF> + <CFFFont name="TestOTF-Regular"> + <version value="001.001"/> + <Notice value="Copyright \(c\) 2015 by FontTools. No rights reserved."/> + <FullName value="Test OTF"/> + <FamilyName value="Test OTF"/> + <Weight value="Regular"/> + <isFixedPitch value="0"/> + <ItalicAngle value="0"/> + <UnderlineThickness value="50"/> + <PaintType value="0"/> + <CharstringType value="2"/> + <FontMatrix value="0.001 0 0 0.001 0 0"/> + <FontBBox value="50 0 668 750"/> + <StrokeWidth value="0"/> + <!-- charset is dumped separately as the 'GlyphOrder' element --> + <Encoding name="StandardEncoding"/> + <Private> + <BlueScale value="0.039625"/> + <BlueShift value="7"/> + <BlueFuzz value="1"/> + <ForceBold value="0"/> + <LanguageGroup value="0"/> + <ExpansionFactor value="0.06"/> + <initialRandomSeed value="0"/> + <defaultWidthX value="0"/> + <nominalWidthX value="0"/> + <Subrs> + <!-- The 'index' attribute is only for humans; it is ignored when parsed. 
--> + <CharString index="0"> + 131 122 -131 hlineto + return + </CharString> + </Subrs> + </Private> + <CharStrings> + <CharString name=".notdef"> + 500 450 hmoveto + 750 -400 -750 vlineto + 50 50 rmoveto + 650 300 -650 vlineto + endchar + </CharString> + <CharString name=".null"> + 0 endchar + </CharString> + <CharString name="CR"> + 250 endchar + </CharString> + <CharString name="ellipsis"> + 723 55 hmoveto + -107 callsubr + 241 -122 rmoveto + -107 callsubr + 241 -122 rmoveto + -107 callsubr + endchar + </CharString> + <CharString name="period"> + 241 55 hmoveto + -107 callsubr + endchar + </CharString> + <CharString name="space"> + 250 endchar + </CharString> + </CharStrings> + </CFFFont> + + <GlobalSubrs> + <!-- The 'index' attribute is only for humans; it is ignored when parsed. --> + </GlobalSubrs> + </CFF> + + <hmtx> + <mtx name=".notdef" width="500" lsb="50"/> + <mtx name=".null" width="0" lsb="0"/> + <mtx name="CR" width="250" lsb="0"/> + <mtx name="ellipsis" width="723" lsb="55"/> + <mtx name="period" width="241" lsb="55"/> + <mtx name="space" width="250" lsb="0"/> + </hmtx> + + <DSIG> + <!-- note that the Digital Signature will be invalid after recompilation! --> + <tableHeader flag="0x0" numSigs="0" version="1"/> + </DSIG> + +</ttFont> diff -Nru fonttools-2.4/Lib/fontTools/ttLib/testdata/TestTTF-Regular.ttx fonttools-3.0/Lib/fontTools/ttLib/testdata/TestTTF-Regular.ttx --- fonttools-2.4/Lib/fontTools/ttLib/testdata/TestTTF-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/testdata/TestTTF-Regular.ttx 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,553 @@ +<?xml version="1.0" encoding="UTF-8"?> +<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="2.5"> + + <GlyphOrder> + <!-- The 'id' attribute is only for humans; it is ignored when parsed. 
--> + <GlyphID id="0" name=".notdef"/> + <GlyphID id="1" name=".null"/> + <GlyphID id="2" name="CR"/> + <GlyphID id="3" name="space"/> + <GlyphID id="4" name="period"/> + <GlyphID id="5" name="ellipsis"/> + </GlyphOrder> + + <head> + <!-- Most of this table will be recalculated by the compiler --> + <tableVersion value="1.0"/> + <fontRevision value="1.0"/> + <checkSumAdjustment value="0x2ee689e2"/> + <magicNumber value="0x5f0f3cf5"/> + <flags value="00000000 00000011"/> + <unitsPerEm value="1000"/> + <created value="Thu Jun 4 14:29:11 2015"/> + <modified value="Mon Aug 3 13:04:43 2015"/> + <xMin value="50"/> + <yMin value="0"/> + <xMax value="668"/> + <yMax value="750"/> + <macStyle value="00000000 00000000"/> + <lowestRecPPEM value="9"/> + <fontDirectionHint value="2"/> + <indexToLocFormat value="0"/> + <glyphDataFormat value="0"/> + </head> + + <hhea> + <tableVersion value="1.0"/> + <ascent value="900"/> + <descent value="-300"/> + <lineGap value="0"/> + <advanceWidthMax value="723"/> + <minLeftSideBearing value="50"/> + <minRightSideBearing value="50"/> + <xMaxExtent value="668"/> + <caretSlopeRise value="1"/> + <caretSlopeRun value="0"/> + <caretOffset value="0"/> + <reserved0 value="0"/> + <reserved1 value="0"/> + <reserved2 value="0"/> + <reserved3 value="0"/> + <metricDataFormat value="0"/> + <numberOfHMetrics value="6"/> + </hhea> + + <maxp> + <!-- Most of this table will be recalculated by the compiler --> + <tableVersion value="0x10000"/> + <numGlyphs value="6"/> + <maxPoints value="8"/> + <maxContours value="2"/> + <maxCompositePoints value="12"/> + <maxCompositeContours value="3"/> + <maxZones value="1"/> + <maxTwilightPoints value="0"/> + <maxStorage value="0"/> + <maxFunctionDefs value="0"/> + <maxInstructionDefs value="0"/> + <maxStackElements value="0"/> + <maxSizeOfInstructions value="0"/> + <maxComponentElements value="3"/> + <maxComponentDepth value="1"/> + </maxp> + + <OS_2> + <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex' + will be 
recalculated by the compiler --> + <version value="4"/> + <xAvgCharWidth value="392"/> + <usWeightClass value="400"/> + <usWidthClass value="5"/> + <fsType value="00000000 00000000"/> + <ySubscriptXSize value="700"/> + <ySubscriptYSize value="650"/> + <ySubscriptXOffset value="0"/> + <ySubscriptYOffset value="140"/> + <ySuperscriptXSize value="700"/> + <ySuperscriptYSize value="650"/> + <ySuperscriptXOffset value="0"/> + <ySuperscriptYOffset value="477"/> + <yStrikeoutSize value="50"/> + <yStrikeoutPosition value="250"/> + <sFamilyClass value="2050"/> + <panose> + <bFamilyType value="2"/> + <bSerifStyle value="11"/> + <bWeight value="6"/> + <bProportion value="4"/> + <bContrast value="4"/> + <bStrokeVariation value="2"/> + <bArmStyle value="7"/> + <bLetterForm value="8"/> + <bMidline value="1"/> + <bXHeight value="4"/> + </panose> + <ulUnicodeRange1 value="10000000 00000000 00000000 00000001"/> + <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/> + <achVendID value="NONE"/> + <fsSelection value="00000000 11000000"/> + <usFirstCharIndex value="0"/> + <usLastCharIndex value="8230"/> + <sTypoAscender value="750"/> + <sTypoDescender value="-250"/> + <sTypoLineGap value="200"/> + <usWinAscent value="900"/> + <usWinDescent value="300"/> + <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/> + <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/> + <sxHeight value="500"/> + <sCapHeight value="700"/> + <usDefaultChar value="0"/> + <usBreakChar value="32"/> + <usMaxContext value="0"/> + </OS_2> + + <hmtx> + <mtx name=".notdef" width="500" lsb="50"/> + <mtx name=".null" width="0" lsb="0"/> + <mtx name="CR" width="250" lsb="0"/> + <mtx name="ellipsis" width="723" lsb="55"/> + <mtx name="period" width="241" lsb="55"/> + <mtx name="space" width="250" lsb="0"/> + </hmtx> + + <cmap> + <tableVersion version="0"/> + 
<cmap_format_4 platformID="0" platEncID="3" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? --> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + <cmap_format_6 platformID="1" platEncID="0" language="0"> + <map code="0x0" name=".null"/> + <map code="0x1" name=".notdef"/> + <map code="0x2" name=".notdef"/> + <map code="0x3" name=".notdef"/> + <map code="0x4" name=".notdef"/> + <map code="0x5" name=".notdef"/> + <map code="0x6" name=".notdef"/> + <map code="0x7" name=".notdef"/> + <map code="0x8" name=".notdef"/> + <map code="0x9" name=".notdef"/> + <map code="0xa" name=".notdef"/> + <map code="0xb" name=".notdef"/> + <map code="0xc" name=".notdef"/> + <map code="0xd" name="CR"/> + <map code="0xe" name=".notdef"/> + <map code="0xf" name=".notdef"/> + <map code="0x10" name=".notdef"/> + <map code="0x11" name=".notdef"/> + <map code="0x12" name=".notdef"/> + <map code="0x13" name=".notdef"/> + <map code="0x14" name=".notdef"/> + <map code="0x15" name=".notdef"/> + <map code="0x16" name=".notdef"/> + <map code="0x17" name=".notdef"/> + <map code="0x18" name=".notdef"/> + <map code="0x19" name=".notdef"/> + <map code="0x1a" name=".notdef"/> + <map code="0x1b" name=".notdef"/> + <map code="0x1c" name=".notdef"/> + <map code="0x1d" name=".notdef"/> + <map code="0x1e" name=".notdef"/> + <map code="0x1f" name=".notdef"/> + <map code="0x20" name="space"/> + <map code="0x21" name=".notdef"/> + <map code="0x22" name=".notdef"/> + <map code="0x23" name=".notdef"/> + <map code="0x24" name=".notdef"/> + <map code="0x25" name=".notdef"/> + <map code="0x26" name=".notdef"/> + <map code="0x27" name=".notdef"/> + <map code="0x28" name=".notdef"/> + <map code="0x29" name=".notdef"/> + <map code="0x2a" name=".notdef"/> + <map code="0x2b" name=".notdef"/> + <map code="0x2c" name=".notdef"/> + <map 
code="0x2d" name=".notdef"/> + <map code="0x2e" name="period"/> + <map code="0x2f" name=".notdef"/> + <map code="0x30" name=".notdef"/> + <map code="0x31" name=".notdef"/> + <map code="0x32" name=".notdef"/> + <map code="0x33" name=".notdef"/> + <map code="0x34" name=".notdef"/> + <map code="0x35" name=".notdef"/> + <map code="0x36" name=".notdef"/> + <map code="0x37" name=".notdef"/> + <map code="0x38" name=".notdef"/> + <map code="0x39" name=".notdef"/> + <map code="0x3a" name=".notdef"/> + <map code="0x3b" name=".notdef"/> + <map code="0x3c" name=".notdef"/> + <map code="0x3d" name=".notdef"/> + <map code="0x3e" name=".notdef"/> + <map code="0x3f" name=".notdef"/> + <map code="0x40" name=".notdef"/> + <map code="0x41" name=".notdef"/> + <map code="0x42" name=".notdef"/> + <map code="0x43" name=".notdef"/> + <map code="0x44" name=".notdef"/> + <map code="0x45" name=".notdef"/> + <map code="0x46" name=".notdef"/> + <map code="0x47" name=".notdef"/> + <map code="0x48" name=".notdef"/> + <map code="0x49" name=".notdef"/> + <map code="0x4a" name=".notdef"/> + <map code="0x4b" name=".notdef"/> + <map code="0x4c" name=".notdef"/> + <map code="0x4d" name=".notdef"/> + <map code="0x4e" name=".notdef"/> + <map code="0x4f" name=".notdef"/> + <map code="0x50" name=".notdef"/> + <map code="0x51" name=".notdef"/> + <map code="0x52" name=".notdef"/> + <map code="0x53" name=".notdef"/> + <map code="0x54" name=".notdef"/> + <map code="0x55" name=".notdef"/> + <map code="0x56" name=".notdef"/> + <map code="0x57" name=".notdef"/> + <map code="0x58" name=".notdef"/> + <map code="0x59" name=".notdef"/> + <map code="0x5a" name=".notdef"/> + <map code="0x5b" name=".notdef"/> + <map code="0x5c" name=".notdef"/> + <map code="0x5d" name=".notdef"/> + <map code="0x5e" name=".notdef"/> + <map code="0x5f" name=".notdef"/> + <map code="0x60" name=".notdef"/> + <map code="0x61" name=".notdef"/> + <map code="0x62" name=".notdef"/> + <map code="0x63" name=".notdef"/> + <map code="0x64" 
name=".notdef"/> + <map code="0x65" name=".notdef"/> + <map code="0x66" name=".notdef"/> + <map code="0x67" name=".notdef"/> + <map code="0x68" name=".notdef"/> + <map code="0x69" name=".notdef"/> + <map code="0x6a" name=".notdef"/> + <map code="0x6b" name=".notdef"/> + <map code="0x6c" name=".notdef"/> + <map code="0x6d" name=".notdef"/> + <map code="0x6e" name=".notdef"/> + <map code="0x6f" name=".notdef"/> + <map code="0x70" name=".notdef"/> + <map code="0x71" name=".notdef"/> + <map code="0x72" name=".notdef"/> + <map code="0x73" name=".notdef"/> + <map code="0x74" name=".notdef"/> + <map code="0x75" name=".notdef"/> + <map code="0x76" name=".notdef"/> + <map code="0x77" name=".notdef"/> + <map code="0x78" name=".notdef"/> + <map code="0x79" name=".notdef"/> + <map code="0x7a" name=".notdef"/> + <map code="0x7b" name=".notdef"/> + <map code="0x7c" name=".notdef"/> + <map code="0x7d" name=".notdef"/> + <map code="0x7e" name=".notdef"/> + <map code="0x7f" name=".notdef"/> + <map code="0x80" name=".notdef"/> + <map code="0x81" name=".notdef"/> + <map code="0x82" name=".notdef"/> + <map code="0x83" name=".notdef"/> + <map code="0x84" name=".notdef"/> + <map code="0x85" name=".notdef"/> + <map code="0x86" name=".notdef"/> + <map code="0x87" name=".notdef"/> + <map code="0x88" name=".notdef"/> + <map code="0x89" name=".notdef"/> + <map code="0x8a" name=".notdef"/> + <map code="0x8b" name=".notdef"/> + <map code="0x8c" name=".notdef"/> + <map code="0x8d" name=".notdef"/> + <map code="0x8e" name=".notdef"/> + <map code="0x8f" name=".notdef"/> + <map code="0x90" name=".notdef"/> + <map code="0x91" name=".notdef"/> + <map code="0x92" name=".notdef"/> + <map code="0x93" name=".notdef"/> + <map code="0x94" name=".notdef"/> + <map code="0x95" name=".notdef"/> + <map code="0x96" name=".notdef"/> + <map code="0x97" name=".notdef"/> + <map code="0x98" name=".notdef"/> + <map code="0x99" name=".notdef"/> + <map code="0x9a" name=".notdef"/> + <map code="0x9b" name=".notdef"/> + 
<map code="0x9c" name=".notdef"/> + <map code="0x9d" name=".notdef"/> + <map code="0x9e" name=".notdef"/> + <map code="0x9f" name=".notdef"/> + <map code="0xa0" name=".notdef"/> + <map code="0xa1" name=".notdef"/> + <map code="0xa2" name=".notdef"/> + <map code="0xa3" name=".notdef"/> + <map code="0xa4" name=".notdef"/> + <map code="0xa5" name=".notdef"/> + <map code="0xa6" name=".notdef"/> + <map code="0xa7" name=".notdef"/> + <map code="0xa8" name=".notdef"/> + <map code="0xa9" name=".notdef"/> + <map code="0xaa" name=".notdef"/> + <map code="0xab" name=".notdef"/> + <map code="0xac" name=".notdef"/> + <map code="0xad" name=".notdef"/> + <map code="0xae" name=".notdef"/> + <map code="0xaf" name=".notdef"/> + <map code="0xb0" name=".notdef"/> + <map code="0xb1" name=".notdef"/> + <map code="0xb2" name=".notdef"/> + <map code="0xb3" name=".notdef"/> + <map code="0xb4" name=".notdef"/> + <map code="0xb5" name=".notdef"/> + <map code="0xb6" name=".notdef"/> + <map code="0xb7" name=".notdef"/> + <map code="0xb8" name=".notdef"/> + <map code="0xb9" name=".notdef"/> + <map code="0xba" name=".notdef"/> + <map code="0xbb" name=".notdef"/> + <map code="0xbc" name=".notdef"/> + <map code="0xbd" name=".notdef"/> + <map code="0xbe" name=".notdef"/> + <map code="0xbf" name=".notdef"/> + <map code="0xc0" name=".notdef"/> + <map code="0xc1" name=".notdef"/> + <map code="0xc2" name=".notdef"/> + <map code="0xc3" name=".notdef"/> + <map code="0xc4" name=".notdef"/> + <map code="0xc5" name=".notdef"/> + <map code="0xc6" name=".notdef"/> + <map code="0xc7" name=".notdef"/> + <map code="0xc8" name=".notdef"/> + <map code="0xc9" name="ellipsis"/> + </cmap_format_6> + <cmap_format_4 platformID="3" platEncID="1" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? 
--> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + </cmap> + + <fpgm> + <assembly> + SVTCA[0] /* SetFPVectorToAxis */ + </assembly> + </fpgm> + + <prep> + <assembly> + SVTCA[0] /* SetFPVectorToAxis */ + </assembly> + </prep> + + <cvt> + <cv index="0" value="0"/> + </cvt> + + <loca> + <!-- The 'loca' table will be calculated by the compiler --> + </loca> + + <glyf> + + <!-- The xMin, yMin, xMax and yMax values + will be recalculated by the compiler. --> + + <TTGlyph name=".notdef" xMin="50" yMin="0" xMax="450" yMax="750"> + <contour> + <pt x="50" y="0" on="1"/> + <pt x="50" y="750" on="1"/> + <pt x="450" y="750" on="1"/> + <pt x="450" y="0" on="1"/> + </contour> + <contour> + <pt x="400" y="50" on="1"/> + <pt x="400" y="700" on="1"/> + <pt x="100" y="700" on="1"/> + <pt x="100" y="50" on="1"/> + </contour> + <instructions><assembly> + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + </assembly></instructions> + </TTGlyph> + + <TTGlyph name=".null"/><!-- contains no outline data --> + + <TTGlyph name="CR"/><!-- contains no outline data --> + + <TTGlyph name="ellipsis" xMin="55" yMin="0" xMax="668" yMax="122"> + <component glyphName="period" x="0" y="0" flags="0x4"/> + <component glyphName="period" x="241" y="0" flags="0x4"/> + <component glyphName="period" x="482" y="0" flags="0x4"/> + <instructions><assembly> + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + </assembly></instructions> + </TTGlyph> + + <TTGlyph name="period" xMin="55" yMin="0" xMax="186" yMax="122"> + <contour> + <pt x="55" y="122" on="1"/> + <pt x="186" y="122" on="1"/> + <pt x="186" y="0" on="1"/> + <pt x="55" y="0" on="1"/> + </contour> + <instructions><assembly> + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + </assembly></instructions> + </TTGlyph> + + <TTGlyph name="space"/><!-- contains no 
outline data --> + + </glyf> + + <name> + <namerecord nameID="0" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Copyright (c) 2015 by FontTools. No rights reserved. + </namerecord> + <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Regular + </namerecord> + <namerecord nameID="3" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools: Test TTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="5" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True"> + TestTTF-Regular + </namerecord> + <namerecord nameID="7" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + <namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="0" platformID="3" platEncID="1" langID="0x409"> + Copyright (c) 2015 by FontTools. No rights reserved. 
+ </namerecord> + <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409"> + Test TTF + </namerecord> + <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409"> + Regular + </namerecord> + <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409"> + FontTools: Test TTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409"> + Test TTF + </namerecord> + <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409"> + TestTTF-Regular + </namerecord> + <namerecord nameID="7" platformID="3" platEncID="1" langID="0x409"> + Test TTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + </name> + + <post> + <formatType value="2.0"/> + <italicAngle value="0.0"/> + <underlinePosition value="-75"/> + <underlineThickness value="50"/> + <isFixedPitch value="0"/> + <minMemType42 value="0"/> + <maxMemType42 value="0"/> + <minMemType1 value="0"/> + <maxMemType1 value="0"/> + <psNames> + <!-- This file uses unique glyph names based on the information + found in the 'post' table. Since these names might not be unique, + we have to invent artificial names in case of clashes. In order to + be able to retain the original information, we need a name to + ps name mapping for those cases where they differ. That's what + you see below. 
+ --> + </psNames> + <extraNames> + <!-- following are the name that are not taken from the standard Mac glyph order --> + <psName name=".null"/> + <psName name="CR"/> + </extraNames> + </post> + + <gasp> + <gaspRange rangeMaxPPEM="8" rangeGaspBehavior="10"/> + <gaspRange rangeMaxPPEM="65535" rangeGaspBehavior="15"/> + </gasp> + + <DSIG> + <!-- note that the Digital Signature will be invalid after recompilation! --> + <tableHeader flag="0x0" numSigs="0" version="1"/> + </DSIG> + +</ttFont> diff -Nru fonttools-2.4/Lib/fontTools/ttLib/testdata/test_woff2_metadata.xml fonttools-3.0/Lib/fontTools/ttLib/testdata/test_woff2_metadata.xml --- fonttools-2.4/Lib/fontTools/ttLib/testdata/test_woff2_metadata.xml 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/testdata/test_woff2_metadata.xml 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,103 @@ +<?xml version="1.0" encoding="UTF-8"?> +<metadata version="1.0"> + <uniqueid id="org.w3.webfonts.wofftest" /> + <vendor name="Test Vendor" url="http://w3c.org/Fonts" /> + <credits> + <credit name="Credit 1" role="Role 1" url="http://w3c.org/Fonts" /> + <credit name="Credit 2" role="Role 2" url="http://w3c.org/Fonts" /> + </credits> + <description url="http://w3c.org/Fonts"> + <text> + Description without language. + </text> + <text lang="en"> + Description with "en" language. + </text> + <text lang="fr"> + Description with "fr" language. + </text> + </description> + <license url="http://w3c.org/Fonts" id="License ID"> + <text> + License without language. + </text> + <text lang="en"> + License with "en" language. + </text> + <text lang="fr"> + License with "fr" language. + </text> + </license> + <copyright> + <text> + Copyright without language. + </text> + <text lang="en"> + Copyright with "en" language. + </text> + <text lang="fr"> + Copyright with "fr" language. + </text> + </copyright> + <trademark> + <text> + Trademark without language. + </text> + <text lang="en"> + Trademark with "en" language. 
+ </text> + <text lang="fr"> + Trademark with "fr" language. + </text> + </trademark> + <licensee name="Licensee Name" /> + <extension id="Extension 1"> + <name>Extension 1 - Name Without Language</name> + <name lang="en">Extension 1 - Name With "en" Language</name> + <name lang="fr">Extension 1 - Name With "fr" Language</name> + <item id="Extension 1 - Item 1 ID"> + <name>Extension 1 - Item 1 - Name Without Language</name> + <name lang="en">Extension 1 - Item 1 - Name With "en" Language</name> + <name lang="fr">Extension 1 - Item 1 - Name With "fr" Language</name> + <value>Extension 1 - Item 1 - Value Without Language</value> + <value lang="en">Extension 1 - Item 1 - Value With "en" Language</value> + <value lang="fr">Extension 1 - Item 1 - Value With "fr" Language</value> + </item> + <item id="Extension 1 - Item 2 ID"> + <name>Extension 1 - Item 2 - Name Without Language</name> + <name lang="en">Extension 1 - Item 2 - Name With "en" Language</name> + <name lang="fr">Extension 1 - Item 2 - Name With "fr" Language</name> + <value>Extension 1 - Item 2 - Value Without Language</value> + <value lang="en">Extension 1 - Item 2 - Value With "en" Language</value> + <value lang="fr">Extension 1 - Item 2 - Value With "fr" Language</value> + </item> + </extension> + <extension id="Extension 2"> + <name>Extension 2 - Name Without Language</name> + <name lang="en">Extension 2 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Name With "fr" Language</name> + <item id="Extension 2 - Item 1 ID"> + <name>Extension 2 - Item 1 - Name Without Language</name> + <name lang="en">Extension 2 - Item 1 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Item 1 - Name With "fr" Language</name> + <value>Extension 2 - Item 1 - Value Without Language</value> + <value lang="en">Extension 2 - Item 1 - Value With "en" Language</value> + <value lang="fr">Extension 2 - Item 1 - Value With "fr" Language</value> + </item> + <item id="Extension 2 - Item 2 ID"> + 
<name>Extension 2 - Item 2 - Name Without Language</name> + <name lang="en">Extension 2 - Item 2 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Item 2 - Name With "fr" Language</name> + <value>Extension 2 - Item 2 - Value Without Language</value> + <value lang="en">Extension 2 - Item 2 - Value With "en" Language</value> + <value lang="fr">Extension 2 - Item 2 - Value With "fr" Language</value> + </item> + <item id="Extension 2 - Item 3 ID"> + <name>Extension 2 - Item 3 - Name Without Language</name> + <name lang="en">Extension 2 - Item 3 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Item 3 - Name With "fr" Language</name> + <value>Extension 2 - Item 3 - Value Without Language</value> + <value lang="en">Extension 2 - Item 3 - Value With "en" Language</value> + </item> + </extension> +</metadata> diff -Nru fonttools-2.4/Lib/fontTools/ttLib/woff2.py fonttools-3.0/Lib/fontTools/ttLib/woff2.py --- fonttools-2.4/Lib/fontTools/ttLib/woff2.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/woff2.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1084 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +import array +import struct +from collections import OrderedDict +from fontTools.misc import sstruct +from fontTools.misc.arrayTools import calcIntBounds +from fontTools.misc.textTools import pad +from fontTools.ttLib import (TTFont, TTLibError, getTableModule, getTableClass, + getSearchRange) +from fontTools.ttLib.sfnt import (SFNTReader, SFNTWriter, DirectoryEntry, + WOFFFlavorData, sfntDirectoryFormat, sfntDirectorySize, SFNTDirectoryEntry, + sfntDirectoryEntrySize, calcChecksum) +from fontTools.ttLib.tables import ttProgram + +haveBrotli = False +try: + import brotli + haveBrotli = True +except ImportError: + pass + + +class WOFF2Reader(SFNTReader): + + flavor = "woff2" + + def __init__(self, file, checkChecksums=1, fontNumber=-1): + if not 
haveBrotli: + print('The WOFF2 decoder requires the Brotli Python extension, available at:\n' + 'https://github.com/google/brotli', file=sys.stderr) + raise ImportError("No module named brotli") + + self.file = file + + signature = Tag(self.file.read(4)) + if signature != b"wOF2": + raise TTLibError("Not a WOFF2 font (bad signature)") + + self.file.seek(0) + self.DirectoryEntry = WOFF2DirectoryEntry + data = self.file.read(woff2DirectorySize) + if len(data) != woff2DirectorySize: + raise TTLibError('Not a WOFF2 font (not enough data)') + sstruct.unpack(woff2DirectoryFormat, data, self) + + self.tables = OrderedDict() + offset = 0 + for i in range(self.numTables): + entry = self.DirectoryEntry() + entry.fromFile(self.file) + tag = Tag(entry.tag) + self.tables[tag] = entry + entry.offset = offset + offset += entry.length + + totalUncompressedSize = offset + compressedData = self.file.read(self.totalCompressedSize) + decompressedData = brotli.decompress(compressedData) + if len(decompressedData) != totalUncompressedSize: + raise TTLibError( + 'unexpected size for decompressed font data: expected %d, found %d' + % (totalUncompressedSize, len(decompressedData))) + self.transformBuffer = BytesIO(decompressedData) + + self.file.seek(0, 2) + if self.length != self.file.tell(): + raise TTLibError("reported 'length' doesn't match the actual file size") + + self.flavorData = WOFF2FlavorData(self) + + # make empty TTFont to store data while reconstructing tables + self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) + + def __getitem__(self, tag): + """Fetch the raw table data. 
Reconstruct transformed tables.""" + entry = self.tables[Tag(tag)] + if not hasattr(entry, 'data'): + if tag in woff2TransformedTableTags: + entry.data = self.reconstructTable(tag) + else: + entry.data = entry.loadData(self.transformBuffer) + return entry.data + + def reconstructTable(self, tag): + """Reconstruct table named 'tag' from transformed data.""" + if tag not in woff2TransformedTableTags: + raise TTLibError("transform for table '%s' is unknown" % tag) + entry = self.tables[Tag(tag)] + rawData = entry.loadData(self.transformBuffer) + if tag == 'glyf': + # no need to pad glyph data when reconstructing + padding = self.padding if hasattr(self, 'padding') else None + data = self._reconstructGlyf(rawData, padding) + elif tag == 'loca': + data = self._reconstructLoca() + else: + raise NotImplementedError + return data + + def _reconstructGlyf(self, data, padding=None): + """ Return recostructed glyf table data, and set the corresponding loca's + locations. Optionally pad glyph offsets to the specified number of bytes. + """ + self.ttFont['loca'] = WOFF2LocaTable() + glyfTable = self.ttFont['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(data, self.ttFont) + glyfTable.padding = padding + data = glyfTable.compile(self.ttFont) + return data + + def _reconstructLoca(self): + """ Return reconstructed loca table data. 
""" + if 'loca' not in self.ttFont: + # make sure glyf is reconstructed first + self.tables['glyf'].data = self.reconstructTable('glyf') + locaTable = self.ttFont['loca'] + data = locaTable.compile(self.ttFont) + if len(data) != self.tables['loca'].origLength: + raise TTLibError( + "reconstructed 'loca' table doesn't match original size: " + "expected %d, found %d" + % (self.tables['loca'].origLength, len(data))) + return data + + +class WOFF2Writer(SFNTWriter): + + flavor = "woff2" + + def __init__(self, file, numTables, sfntVersion="\000\001\000\000", + flavor=None, flavorData=None): + if not haveBrotli: + print('The WOFF2 encoder requires the Brotli Python extension, available at:\n' + 'https://github.com/google/brotli', file=sys.stderr) + raise ImportError("No module named brotli") + + self.file = file + self.numTables = numTables + self.sfntVersion = Tag(sfntVersion) + self.flavorData = flavorData or WOFF2FlavorData() + + self.directoryFormat = woff2DirectoryFormat + self.directorySize = woff2DirectorySize + self.DirectoryEntry = WOFF2DirectoryEntry + + self.signature = Tag("wOF2") + + self.nextTableOffset = 0 + self.transformBuffer = BytesIO() + + self.tables = OrderedDict() + + # make empty TTFont to store data while normalising and transforming tables + self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) + + def __setitem__(self, tag, data): + """Associate new entry named 'tag' with raw table data.""" + if tag in self.tables: + raise TTLibError("cannot rewrite '%s' table" % tag) + if tag == 'DSIG': + # always drop DSIG table, since the encoding process can invalidate it + self.numTables -= 1 + return + + entry = self.DirectoryEntry() + entry.tag = Tag(tag) + entry.flags = getKnownTagIndex(entry.tag) + # WOFF2 table data are written to disk only on close(), after all tags + # have been specified + entry.data = data + + self.tables[tag] = entry + + def close(self): + """ All tags must have been specified. Now write the table data and directory. 
+ """ + if len(self.tables) != self.numTables: + raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables))) + + if self.sfntVersion in ("\x00\x01\x00\x00", "true"): + isTrueType = True + elif self.sfntVersion == "OTTO": + isTrueType = False + else: + raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") + + # The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned. + # However, the reference WOFF2 implementation still fails to reconstruct + # 'unpadded' glyf tables, therefore we need to 'normalise' them. + # See: + # https://github.com/khaledhosny/ots/issues/60 + # https://github.com/google/woff2/issues/15 + if isTrueType: + self._normaliseGlyfAndLoca(padding=4) + self._setHeadTransformFlag() + + # To pass the legacy OpenType Sanitiser currently included in browsers, + # we must sort the table directory and data alphabetically by tag. + # See: + # https://github.com/google/woff2/pull/3 + # https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html + # TODO(user): remove to match spec once browsers are on newer OTS + self.tables = OrderedDict(sorted(self.tables.items())) + + self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets() + + fontData = self._transformTables() + compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT) + + self.totalCompressedSize = len(compressedFont) + self.length = self._calcTotalSize() + self.majorVersion, self.minorVersion = self._getVersion() + self.reserved = 0 + + directory = self._packTableDirectory() + self.file.seek(0) + self.file.write(pad(directory + compressedFont, size=4)) + self._writeFlavorData() + + def _normaliseGlyfAndLoca(self, padding=4): + """ Recompile glyf and loca tables, aligning glyph offsets to multiples of + 'padding' size. Update the head table's 'indexToLocFormat' accordingly while + compiling loca. 
+ """ + if self.sfntVersion == "OTTO": + return + for tag in ('maxp', 'head', 'loca', 'glyf'): + self._decompileTable(tag) + self.ttFont['glyf'].padding = padding + for tag in ('glyf', 'loca'): + self._compileTable(tag) + + def _setHeadTransformFlag(self): + """ Set bit 11 of 'head' table flags to indicate that the font has undergone + a lossless modifying transform. Re-compile head table data.""" + self._decompileTable('head') + self.ttFont['head'].flags |= (1 << 11) + self._compileTable('head') + + def _decompileTable(self, tag): + """ Fetch table data, decompile it, and store it inside self.ttFont. """ + tag = Tag(tag) + if tag not in self.tables: + raise TTLibError("missing required table: %s" % tag) + if self.ttFont.isLoaded(tag): + return + data = self.tables[tag].data + if tag == 'loca': + tableClass = WOFF2LocaTable + elif tag == 'glyf': + tableClass = WOFF2GlyfTable + else: + tableClass = getTableClass(tag) + table = tableClass(tag) + self.ttFont.tables[tag] = table + table.decompile(data, self.ttFont) + + def _compileTable(self, tag): + """ Compile table and store it in its 'data' attribute. """ + self.tables[tag].data = self.ttFont[tag].compile(self.ttFont) + + def _calcSFNTChecksumsLengthsAndOffsets(self): + """ Compute the 'original' SFNT checksums, lengths and offsets for checksum + adjustment calculation. Return the total size of the uncompressed font. 
+ """ + offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables) + for tag, entry in self.tables.items(): + data = entry.data + entry.origOffset = offset + entry.origLength = len(data) + if tag == 'head': + entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) + else: + entry.checkSum = calcChecksum(data) + offset += (entry.origLength + 3) & ~3 + return offset + + def _transformTables(self): + """Return transformed font data.""" + for tag, entry in self.tables.items(): + if tag in woff2TransformedTableTags: + data = self.transformTable(tag) + else: + data = entry.data + entry.offset = self.nextTableOffset + entry.saveData(self.transformBuffer, data) + self.nextTableOffset += entry.length + self.writeMasterChecksum() + fontData = self.transformBuffer.getvalue() + return fontData + + def transformTable(self, tag): + """Return transformed table data.""" + if tag not in woff2TransformedTableTags: + raise TTLibError("Transform for table '%s' is unknown" % tag) + if tag == "loca": + data = b"" + elif tag == "glyf": + for tag in ('maxp', 'head', 'loca', 'glyf'): + self._decompileTable(tag) + glyfTable = self.ttFont['glyf'] + data = glyfTable.transform(self.ttFont) + else: + raise NotImplementedError + return data + + def _calcMasterChecksum(self): + """Calculate checkSumAdjustment.""" + tags = list(self.tables.keys()) + checksums = [] + for i in range(len(tags)): + checksums.append(self.tables[tags[i]].checkSum) + + # Create a SFNT directory for checksum calculation purposes + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16) + directory = sstruct.pack(sfntDirectoryFormat, self) + tables = sorted(self.tables.items()) + for tag, entry in tables: + sfntEntry = SFNTDirectoryEntry() + sfntEntry.tag = entry.tag + sfntEntry.checkSum = entry.checkSum + sfntEntry.offset = entry.origOffset + sfntEntry.length = entry.origLength + directory = directory + sfntEntry.toString() + + directory_end = sfntDirectorySize + 
len(self.tables) * sfntDirectoryEntrySize + assert directory_end == len(directory) + + checksums.append(calcChecksum(directory)) + checksum = sum(checksums) & 0xffffffff + # BiboAfba! + checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff + return checksumadjustment + + def writeMasterChecksum(self): + """Write checkSumAdjustment to the transformBuffer.""" + checksumadjustment = self._calcMasterChecksum() + self.transformBuffer.seek(self.tables['head'].offset + 8) + self.transformBuffer.write(struct.pack(">L", checksumadjustment)) + + def _calcTotalSize(self): + """Calculate total size of WOFF2 font, including any meta- and/or private data.""" + offset = self.directorySize + for entry in self.tables.values(): + offset += len(entry.toString()) + offset += self.totalCompressedSize + offset = (offset + 3) & ~3 + offset = self._calcFlavorDataOffsetsAndSize(offset) + return offset + + def _calcFlavorDataOffsetsAndSize(self, start): + """Calculate offsets and lengths for any meta- and/or private data.""" + offset = start + data = self.flavorData + if data.metaData: + self.metaOrigLength = len(data.metaData) + self.metaOffset = offset + self.compressedMetaData = brotli.compress( + data.metaData, mode=brotli.MODE_TEXT) + self.metaLength = len(self.compressedMetaData) + offset += self.metaLength + else: + self.metaOffset = self.metaLength = self.metaOrigLength = 0 + self.compressedMetaData = b"" + if data.privData: + # make sure private data is padded to 4-byte boundary + offset = (offset + 3) & ~3 + self.privOffset = offset + self.privLength = len(data.privData) + offset += self.privLength + else: + self.privOffset = self.privLength = 0 + return offset + + def _getVersion(self): + """Return the WOFF2 font's (majorVersion, minorVersion) tuple.""" + data = self.flavorData + if data.majorVersion is not None and data.minorVersion is not None: + return data.majorVersion, data.minorVersion + else: + # if None, return 'fontRevision' from 'head' table + if 'head' in 
self.tables: + return struct.unpack(">HH", self.tables['head'].data[4:8]) + else: + return 0, 0 + + def _packTableDirectory(self): + """Return WOFF2 table directory data.""" + directory = sstruct.pack(self.directoryFormat, self) + for entry in self.tables.values(): + directory = directory + entry.toString() + return directory + + def _writeFlavorData(self): + """Write metadata and/or private data using appropiate padding.""" + compressedMetaData = self.compressedMetaData + privData = self.flavorData.privData + if compressedMetaData and privData: + compressedMetaData = pad(compressedMetaData, size=4) + if compressedMetaData: + self.file.seek(self.metaOffset) + assert self.file.tell() == self.metaOffset + self.file.write(compressedMetaData) + if privData: + self.file.seek(self.privOffset) + assert self.file.tell() == self.privOffset + self.file.write(privData) + + def reordersTables(self): + return True + + +# -- woff2 directory helpers and cruft + +woff2DirectoryFormat = """ + > # big endian + signature: 4s # "wOF2" + sfntVersion: 4s + length: L # total woff2 file size + numTables: H # number of tables + reserved: H # set to 0 + totalSfntSize: L # uncompressed size + totalCompressedSize: L # compressed size + majorVersion: H # major version of WOFF file + minorVersion: H # minor version of WOFF file + metaOffset: L # offset to metadata block + metaLength: L # length of compressed metadata + metaOrigLength: L # length of uncompressed metadata + privOffset: L # offset to private data block + privLength: L # length of private data block +""" + +woff2DirectorySize = sstruct.calcsize(woff2DirectoryFormat) + +woff2KnownTags = ( + "cmap", "head", "hhea", "hmtx", "maxp", "name", "OS/2", "post", "cvt ", + "fpgm", "glyf", "loca", "prep", "CFF ", "VORG", "EBDT", "EBLC", "gasp", + "hdmx", "kern", "LTSH", "PCLT", "VDMX", "vhea", "vmtx", "BASE", "GDEF", + "GPOS", "GSUB", "EBSC", "JSTF", "MATH", "CBDT", "CBLC", "COLR", "CPAL", + "SVG ", "sbix", "acnt", "avar", "bdat", "bloc", 
"bsln", "cvar", "fdsc", + "feat", "fmtx", "fvar", "gvar", "hsty", "just", "lcar", "mort", "morx", + "opbd", "prop", "trak", "Zapf", "Silf", "Glat", "Gloc", "Feat", "Sill") + +woff2FlagsFormat = """ + > # big endian + flags: B # table type and flags +""" + +woff2FlagsSize = sstruct.calcsize(woff2FlagsFormat) + +woff2UnknownTagFormat = """ + > # big endian + tag: 4s # 4-byte tag (optional) +""" + +woff2UnknownTagSize = sstruct.calcsize(woff2UnknownTagFormat) + +woff2UnknownTagIndex = 0x3F + +woff2Base128MaxSize = 5 +woff2DirectoryEntryMaxSize = woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize + +woff2TransformedTableTags = ('glyf', 'loca') + +woff2GlyfTableFormat = """ + > # big endian + version: L # = 0x00000000 + numGlyphs: H # Number of glyphs + indexFormat: H # Offset format for loca table + nContourStreamSize: L # Size of nContour stream + nPointsStreamSize: L # Size of nPoints stream + flagStreamSize: L # Size of flag stream + glyphStreamSize: L # Size of glyph stream + compositeStreamSize: L # Size of composite stream + bboxStreamSize: L # Comnined size of bboxBitmap and bboxStream + instructionStreamSize: L # Size of instruction stream +""" + +woff2GlyfTableFormatSize = sstruct.calcsize(woff2GlyfTableFormat) + +bboxFormat = """ + > # big endian + xMin: h + yMin: h + xMax: h + yMax: h +""" + + +def getKnownTagIndex(tag): + """Return index of 'tag' in woff2KnownTags list. 
Return 63 if not found.""" + for i in range(len(woff2KnownTags)): + if tag == woff2KnownTags[i]: + return i + return woff2UnknownTagIndex + + +class WOFF2DirectoryEntry(DirectoryEntry): + + def fromFile(self, file): + pos = file.tell() + data = file.read(woff2DirectoryEntryMaxSize) + left = self.fromString(data) + consumed = len(data) - len(left) + file.seek(pos + consumed) + + def fromString(self, data): + if len(data) < 1: + raise TTLibError("can't read table 'flags': not enough data") + dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self) + if self.flags & 0x3F == 0x3F: + # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value + if len(data) < woff2UnknownTagSize: + raise TTLibError("can't read table 'tag': not enough data") + dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self) + else: + # otherwise, tag is derived from a fixed 'Known Tags' table + self.tag = woff2KnownTags[self.flags & 0x3F] + self.tag = Tag(self.tag) + if self.flags & 0xC0 != 0: + raise TTLibError('bits 6-7 are reserved and must be 0') + self.origLength, data = unpackBase128(data) + self.length = self.origLength + if self.tag in woff2TransformedTableTags: + self.length, data = unpackBase128(data) + if self.tag == 'loca' and self.length != 0: + raise TTLibError( + "the transformLength of the 'loca' table must be 0") + # return left over data + return data + + def toString(self): + data = bytechr(self.flags) + if (self.flags & 0x3F) == 0x3F: + data += struct.pack('>4s', self.tag.tobytes()) + data += packBase128(self.origLength) + if self.tag in woff2TransformedTableTags: + data += packBase128(self.length) + return data + + +class WOFF2LocaTable(getTableClass('loca')): + """Same as parent class. The only difference is that it attempts to preserve + the 'indexFormat' as encoded in the WOFF2 glyf table. 
+ """ + + def __init__(self, tag=None): + self.tableTag = Tag(tag or 'loca') + + def compile(self, ttFont): + try: + max_location = max(self.locations) + except AttributeError: + self.set([]) + max_location = 0 + if 'glyf' in ttFont and hasattr(ttFont['glyf'], 'indexFormat'): + # copile loca using the indexFormat specified in the WOFF2 glyf table + indexFormat = ttFont['glyf'].indexFormat + if indexFormat == 0: + if max_location >= 0x20000: + raise TTLibError("indexFormat is 0 but local offsets > 0x20000") + if not all(l % 2 == 0 for l in self.locations): + raise TTLibError("indexFormat is 0 but local offsets not multiples of 2") + locations = array.array("H") + for i in range(len(self.locations)): + locations.append(self.locations[i] // 2) + else: + locations = array.array("I", self.locations) + if sys.byteorder != "big": + locations.byteswap() + data = locations.tostring() + else: + # use the most compact indexFormat given the current glyph offsets + data = super(WOFF2LocaTable, self).compile(ttFont) + return data + + +class WOFF2GlyfTable(getTableClass('glyf')): + """Decoder/Encoder for WOFF2 'glyf' table transform.""" + + subStreams = ( + 'nContourStream', 'nPointsStream', 'flagStream', 'glyphStream', + 'compositeStream', 'bboxStream', 'instructionStream') + + def __init__(self, tag=None): + self.tableTag = Tag(tag or 'glyf') + + def reconstruct(self, data, ttFont): + """ Decompile transformed 'glyf' data. 
""" + inputDataSize = len(data) + + if inputDataSize < woff2GlyfTableFormatSize: + raise TTLibError("not enough 'glyf' data") + dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self) + offset = woff2GlyfTableFormatSize + + for stream in self.subStreams: + size = getattr(self, stream + 'Size') + setattr(self, stream, data[:size]) + data = data[size:] + offset += size + + if offset != inputDataSize: + raise TTLibError( + "incorrect size of transformed 'glyf' table: expected %d, received %d bytes" + % (offset, inputDataSize)) + + bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 + bboxBitmap = self.bboxStream[:bboxBitmapSize] + self.bboxBitmap = array.array('B', bboxBitmap) + self.bboxStream = self.bboxStream[bboxBitmapSize:] + + self.nContourStream = array.array("h", self.nContourStream) + if sys.byteorder != "big": + self.nContourStream.byteswap() + assert len(self.nContourStream) == self.numGlyphs + + if 'head' in ttFont: + ttFont['head'].indexToLocFormat = self.indexFormat + try: + self.glyphOrder = ttFont.getGlyphOrder() + except: + self.glyphOrder = None + if self.glyphOrder is None: + self.glyphOrder = [".notdef"] + self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)]) + else: + if len(self.glyphOrder) != self.numGlyphs: + raise TTLibError( + "incorrect glyphOrder: expected %d glyphs, found %d" % + (len(self.glyphOrder), self.numGlyphs)) + + glyphs = self.glyphs = {} + for glyphID, glyphName in enumerate(self.glyphOrder): + glyph = self._decodeGlyph(glyphID) + glyphs[glyphName] = glyph + + def transform(self, ttFont): + """ Return transformed 'glyf' data """ + self.numGlyphs = len(self.glyphs) + if not hasattr(self, "glyphOrder"): + try: + self.glyphOrder = ttFont.getGlyphOrder() + except: + self.glyphOrder = None + if self.glyphOrder is None: + self.glyphOrder = [".notdef"] + self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)]) + if len(self.glyphOrder) != self.numGlyphs: + raise TTLibError( + "incorrect 
glyphOrder: expected %d glyphs, found %d" % + (len(self.glyphOrder), self.numGlyphs)) + + if 'maxp' in ttFont: + ttFont['maxp'].numGlyphs = self.numGlyphs + self.indexFormat = ttFont['head'].indexToLocFormat + + for stream in self.subStreams: + setattr(self, stream, b"") + bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 + self.bboxBitmap = array.array('B', [0]*bboxBitmapSize) + + for glyphID in range(self.numGlyphs): + self._encodeGlyph(glyphID) + + self.bboxStream = self.bboxBitmap.tostring() + self.bboxStream + for stream in self.subStreams: + setattr(self, stream + 'Size', len(getattr(self, stream))) + self.version = 0 + data = sstruct.pack(woff2GlyfTableFormat, self) + data += bytesjoin([getattr(self, s) for s in self.subStreams]) + return data + + def _decodeGlyph(self, glyphID): + glyph = getTableModule('glyf').Glyph() + glyph.numberOfContours = self.nContourStream[glyphID] + if glyph.numberOfContours == 0: + return glyph + elif glyph.isComposite(): + self._decodeComponents(glyph) + else: + self._decodeCoordinates(glyph) + self._decodeBBox(glyphID, glyph) + return glyph + + def _decodeComponents(self, glyph): + data = self.compositeStream + glyph.components = [] + more = 1 + haveInstructions = 0 + while more: + component = getTableModule('glyf').GlyphComponent() + more, haveInstr, data = component.decompile(data, self) + haveInstructions = haveInstructions | haveInstr + glyph.components.append(component) + self.compositeStream = data + if haveInstructions: + self._decodeInstructions(glyph) + + def _decodeCoordinates(self, glyph): + data = self.nPointsStream + endPtsOfContours = [] + endPoint = -1 + for i in range(glyph.numberOfContours): + ptsOfContour, data = unpack255UShort(data) + endPoint += ptsOfContour + endPtsOfContours.append(endPoint) + glyph.endPtsOfContours = endPtsOfContours + self.nPointsStream = data + self._decodeTriplets(glyph) + self._decodeInstructions(glyph) + + def _decodeInstructions(self, glyph): + glyphStream = self.glyphStream + 
instructionStream = self.instructionStream + instructionLength, glyphStream = unpack255UShort(glyphStream) + glyph.program = ttProgram.Program() + glyph.program.fromBytecode(instructionStream[:instructionLength]) + self.glyphStream = glyphStream + self.instructionStream = instructionStream[instructionLength:] + + def _decodeBBox(self, glyphID, glyph): + haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7))) + if glyph.isComposite() and not haveBBox: + raise TTLibError('no bbox values for composite glyph %d' % glyphID) + if haveBBox: + dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph) + else: + glyph.recalcBounds(self) + + def _decodeTriplets(self, glyph): + + def withSign(flag, baseval): + assert 0 <= baseval and baseval < 65536, 'integer overflow' + return baseval if flag & 1 else -baseval + + nPoints = glyph.endPtsOfContours[-1] + 1 + flagSize = nPoints + if flagSize > len(self.flagStream): + raise TTLibError("not enough 'flagStream' data") + flagsData = self.flagStream[:flagSize] + self.flagStream = self.flagStream[flagSize:] + flags = array.array('B', flagsData) + + triplets = array.array('B', self.glyphStream) + nTriplets = len(triplets) + assert nPoints <= nTriplets + + x = 0 + y = 0 + glyph.coordinates = getTableModule('glyf').GlyphCoordinates.zeros(nPoints) + glyph.flags = array.array("B") + tripletIndex = 0 + for i in range(nPoints): + flag = flags[i] + onCurve = not bool(flag >> 7) + flag &= 0x7f + if flag < 84: + nBytes = 1 + elif flag < 120: + nBytes = 2 + elif flag < 124: + nBytes = 3 + else: + nBytes = 4 + assert ((tripletIndex + nBytes) <= nTriplets) + if flag < 10: + dx = 0 + dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex]) + elif flag < 20: + dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex]) + dy = 0 + elif flag < 84: + b0 = flag - 20 + b1 = triplets[tripletIndex] + dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4)) + dy = withSign(flag >> 1, 1 + ((b0 & 0x0c) << 2) + 
(b1 & 0x0f)) + elif flag < 120: + b0 = flag - 84 + dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex]) + dy = withSign(flag >> 1, + 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1]) + elif flag < 124: + b2 = triplets[tripletIndex + 1] + dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4)) + dy = withSign(flag >> 1, + ((b2 & 0x0f) << 8) + triplets[tripletIndex + 2]) + else: + dx = withSign(flag, + (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1]) + dy = withSign(flag >> 1, + (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3]) + tripletIndex += nBytes + x += dx + y += dy + glyph.coordinates[i] = (x, y) + glyph.flags.append(int(onCurve)) + bytesConsumed = tripletIndex + self.glyphStream = self.glyphStream[bytesConsumed:] + + def _encodeGlyph(self, glyphID): + glyphName = self.getGlyphName(glyphID) + glyph = self[glyphName] + self.nContourStream += struct.pack(">h", glyph.numberOfContours) + if glyph.numberOfContours == 0: + return + elif glyph.isComposite(): + self._encodeComponents(glyph) + else: + self._encodeCoordinates(glyph) + self._encodeBBox(glyphID, glyph) + + def _encodeComponents(self, glyph): + lastcomponent = len(glyph.components) - 1 + more = 1 + haveInstructions = 0 + for i in range(len(glyph.components)): + if i == lastcomponent: + haveInstructions = hasattr(glyph, "program") + more = 0 + component = glyph.components[i] + self.compositeStream += component.compile(more, haveInstructions, self) + if haveInstructions: + self._encodeInstructions(glyph) + + def _encodeCoordinates(self, glyph): + lastEndPoint = -1 + for endPoint in glyph.endPtsOfContours: + ptsOfContour = endPoint - lastEndPoint + self.nPointsStream += pack255UShort(ptsOfContour) + lastEndPoint = endPoint + self._encodeTriplets(glyph) + self._encodeInstructions(glyph) + + def _encodeInstructions(self, glyph): + instructions = glyph.program.getBytecode() + self.glyphStream += pack255UShort(len(instructions)) + self.instructionStream += 
instructions + + def _encodeBBox(self, glyphID, glyph): + assert glyph.numberOfContours != 0, "empty glyph has no bbox" + if not glyph.isComposite(): + # for simple glyphs, compare the encoded bounding box info with the calculated + # values, and if they match omit the bounding box info + currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax + calculatedBBox = calcIntBounds(glyph.coordinates) + if currentBBox == calculatedBBox: + return + self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7) + self.bboxStream += sstruct.pack(bboxFormat, glyph) + + def _encodeTriplets(self, glyph): + assert len(glyph.coordinates) == len(glyph.flags) + coordinates = glyph.coordinates.copy() + coordinates.absoluteToRelative() + + flags = array.array('B') + triplets = array.array('B') + for i in range(len(coordinates)): + onCurve = glyph.flags[i] + x, y = coordinates[i] + absX = abs(x) + absY = abs(y) + onCurveBit = 0 if onCurve else 128 + xSignBit = 0 if (x < 0) else 1 + ySignBit = 0 if (y < 0) else 1 + xySignBits = xSignBit + 2 * ySignBit + + if x == 0 and absY < 1280: + flags.append(onCurveBit + ((absY & 0xf00) >> 7) + ySignBit) + triplets.append(absY & 0xff) + elif y == 0 and absX < 1280: + flags.append(onCurveBit + 10 + ((absX & 0xf00) >> 7) + xSignBit) + triplets.append(absX & 0xff) + elif absX < 65 and absY < 65: + flags.append(onCurveBit + 20 + ((absX - 1) & 0x30) + (((absY - 1) & 0x30) >> 2) + xySignBits) + triplets.append((((absX - 1) & 0xf) << 4) | ((absY - 1) & 0xf)) + elif absX < 769 and absY < 769: + flags.append(onCurveBit + 84 + 12 * (((absX - 1) & 0x300) >> 8) + (((absY - 1) & 0x300) >> 6) + xySignBits) + triplets.append((absX - 1) & 0xff) + triplets.append((absY - 1) & 0xff) + elif absX < 4096 and absY < 4096: + flags.append(onCurveBit + 120 + xySignBits) + triplets.append(absX >> 4) + triplets.append(((absX & 0xf) << 4) | (absY >> 8)) + triplets.append(absY & 0xff) + else: + flags.append(onCurveBit + 124 + xySignBits) + triplets.append(absX >> 8) + 
triplets.append(absX & 0xff) + triplets.append(absY >> 8) + triplets.append(absY & 0xff) + + self.flagStream += flags.tostring() + self.glyphStream += triplets.tostring() + + +class WOFF2FlavorData(WOFFFlavorData): + + Flavor = 'woff2' + + def __init__(self, reader=None): + if not haveBrotli: + raise ImportError("No module named brotli") + self.majorVersion = None + self.minorVersion = None + self.metaData = None + self.privData = None + if reader: + self.majorVersion = reader.majorVersion + self.minorVersion = reader.minorVersion + if reader.metaLength: + reader.file.seek(reader.metaOffset) + rawData = reader.file.read(reader.metaLength) + assert len(rawData) == reader.metaLength + data = brotli.decompress(rawData) + assert len(data) == reader.metaOrigLength + self.metaData = data + if reader.privLength: + reader.file.seek(reader.privOffset) + data = reader.file.read(reader.privLength) + assert len(data) == reader.privLength + self.privData = data + + +def unpackBase128(data): + r""" Read one to five bytes from UIntBase128-encoded input string, and return + a tuple containing the decoded integer plus any leftover data. + + >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00") + True + >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295 + True + >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + File "<stdin>", line 1, in ? + TTLibError: UIntBase128 value must not start with leading zeros + >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + File "<stdin>", line 1, in ? + TTLibError: UIntBase128-encoded sequence is longer than 5 bytes + >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + File "<stdin>", line 1, in ? 
+ TTLibError: UIntBase128 value exceeds 2**32-1 + """ + if len(data) == 0: + raise TTLibError('not enough data to unpack UIntBase128') + result = 0 + if byteord(data[0]) == 0x80: + # font must be rejected if UIntBase128 value starts with 0x80 + raise TTLibError('UIntBase128 value must not start with leading zeros') + for i in range(woff2Base128MaxSize): + if len(data) == 0: + raise TTLibError('not enough data to unpack UIntBase128') + code = byteord(data[0]) + data = data[1:] + # if any of the top seven bits are set then we're about to overflow + if result & 0xFE000000: + raise TTLibError('UIntBase128 value exceeds 2**32-1') + # set current value = old value times 128 bitwise-or (byte bitwise-and 127) + result = (result << 7) | (code & 0x7f) + # repeat until the most significant bit of byte is false + if (code & 0x80) == 0: + # return result plus left over data + return result, data + # make sure not to exceed the size bound + raise TTLibError('UIntBase128-encoded sequence is longer than 5 bytes') + + +def base128Size(n): + """ Return the length in bytes of a UIntBase128-encoded sequence with value n. + + >>> base128Size(0) + 1 + >>> base128Size(24567) + 3 + >>> base128Size(2**32-1) + 5 + """ + assert n >= 0 + size = 1 + while n >= 128: + size += 1 + n >>= 7 + return size + + +def packBase128(n): + r""" Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of + bytes using UIntBase128 variable-length encoding. Produce the shortest possible + encoding. 
+ + >>> packBase128(63) == b"\x3f" + True + >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f' + True + """ + if n < 0 or n >= 2**32: + raise TTLibError( + "UIntBase128 format requires 0 <= integer <= 2**32-1") + data = b'' + size = base128Size(n) + for i in range(size): + b = (n >> (7 * (size - i - 1))) & 0x7f + if i < size - 1: + b |= 0x80 + data += struct.pack('B', b) + return data + + +def unpack255UShort(data): + """ Read one to three bytes from 255UInt16-encoded input string, and return a + tuple containing the decoded integer plus any leftover data. + + >>> unpack255UShort(bytechr(252))[0] + 252 + + Note that some numbers (e.g. 506) can have multiple encodings: + >>> unpack255UShort(struct.pack("BB", 254, 0))[0] + 506 + >>> unpack255UShort(struct.pack("BB", 255, 253))[0] + 506 + >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0] + 506 + """ + code = byteord(data[:1]) + data = data[1:] + if code == 253: + # read two more bytes as an unsigned short + if len(data) < 2: + raise TTLibError('not enough data to unpack 255UInt16') + result, = struct.unpack(">H", data[:2]) + data = data[2:] + elif code == 254: + # read another byte, plus 253 * 2 + if len(data) == 0: + raise TTLibError('not enough data to unpack 255UInt16') + result = byteord(data[:1]) + result += 506 + data = data[1:] + elif code == 255: + # read another byte, plus 253 + if len(data) == 0: + raise TTLibError('not enough data to unpack 255UInt16') + result = byteord(data[:1]) + result += 253 + data = data[1:] + else: + # leave as is if lower than 253 + result = code + # return result plus left over data + return result, data + + +def pack255UShort(value): + r""" Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring + using 255UInt16 variable-length encoding. 
+ + >>> pack255UShort(252) == b'\xfc' + True + >>> pack255UShort(506) == b'\xfe\x00' + True + >>> pack255UShort(762) == b'\xfd\x02\xfa' + True + """ + if value < 0 or value > 0xFFFF: + raise TTLibError( + "255UInt16 format requires 0 <= integer <= 65535") + if value < 253: + return struct.pack(">B", value) + elif value < 506: + return struct.pack(">BB", 255, value - 253) + elif value < 762: + return struct.pack(">BB", 254, value - 506) + else: + return struct.pack(">BH", 253, value) + + +if __name__ == "__main__": + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Lib/fontTools/ttLib/woff2_test.py fonttools-3.0/Lib/fontTools/ttLib/woff2_test.py --- fonttools-2.4/Lib/fontTools/ttLib/woff2_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/woff2_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,747 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools import ttLib +from .woff2 import (WOFF2Reader, woff2DirectorySize, woff2DirectoryFormat, + woff2FlagsSize, woff2UnknownTagSize, woff2Base128MaxSize, WOFF2DirectoryEntry, + getKnownTagIndex, packBase128, base128Size, woff2UnknownTagIndex, + WOFF2FlavorData, woff2TransformedTableTags, WOFF2GlyfTable, WOFF2LocaTable, + WOFF2Writer) +import unittest +import sstruct +import os +import random +import copy +from collections import OrderedDict + +haveBrotli = False +try: + import brotli + haveBrotli = True +except ImportError: + pass + + +# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires +# deprecation warnings if a program uses the old name. 
+if not hasattr(unittest.TestCase, 'assertRaisesRegex'): + unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp + + +current_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +data_dir = os.path.join(current_dir, 'testdata') +TTX = os.path.join(data_dir, 'TestTTF-Regular.ttx') +OTX = os.path.join(data_dir, 'TestOTF-Regular.otx') +METADATA = os.path.join(data_dir, 'test_woff2_metadata.xml') + +TT_WOFF2 = BytesIO() +CFF_WOFF2 = BytesIO() + + +def setUpModule(): + if not haveBrotli: + raise unittest.SkipTest("No module named brotli") + assert os.path.exists(TTX) + assert os.path.exists(OTX) + # import TT-flavoured test font and save it as WOFF2 + ttf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + ttf.importXML(TTX, quiet=True) + ttf.flavor = "woff2" + ttf.save(TT_WOFF2, reorderTables=None) + # import CFF-flavoured test font and save it as WOFF2 + otf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + otf.importXML(OTX, quiet=True) + otf.flavor = "woff2" + otf.save(CFF_WOFF2, reorderTables=None) + + +class WOFF2ReaderTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.file = BytesIO(CFF_WOFF2.getvalue()) + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + cls.font.importXML(OTX, quiet=True) + + def setUp(self): + self.file.seek(0) + + def test_bad_signature(self): + with self.assertRaisesRegex(ttLib.TTLibError, 'bad signature'): + WOFF2Reader(BytesIO(b"wOFF")) + + def test_not_enough_data_header(self): + incomplete_header = self.file.read(woff2DirectorySize - 1) + with self.assertRaisesRegex(ttLib.TTLibError, 'not enough data'): + WOFF2Reader(BytesIO(incomplete_header)) + + def test_incorrect_compressed_size(self): + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + header['totalCompressedSize'] = 0 + data = sstruct.pack(woff2DirectoryFormat, header) + with self.assertRaises(brotli.error): + WOFF2Reader(BytesIO(data + 
self.file.read())) + + def test_incorrect_uncompressed_size(self): + decompress_backup = brotli.decompress + brotli.decompress = lambda data: b"" # return empty byte string + with self.assertRaisesRegex(ttLib.TTLibError, 'unexpected size for decompressed'): + WOFF2Reader(self.file) + brotli.decompress = decompress_backup + + def test_incorrect_file_size(self): + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + header['length'] -= 1 + data = sstruct.pack(woff2DirectoryFormat, header) + with self.assertRaisesRegex( + ttLib.TTLibError, "doesn't match the actual file size"): + WOFF2Reader(BytesIO(data + self.file.read())) + + def test_num_tables(self): + tags = [t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')] + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + self.assertEqual(header['numTables'], len(tags)) + + def test_table_tags(self): + tags = set([t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')]) + reader = WOFF2Reader(self.file) + self.assertEqual(set(reader.keys()), tags) + + def test_get_normal_tables(self): + woff2Reader = WOFF2Reader(self.file) + specialTags = woff2TransformedTableTags + ('head', 'GlyphOrder', 'DSIG') + for tag in [t for t in self.font.keys() if t not in specialTags]: + origData = self.font.getTableData(tag) + decompressedData = woff2Reader[tag] + self.assertEqual(origData, decompressedData) + + def test_reconstruct_unknown(self): + reader = WOFF2Reader(self.file) + with self.assertRaisesRegex(ttLib.TTLibError, 'transform for table .* unknown'): + reader.reconstructTable('ZZZZ') + + +class WOFF2ReaderTTFTest(WOFF2ReaderTest): + """ Tests specific to TT-flavored fonts. 
""" + + @classmethod + def setUpClass(cls): + cls.file = BytesIO(TT_WOFF2.getvalue()) + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + cls.font.importXML(TTX, quiet=True) + + def setUp(self): + self.file.seek(0) + + def test_reconstruct_glyf(self): + woff2Reader = WOFF2Reader(self.file) + reconstructedData = woff2Reader['glyf'] + self.assertEqual(self.font.getTableData('glyf'), reconstructedData) + + def test_reconstruct_loca(self): + woff2Reader = WOFF2Reader(self.file) + reconstructedData = woff2Reader['loca'] + self.assertEqual(self.font.getTableData('loca'), reconstructedData) + self.assertTrue(hasattr(woff2Reader.tables['glyf'], 'data')) + + def test_reconstruct_loca_not_match_orig_size(self): + reader = WOFF2Reader(self.file) + reader.tables['loca'].origLength -= 1 + with self.assertRaisesRegex( + ttLib.TTLibError, "'loca' table doesn't match original size"): + reader.reconstructTable('loca') + + +def normalise_table(font, tag, padding=4): + """ Return normalised table data. Keep 'font' instance unmodified. 
""" + assert tag in ('glyf', 'loca', 'head') + assert tag in font + if tag == 'head': + origHeadFlags = font['head'].flags + font['head'].flags |= (1 << 11) + tableData = font['head'].compile(font) + if font.sfntVersion in ("\x00\x01\x00\x00", "true"): + assert {'glyf', 'loca', 'head'}.issubset(font.keys()) + origIndexFormat = font['head'].indexToLocFormat + if hasattr(font['loca'], 'locations'): + origLocations = font['loca'].locations[:] + else: + origLocations = [] + glyfTable = ttLib.getTableClass('glyf')() + glyfTable.decompile(font.getTableData('glyf'), font) + glyfTable.padding = padding + if tag == 'glyf': + tableData = glyfTable.compile(font) + elif tag == 'loca': + glyfTable.compile(font) + tableData = font['loca'].compile(font) + if tag == 'head': + glyfTable.compile(font) + font['loca'].compile(font) + tableData = font['head'].compile(font) + font['head'].indexToLocFormat = origIndexFormat + font['loca'].set(origLocations) + if tag == 'head': + font['head'].flags = origHeadFlags + return tableData + + +def normalise_font(font, padding=4): + """ Return normalised font data. Keep 'font' instance unmodified. 
""" + # drop DSIG but keep a copy + DSIG_copy = copy.deepcopy(font['DSIG']) + del font['DSIG'] + # ovverride TTFont attributes + origFlavor = font.flavor + origRecalcBBoxes = font.recalcBBoxes + origRecalcTimestamp = font.recalcTimestamp + origLazy = font.lazy + font.flavor = None + font.recalcBBoxes = False + font.recalcTimestamp = False + font.lazy = True + # save font to temporary stream + infile = BytesIO() + font.save(infile) + infile.seek(0) + # reorder tables alphabetically + outfile = BytesIO() + reader = ttLib.sfnt.SFNTReader(infile) + writer = ttLib.sfnt.SFNTWriter( + outfile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) + for tag in sorted(reader.keys()): + if tag in woff2TransformedTableTags + ('head',): + writer[tag] = normalise_table(font, tag, padding) + else: + writer[tag] = reader[tag] + writer.close() + # restore font attributes + font['DSIG'] = DSIG_copy + font.flavor = origFlavor + font.recalcBBoxes = origRecalcBBoxes + font.recalcTimestamp = origRecalcTimestamp + font.lazy = origLazy + return outfile.getvalue() + + +class WOFF2DirectoryEntryTest(unittest.TestCase): + + def setUp(self): + self.entry = WOFF2DirectoryEntry() + + def test_not_enough_data_table_flags(self): + with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'flags'"): + self.entry.fromString(b"") + + def test_not_enough_data_table_tag(self): + incompleteData = bytearray([0x3F, 0, 0, 0]) + with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'tag'"): + self.entry.fromString(bytes(incompleteData)) + + def test_table_reserved_flags(self): + with self.assertRaisesRegex(ttLib.TTLibError, "bits 6-7 are reserved"): + self.entry.fromString(bytechr(0xC0)) + + def test_loca_zero_transformLength(self): + data = bytechr(getKnownTagIndex('loca')) # flags + data += packBase128(random.randint(1, 100)) # origLength + data += packBase128(1) # non-zero transformLength + with self.assertRaisesRegex( + ttLib.TTLibError, "transformLength of the 
'loca' table must be 0"): + self.entry.fromString(data) + + def test_fromFile(self): + unknownTag = Tag('ZZZZ') + data = bytechr(getKnownTagIndex(unknownTag)) + data += unknownTag.tobytes() + data += packBase128(random.randint(1, 100)) + expectedPos = len(data) + f = BytesIO(data + b'\0'*100) + self.entry.fromFile(f) + self.assertEqual(f.tell(), expectedPos) + + def test_transformed_toString(self): + self.entry.tag = Tag('glyf') + self.entry.flags = getKnownTagIndex(self.entry.tag) + self.entry.origLength = random.randint(101, 200) + self.entry.length = random.randint(1, 100) + expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength) + + base128Size(self.entry.length)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + def test_known_toString(self): + self.entry.tag = Tag('head') + self.entry.flags = getKnownTagIndex(self.entry.tag) + self.entry.origLength = 54 + expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + def test_unknown_toString(self): + self.entry.tag = Tag('ZZZZ') + self.entry.flags = woff2UnknownTagIndex + self.entry.origLength = random.randint(1, 100) + expectedSize = (woff2FlagsSize + woff2UnknownTagSize + + base128Size(self.entry.origLength)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + +class DummyReader(WOFF2Reader): + + def __init__(self, file, checkChecksums=1, fontNumber=-1): + self.file = file + for attr in ('majorVersion', 'minorVersion', 'metaOffset', 'metaLength', + 'metaOrigLength', 'privLength', 'privOffset'): + setattr(self, attr, 0) + + +class WOFF2FlavorDataTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + assert os.path.exists(METADATA) + with open(METADATA, 'rb') as f: + cls.xml_metadata = f.read() + cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) + # make random byte strings; font data must be 4-byte aligned + 
cls.fontdata = bytes(bytearray(random.sample(range(0, 256), 80))) + cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) + + def setUp(self): + self.file = BytesIO(self.fontdata) + self.file.seek(0, 2) + + def test_get_metaData_no_privData(self): + self.file.write(self.compressed_metadata) + reader = DummyReader(self.file) + reader.metaOffset = len(self.fontdata) + reader.metaLength = len(self.compressed_metadata) + reader.metaOrigLength = len(self.xml_metadata) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.xml_metadata, flavorData.metaData) + + def test_get_privData_no_metaData(self): + self.file.write(self.privData) + reader = DummyReader(self.file) + reader.privOffset = len(self.fontdata) + reader.privLength = len(self.privData) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.privData, flavorData.privData) + + def test_get_metaData_and_privData(self): + self.file.write(self.compressed_metadata + self.privData) + reader = DummyReader(self.file) + reader.metaOffset = len(self.fontdata) + reader.metaLength = len(self.compressed_metadata) + reader.metaOrigLength = len(self.xml_metadata) + reader.privOffset = reader.metaOffset + reader.metaLength + reader.privLength = len(self.privData) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.xml_metadata, flavorData.metaData) + self.assertEqual(self.privData, flavorData.privData) + + def test_get_major_minorVersion(self): + reader = DummyReader(self.file) + reader.majorVersion = reader.minorVersion = 1 + flavorData = WOFF2FlavorData(reader) + self.assertEqual(flavorData.majorVersion, 1) + self.assertEqual(flavorData.minorVersion, 1) + + +class WOFF2WriterTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") + cls.font.importXML(OTX, quiet=True) + cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] + cls.numTables = len(cls.tags) + cls.file = 
BytesIO(CFF_WOFF2.getvalue()) + cls.file.seek(0, 2) + cls.length = (cls.file.tell() + 3) & ~3 + cls.setUpFlavorData() + + @classmethod + def setUpFlavorData(cls): + assert os.path.exists(METADATA) + with open(METADATA, 'rb') as f: + cls.xml_metadata = f.read() + cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) + cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) + + def setUp(self): + self.file.seek(0) + self.writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion) + + def test_DSIG_dropped(self): + self.writer['DSIG'] = b"\0" + self.assertEqual(len(self.writer.tables), 0) + self.assertEqual(self.writer.numTables, self.numTables-1) + + def test_no_rewrite_table(self): + self.writer['ZZZZ'] = b"\0" + with self.assertRaisesRegex(ttLib.TTLibError, "cannot rewrite"): + self.writer['ZZZZ'] = b"\0" + + def test_num_tables(self): + self.writer['ABCD'] = b"\0" + with self.assertRaisesRegex(ttLib.TTLibError, "wrong number of tables"): + self.writer.close() + + def test_required_tables(self): + font = ttLib.TTFont(flavor="woff2") + with self.assertRaisesRegex(ttLib.TTLibError, "missing required table"): + font.save(BytesIO()) + + def test_head_transform_flag(self): + headData = self.font.getTableData('head') + origFlags = byteord(headData[16]) + woff2font = ttLib.TTFont(self.file) + newHeadData = woff2font.getTableData('head') + modifiedFlags = byteord(newHeadData[16]) + self.assertNotEqual(origFlags, modifiedFlags) + restoredFlags = modifiedFlags & ~0x08 # turn off bit 11 + self.assertEqual(origFlags, restoredFlags) + + def test_tables_sorted_alphabetically(self): + expected = sorted([t for t in self.tags if t != 'DSIG']) + woff2font = ttLib.TTFont(self.file) + self.assertEqual(expected, list(woff2font.reader.keys())) + + def test_checksums(self): + normFile = BytesIO(normalise_font(self.font, padding=4)) + normFile.seek(0) + normFont = ttLib.TTFont(normFile, checkChecksums=2) + w2font = ttLib.TTFont(self.file) 
+ # force reconstructing glyf table using 4-byte padding + w2font.reader.padding = 4 + for tag in [t for t in self.tags if t != 'DSIG']: + w2data = w2font.reader[tag] + normData = normFont.reader[tag] + if tag == "head": + w2data = w2data[:8] + b'\0\0\0\0' + w2data[12:] + normData = normData[:8] + b'\0\0\0\0' + normData[12:] + w2CheckSum = ttLib.sfnt.calcChecksum(w2data) + normCheckSum = ttLib.sfnt.calcChecksum(normData) + self.assertEqual(w2CheckSum, normCheckSum) + normCheckSumAdjustment = normFont['head'].checkSumAdjustment + self.assertEqual(normCheckSumAdjustment, w2font['head'].checkSumAdjustment) + + def test_calcSFNTChecksumsLengthsAndOffsets(self): + normFont = ttLib.TTFont(BytesIO(normalise_font(self.font, padding=4))) + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer._normaliseGlyfAndLoca(padding=4) + self.writer._setHeadTransformFlag() + self.writer.tables = OrderedDict(sorted(self.writer.tables.items())) + self.writer._calcSFNTChecksumsLengthsAndOffsets() + for tag, entry in normFont.reader.tables.items(): + self.assertEqual(entry.offset, self.writer.tables[tag].origOffset) + self.assertEqual(entry.length, self.writer.tables[tag].origLength) + self.assertEqual(entry.checkSum, self.writer.tables[tag].checkSum) + + def test_bad_sfntVersion(self): + for i in range(self.numTables): + self.writer[bytechr(65 + i)*4] = b"\0" + self.writer.sfntVersion = 'ZZZZ' + with self.assertRaisesRegex(ttLib.TTLibError, "bad sfntVersion"): + self.writer.close() + + def test_calcTotalSize_no_flavorData(self): + expected = self.length + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_metaData(self): + expected = self.length + len(self.compressed_metadata) + flavorData = self.writer.flavorData = WOFF2FlavorData() + 
flavorData.metaData = self.xml_metadata + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_privData(self): + expected = self.length + len(self.privData) + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.privData = self.privData + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_metaData_and_privData(self): + metaDataLength = (len(self.compressed_metadata) + 3) & ~3 + expected = self.length + metaDataLength + len(self.privData) + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.metaData = self.xml_metadata + flavorData.privData = self.privData + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_getVersion(self): + # no version + self.assertEqual((0, 0), self.writer._getVersion()) + # version from head.fontRevision + fontRevision = self.font['head'].fontRevision + versionTuple = tuple(int(i) for i in str(fontRevision).split(".")) + entry = self.writer.tables['head'] = ttLib.getTableClass('head')() + entry.data = self.font.getTableData('head') + self.assertEqual(versionTuple, self.writer._getVersion()) + # version from writer.flavorData + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.majorVersion, flavorData.minorVersion = (10, 11) + self.assertEqual((10, 11), self.writer._getVersion()) + + +class WOFF2WriterTTFTest(WOFF2WriterTest): + + @classmethod + def setUpClass(cls): + cls.font = 
ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") + cls.font.importXML(TTX, quiet=True) + cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] + cls.numTables = len(cls.tags) + cls.file = BytesIO(TT_WOFF2.getvalue()) + cls.file.seek(0, 2) + cls.length = (cls.file.tell() + 3) & ~3 + cls.setUpFlavorData() + + def test_normaliseGlyfAndLoca(self): + normTables = {} + for tag in ('head', 'loca', 'glyf'): + normTables[tag] = normalise_table(self.font, tag, padding=4) + for tag in self.tags: + tableData = self.font.getTableData(tag) + self.writer[tag] = tableData + if tag in normTables: + self.assertNotEqual(tableData, normTables[tag]) + self.writer._normaliseGlyfAndLoca(padding=4) + self.writer._setHeadTransformFlag() + for tag in normTables: + self.assertEqual(self.writer.tables[tag].data, normTables[tag]) + + +class WOFF2LocaTableTest(unittest.TestCase): + + def setUp(self): + self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font['head'] = ttLib.getTableClass('head') + font['loca'] = WOFF2LocaTable() + font['glyf'] = WOFF2GlyfTable() + + def test_compile_short_loca(self): + locaTable = self.font['loca'] + locaTable.set(list(range(0, 0x20000, 2))) + self.font['glyf'].indexFormat = 0 + locaData = locaTable.compile(self.font) + self.assertEqual(len(locaData), 0x20000) + + def test_compile_short_loca_overflow(self): + locaTable = self.font['loca'] + locaTable.set(list(range(0x20000 + 1))) + self.font['glyf'].indexFormat = 0 + with self.assertRaisesRegex( + ttLib.TTLibError, "indexFormat is 0 but local offsets > 0x20000"): + locaTable.compile(self.font) + + def test_compile_short_loca_not_multiples_of_2(self): + locaTable = self.font['loca'] + locaTable.set([1, 3, 5, 7]) + self.font['glyf'].indexFormat = 0 + with self.assertRaisesRegex(ttLib.TTLibError, "offsets not multiples of 2"): + locaTable.compile(self.font) + + def test_compile_long_loca(self): + locaTable = self.font['loca'] + 
locaTable.set(list(range(0x20001))) + self.font['glyf'].indexFormat = 1 + locaData = locaTable.compile(self.font) + self.assertEqual(len(locaData), 0x20001 * 4) + + def test_compile_set_indexToLocFormat_0(self): + locaTable = self.font['loca'] + # offsets are all multiples of 2 and max length is < 0x10000 + locaTable.set(list(range(0, 0x20000, 2))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(0, newIndexFormat) + + def test_compile_set_indexToLocFormat_1(self): + locaTable = self.font['loca'] + # offsets are not multiples of 2 + locaTable.set(list(range(10))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(1, newIndexFormat) + # max length is >= 0x10000 + locaTable.set(list(range(0, 0x20000 + 1, 2))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(1, newIndexFormat) + + +class WOFF2GlyfTableTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font.importXML(TTX, quiet=True) + cls.tables = {} + cls.transformedTags = ('maxp', 'head', 'loca', 'glyf') + for tag in reversed(cls.transformedTags): # compile in inverse order + cls.tables[tag] = font.getTableData(tag) + infile = BytesIO(TT_WOFF2.getvalue()) + reader = WOFF2Reader(infile) + cls.transformedGlyfData = reader.tables['glyf'].loadData( + reader.transformBuffer) + + def setUp(self): + self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font['head'] = ttLib.getTableClass('head')() + font['maxp'] = ttLib.getTableClass('maxp')() + font['loca'] = WOFF2LocaTable() + font['glyf'] = WOFF2GlyfTable() + for tag in self.transformedTags: + font[tag].decompile(self.tables[tag], font) + + def test_reconstruct_glyf_padded_4(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 4 + data = 
glyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) + self.assertEqual(normGlyfData, data) + + def test_reconstruct_glyf_padded_2(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 2 + data = glyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) + self.assertEqual(normGlyfData, data) + + def test_reconstruct_glyf_unpadded(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + data = glyfTable.compile(self.font) + self.assertEqual(self.tables['glyf'], data) + + def test_reconstruct_glyf_incorrect_glyphOrder(self): + glyfTable = WOFF2GlyfTable() + badGlyphOrder = self.font.getGlyphOrder()[:-1] + self.font.setGlyphOrder(badGlyphOrder) + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.reconstruct(self.transformedGlyfData, self.font) + + def test_reconstruct_glyf_missing_glyphOrder(self): + glyfTable = WOFF2GlyfTable() + del self.font.glyphOrder + numGlyphs = self.font['maxp'].numGlyphs + del self.font['maxp'] + glyfTable.reconstruct(self.transformedGlyfData, self.font) + expected = [".notdef"] + expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)]) + self.assertEqual(expected, glyfTable.glyphOrder) + + def test_reconstruct_loca_padded_4(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 4 + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) + self.assertEqual(normLocaData, data) + + def test_reconstruct_loca_padded_2(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + 
glyfTable.padding = 2 + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) + self.assertEqual(normLocaData, data) + + def test_reconstruct_loca_unpadded(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + self.assertEqual(self.tables['loca'], data) + + def test_reconstruct_glyf_header_not_enough_data(self): + with self.assertRaisesRegex(ttLib.TTLibError, "not enough 'glyf' data"): + WOFF2GlyfTable().reconstruct(b"", self.font) + + def test_reconstruct_glyf_table_incorrect_size(self): + msg = "incorrect size of transformed 'glyf'" + with self.assertRaisesRegex(ttLib.TTLibError, msg): + WOFF2GlyfTable().reconstruct(self.transformedGlyfData + b"\x00", self.font) + with self.assertRaisesRegex(ttLib.TTLibError, msg): + WOFF2GlyfTable().reconstruct(self.transformedGlyfData[:-1], self.font) + + def test_transform_glyf(self): + glyfTable = self.font['glyf'] + data = glyfTable.transform(self.font) + self.assertEqual(self.transformedGlyfData, data) + + def test_transform_glyf_incorrect_glyphOrder(self): + glyfTable = self.font['glyf'] + badGlyphOrder = self.font.getGlyphOrder()[:-1] + del glyfTable.glyphOrder + self.font.setGlyphOrder(badGlyphOrder) + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.transform(self.font) + glyfTable.glyphOrder = badGlyphOrder + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.transform(self.font) + + def test_transform_glyf_missing_glyphOrder(self): + glyfTable = self.font['glyf'] + del glyfTable.glyphOrder + del self.font.glyphOrder + numGlyphs = self.font['maxp'].numGlyphs + del self.font['maxp'] + glyfTable.transform(self.font) + expected = [".notdef"] + expected.extend(["glyph%.5d" % i for i in 
range(1, numGlyphs)]) + self.assertEqual(expected, glyfTable.glyphOrder) + + def test_roundtrip_glyf_reconstruct_and_transform(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + data = glyfTable.transform(self.font) + self.assertEqual(self.transformedGlyfData, data) + + def test_roundtrip_glyf_transform_and_reconstruct(self): + glyfTable = self.font['glyf'] + transformedData = glyfTable.transform(self.font) + newGlyfTable = WOFF2GlyfTable() + newGlyfTable.reconstruct(transformedData, self.font) + newGlyfTable.padding = 4 + reconstructedData = newGlyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', newGlyfTable.padding) + self.assertEqual(normGlyfData, reconstructedData) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Lib/fontTools/ttLib/xmlImport.py fonttools-3.0/Lib/fontTools/ttLib/xmlImport.py --- fonttools-2.4/Lib/fontTools/ttLib/xmlImport.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttLib/xmlImport.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,135 +0,0 @@ -from fontTools import ttLib -from fontTools.misc.textTools import safeEval -from fontTools.ttLib.tables.DefaultTable import DefaultTable -import os - - -class TTXParseError(Exception): pass - -BUFSIZE = 0x4000 - - -class ExpatParser: - - def __init__(self, ttFont, fileName, progress=None): - self.ttFont = ttFont - self.fileName = fileName - self.progress = progress - self.root = None - self.contentStack = [] - self.stackSize = 0 - - def parse(self): - file = open(self.fileName) - self.parseFile(file) - file.close() - - def parseFile(self, file): - from xml.parsers.expat import ParserCreate - parser = ParserCreate("latin1") - parser.returns_unicode = 0 - parser.StartElementHandler = self.startElementHandler - parser.EndElementHandler = self.endElementHandler - parser.CharacterDataHandler = self.characterDataHandler - - pos = 0 - while 1: - chunk = file.read(BUFSIZE) - if not chunk: - 
parser.Parse(chunk, 1) - break - pos = pos + len(chunk) - if self.progress: - self.progress.set(pos / 100) - parser.Parse(chunk, 0) - - def startElementHandler(self, name, attrs): - stackSize = self.stackSize - self.stackSize = stackSize + 1 - if not stackSize: - if name <> "ttFont": - raise TTXParseError, "illegal root tag: %s" % name - sfntVersion = attrs.get("sfntVersion") - if sfntVersion is not None: - if len(sfntVersion) <> 4: - sfntVersion = safeEval('"' + sfntVersion + '"') - self.ttFont.sfntVersion = sfntVersion - self.contentStack.append([]) - elif stackSize == 1: - subFile = attrs.get("src") - if subFile is not None: - subFile = os.path.join(os.path.dirname(self.fileName), subFile) - importXML(self.ttFont, subFile, self.progress) - self.contentStack.append([]) - return - tag = ttLib.xmlToTag(name) - msg = "Parsing '%s' table..." % tag - if self.progress: - self.progress.setlabel(msg) - elif self.ttFont.verbose: - ttLib.debugmsg(msg) - else: - print msg - if tag == "GlyphOrder": - tableClass = ttLib.GlyphOrder - elif attrs.has_key("ERROR"): - tableClass = DefaultTable - else: - tableClass = ttLib.getTableClass(tag) - if tableClass is None: - tableClass = DefaultTable - if tag == 'loca' and self.ttFont.has_key(tag): - # Special-case the 'loca' table as we need the - # original if the 'glyf' table isn't recompiled. 
- self.currentTable = self.ttFont[tag] - else: - self.currentTable = tableClass(tag) - self.ttFont[tag] = self.currentTable - self.contentStack.append([]) - elif stackSize == 2: - self.contentStack.append([]) - self.root = (name, attrs, self.contentStack[-1]) - else: - list = [] - self.contentStack[-1].append((name, attrs, list)) - self.contentStack.append(list) - - def characterDataHandler(self, data): - if self.stackSize > 1: - self.contentStack[-1].append(data) - - def endElementHandler(self, name): - self.stackSize = self.stackSize - 1 - del self.contentStack[-1] - if self.stackSize == 1: - self.root = None - elif self.stackSize == 2: - self.currentTable.fromXML(self.root, self.ttFont) - self.root = None - - -class ProgressPrinter: - - def __init__(self, title, maxval=100): - print title - - def set(self, val, maxval=None): - pass - - def increment(self, val=1): - pass - - def setLabel(self, text): - print text - - -def importXML(ttFont, fileName, progress=None): - """Import a TTX file (an XML-based text format), so as to recreate - a font object. - """ - if progress: - import stat - progress.set(0, os.stat(fileName)[stat.ST_SIZE] / 100 or 1) - p = ExpatParser(ttFont, fileName, progress) - p.parse() - diff -Nru fonttools-2.4/Lib/fontTools/ttx.py fonttools-3.0/Lib/fontTools/ttx.py --- fonttools-2.4/Lib/fontTools/ttx.py 2013-06-22 08:34:08.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/ttx.py 2015-08-31 17:57:15.000000000 +0000 @@ -15,9 +15,13 @@ -h Help: print this message -d <outputfolder> Specify a directory where the output files are to be created. - -o <outputfile> Specify a file to write the output to. + -o <outputfile> Specify a file to write the output to. A special + value of of - would use the standard output. + -f Overwrite existing output file(s), ie. don't append numbers. -v Verbose: more messages will be written to stdout about what is being done. + -q Quiet: No messages will be written to stdout about what + is being done. 
-a allow virtual glyphs ID's on compile or decompile. Dump options: @@ -38,10 +42,24 @@ pre-program) will be written to the TTX file as hex data instead of assembly. This saves some time and makes the TTX file smaller. + -z <format> Specify a bitmap data export option for EBDT: + {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT: + {'raw', 'extfile'} Each option does one of the following: + -z raw + * export the bitmap data as a hex dump + -z row + * export each row as hex data + -z bitwise + * export each row as binary in an ASCII art style + -z extfile + * export the data as external files with XML references + If no export format is specified 'raw' format is used. -e Don't ignore decompilation errors, but show a full traceback and abort. -y <number> Select font number for TrueType Collection, starting from 0. + --unicodedata <UnicodeData.txt> Use custom database file to write + character names in the comments of the cmap TTX output. Compile options: -m Merge with TrueType-input-file: specify a TrueType or OpenType @@ -49,55 +67,63 @@ valid when at most one TTX file is specified. -b Don't recalc glyph bounding boxes: use the values in the TTX file as-is. + --recalc-timestamp Set font 'modified' timestamp to current time. + By default, the modification time of the TTX file will be used. 
""" -import sys +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont, TTLibError +from fontTools.misc.macCreatorType import getMacCreatorAndType +from fontTools.unicode import setUnicodeData +from fontTools.misc.timeTools import timestampSinceEpoch import os +import sys import getopt import re -from fontTools.ttLib import TTFont -from fontTools.ttLib.tables.otBase import OTLOffsetOverflowError -from fontTools.ttLib.tables.otTables import fixLookupOverFlows, fixSubTableOverFlows -from fontTools.misc.macCreatorType import getMacCreatorAndType -from fontTools import version def usage(): - print __doc__ % version + from fontTools import version + print(__doc__ % version) sys.exit(2) - -numberAddedRE = re.compile("(.*)#\d+$") + +numberAddedRE = re.compile("#\d+$") opentypeheaderRE = re.compile('''sfntVersion=['"]OTTO["']''') -def makeOutputFileName(input, outputDir, extension): - dir, file = os.path.split(input) - file, ext = os.path.splitext(file) +def makeOutputFileName(input, outputDir, extension, overWrite=False): + dirName, fileName = os.path.split(input) + fileName, ext = os.path.splitext(fileName) if outputDir: - dir = outputDir - output = os.path.join(dir, file + extension) - m = numberAddedRE.match(file) - if m: - file = m.group(1) + dirName = outputDir + fileName = numberAddedRE.split(fileName)[0] + output = os.path.join(dirName, fileName + extension) n = 1 - while os.path.exists(output): - output = os.path.join(dir, file + "#" + repr(n) + extension) - n = n + 1 + if not overWrite: + while os.path.exists(output): + output = os.path.join(dirName, fileName + "#" + repr(n) + extension) + n = n + 1 return output -class Options: +class Options(object): - listTables = 0 + listTables = False outputDir = None outputFile = None - verbose = 0 - splitTables = 0 - disassembleInstructions = 1 + overWrite = False + verbose = False + quiet = False + splitTables = False + 
disassembleInstructions = True mergeFile = None - recalcBBoxes = 1 - allowVID = 0 + recalcBBoxes = True + allowVID = False ignoreDecompileErrors = True + bitmapGlyphDataFormat = 'raw' + unicodedata = None + recalcTimestamp = False def __init__(self, rawOptions, numFiles): self.onlyTables = [] @@ -106,116 +132,125 @@ for option, value in rawOptions: # general options if option == "-h": - print __doc__ % version + from fontTools import version + print(__doc__ % version) sys.exit(0) elif option == "-d": if not os.path.isdir(value): - print "The -d option value must be an existing directory" + print("The -d option value must be an existing directory") sys.exit(2) self.outputDir = value elif option == "-o": self.outputFile = value + elif option == "-f": + self.overWrite = True elif option == "-v": - self.verbose = 1 + self.verbose = True + elif option == "-q": + self.quiet = True # dump options elif option == "-l": - self.listTables = 1 + self.listTables = True elif option == "-t": self.onlyTables.append(value) elif option == "-x": self.skipTables.append(value) elif option == "-s": - self.splitTables = 1 + self.splitTables = True elif option == "-i": - self.disassembleInstructions = 0 + self.disassembleInstructions = False + elif option == "-z": + validOptions = ('raw', 'row', 'bitwise', 'extfile') + if value not in validOptions: + print("-z does not allow %s as a format. 
Use %s" % (option, validOptions)) + sys.exit(2) + self.bitmapGlyphDataFormat = value elif option == "-y": self.fontNumber = int(value) # compile options elif option == "-m": self.mergeFile = value elif option == "-b": - self.recalcBBoxes = 0 + self.recalcBBoxes = False elif option == "-a": - self.allowVID = 1 + self.allowVID = True elif option == "-e": self.ignoreDecompileErrors = False + elif option == "--unicodedata": + self.unicodedata = value + elif option == "--recalc-timestamp": + self.recalcTimestamp = True if self.onlyTables and self.skipTables: - print "-t and -x options are mutually exclusive" + print("-t and -x options are mutually exclusive") sys.exit(2) if self.mergeFile and numFiles > 1: - print "Must specify exactly one TTX source file when using -m" + print("Must specify exactly one TTX source file when using -m") sys.exit(2) def ttList(input, output, options): - import string - ttf = TTFont(input, fontNumber=options.fontNumber) + ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True) reader = ttf.reader - tags = reader.keys() - tags.sort() - print 'Listing table info for "%s":' % input + tags = sorted(reader.keys()) + print('Listing table info for "%s":' % input) format = " %4s %10s %7s %7s" - print format % ("tag ", " checksum", " length", " offset") - print format % ("----", "----------", "-------", "-------") + print(format % ("tag ", " checksum", " length", " offset")) + print(format % ("----", "----------", "-------", "-------")) for tag in tags: entry = reader.tables[tag] - checkSum = long(entry.checkSum) + if ttf.flavor == "woff2": + # WOFF2 doesn't store table checksums, so they must be calculated + from fontTools.ttLib.sfnt import calcChecksum + data = entry.loadData(reader.transformBuffer) + checkSum = calcChecksum(data) + else: + checkSum = int(entry.checkSum) if checkSum < 0: - checkSum = checkSum + 0x100000000L - checksum = "0x" + string.zfill(hex(checkSum)[2:-1], 8) - print format % (tag, checksum, entry.length, entry.offset) - 
print + checkSum = checkSum + 0x100000000 + checksum = "0x%08X" % checkSum + print(format % (tag, checksum, entry.length, entry.offset)) + print() ttf.close() def ttDump(input, output, options): - print 'Dumping "%s" to "%s"...' % (input, output) + if not options.quiet: + print('Dumping "%s" to "%s"...' % (input, output)) + if options.unicodedata: + setUnicodeData(options.unicodedata) ttf = TTFont(input, 0, verbose=options.verbose, allowVID=options.allowVID, + quiet=options.quiet, ignoreDecompileErrors=options.ignoreDecompileErrors, fontNumber=options.fontNumber) ttf.saveXML(output, + quiet=options.quiet, tables=options.onlyTables, - skipTables=options.skipTables, + skipTables=options.skipTables, splitTables=options.splitTables, - disassembleInstructions=options.disassembleInstructions) + disassembleInstructions=options.disassembleInstructions, + bitmapGlyphDataFormat=options.bitmapGlyphDataFormat) ttf.close() def ttCompile(input, output, options): - print 'Compiling "%s" to "%s"...' % (input, output) + if not options.quiet: + print('Compiling "%s" to "%s"...' % (input, output)) ttf = TTFont(options.mergeFile, recalcBBoxes=options.recalcBBoxes, + recalcTimestamp=options.recalcTimestamp, verbose=options.verbose, allowVID=options.allowVID) - ttf.importXML(input) - try: - ttf.save(output) - except OTLOffsetOverflowError, e: - # XXX This shouldn't be here at all, it should be as close to the - # OTL code as possible. 
- overflowRecord = e.value - print "Attempting to fix OTLOffsetOverflowError", e - lastItem = overflowRecord - while 1: - ok = 0 - if overflowRecord.itemName == None: - ok = fixLookupOverFlows(ttf, overflowRecord) - else: - ok = fixSubTableOverFlows(ttf, overflowRecord) - if not ok: - raise - - try: - ttf.save(output) - break - except OTLOffsetOverflowError, e: - print "Attempting to fix OTLOffsetOverflowError", e - overflowRecord = e.value - if overflowRecord == lastItem: - raise + ttf.importXML(input, quiet=options.quiet) + + if not options.recalcTimestamp: + # use TTX file modification time for head "modified" timestamp + mtime = os.path.getmtime(input) + ttf['head'].modified = timestampSinceEpoch(mtime) + + ttf.save(output) if options.verbose: import time - print "finished at", time.strftime("%H:%M:%S", time.localtime(time.time())) + print("finished at", time.strftime("%H:%M:%S", time.localtime(time.time()))) def guessFileType(fileName): @@ -230,15 +265,21 @@ if ext == ".dfont": return "TTF" header = f.read(256) - head = header[:4] + head = Tag(header[:4]) if head == "OTTO": return "OTF" elif head == "ttcf": return "TTC" elif head in ("\0\1\0\0", "true"): return "TTF" + elif head == "wOFF": + return "WOFF" + elif head == "wOF2": + return "WOFF2" elif head.lower() == "<?xm": - if opentypeheaderRE.match(header): + # Use 'latin1' because that can't fail. 
+ header = tostr(header, 'latin1') + if opentypeheaderRE.search(header): return "OTX" else: return "TTX" @@ -247,19 +288,20 @@ def parseOptions(args): try: - rawOptions, files = getopt.getopt(args, "ld:o:vht:x:sim:baey:") + rawOptions, files = getopt.getopt(args, "ld:o:fvqht:x:sim:z:baey:", + ['unicodedata=', "recalc-timestamp"]) except getopt.GetoptError: usage() - + if not files: usage() - + options = Options(rawOptions, len(files)) jobs = [] - + for input in files: tp = guessFileType(input) - if tp in ("OTF", "TTF", "TTC"): + if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"): extension = ".ttx" if options.listTables: action = ttList @@ -272,13 +314,16 @@ extension = ".otf" action = ttCompile else: - print 'Unknown file type: "%s"' % input + print('Unknown file type: "%s"' % input) continue - + if options.outputFile: output = options.outputFile else: - output = makeOutputFileName(input, options.outputDir, extension) + output = makeOutputFileName(input, options.outputDir, extension, options.overWrite) + # 'touch' output file to avoid race condition in choosing file names + if action != ttList: + open(output, 'a').close() jobs.append((action, input, output)) return jobs, options @@ -292,22 +337,26 @@ """Force the DOS Prompt window to stay open so the user gets a chance to see what's wrong.""" import msvcrt - print '(Hit any key to exit)' + print('(Hit any key to exit)') while not msvcrt.kbhit(): pass -def main(args): +def main(args=None): + if args is None: + args = sys.argv[1:] jobs, options = parseOptions(args) try: process(jobs, options) except KeyboardInterrupt: - print "(Cancelled.)" + print("(Cancelled.)") except SystemExit: if sys.platform == "win32": waitForKeyPress() else: raise + except TTLibError as e: + print("Error:",e) except: if sys.platform == "win32": import traceback @@ -315,7 +364,7 @@ waitForKeyPress() else: raise - + if __name__ == "__main__": - main(sys.argv[1:]) + main() diff -Nru fonttools-2.4/Lib/fontTools/unicode.py 
fonttools-3.0/Lib/fontTools/unicode.py --- fonttools-2.4/Lib/fontTools/unicode.py 2013-06-22 14:25:29.000000000 +0000 +++ fonttools-3.0/Lib/fontTools/unicode.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,21875 +1,25 @@ -"""Unicode version 5.2.0""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * -_unicode = """\ -0000 <control> -0001 <control> -0002 <control> -0003 <control> -0004 <control> -0005 <control> -0006 <control> -0007 <control> -0008 <control> -0009 <control> -000A <control> -000B <control> -000C <control> -000D <control> -000E <control> -000F <control> -0010 <control> -0011 <control> -0012 <control> -0013 <control> -0014 <control> -0015 <control> -0016 <control> -0017 <control> -0018 <control> -0019 <control> -001A <control> -001B <control> -001C <control> -001D <control> -001E <control> -001F <control> -0020 SPACE -0021 EXCLAMATION MARK -0022 QUOTATION MARK -0023 NUMBER SIGN -0024 DOLLAR SIGN -0025 PERCENT SIGN -0026 AMPERSAND -0027 APOSTROPHE -0028 LEFT PARENTHESIS -0029 RIGHT PARENTHESIS -002A ASTERISK -002B PLUS SIGN -002C COMMA -002D HYPHEN-MINUS -002E FULL STOP -002F SOLIDUS -0030 DIGIT ZERO -0031 DIGIT ONE -0032 DIGIT TWO -0033 DIGIT THREE -0034 DIGIT FOUR -0035 DIGIT FIVE -0036 DIGIT SIX -0037 DIGIT SEVEN -0038 DIGIT EIGHT -0039 DIGIT NINE -003A COLON -003B SEMICOLON -003C LESS-THAN SIGN -003D EQUALS SIGN -003E GREATER-THAN SIGN -003F QUESTION MARK -0040 COMMERCIAL AT -0041 LATIN CAPITAL LETTER A -0042 LATIN CAPITAL LETTER B -0043 LATIN CAPITAL LETTER C -0044 LATIN CAPITAL LETTER D -0045 LATIN CAPITAL LETTER E -0046 LATIN CAPITAL LETTER F -0047 LATIN CAPITAL LETTER G -0048 LATIN CAPITAL LETTER H -0049 LATIN CAPITAL LETTER I -004A LATIN CAPITAL LETTER J -004B LATIN CAPITAL LETTER K -004C LATIN CAPITAL LETTER L -004D LATIN CAPITAL LETTER M -004E LATIN CAPITAL LETTER N -004F LATIN CAPITAL LETTER O -0050 LATIN CAPITAL LETTER P -0051 LATIN CAPITAL LETTER Q -0052 LATIN CAPITAL LETTER R -0053 LATIN 
CAPITAL LETTER S -0054 LATIN CAPITAL LETTER T -0055 LATIN CAPITAL LETTER U -0056 LATIN CAPITAL LETTER V -0057 LATIN CAPITAL LETTER W -0058 LATIN CAPITAL LETTER X -0059 LATIN CAPITAL LETTER Y -005A LATIN CAPITAL LETTER Z -005B LEFT SQUARE BRACKET -005C REVERSE SOLIDUS -005D RIGHT SQUARE BRACKET -005E CIRCUMFLEX ACCENT -005F LOW LINE -0060 GRAVE ACCENT -0061 LATIN SMALL LETTER A -0062 LATIN SMALL LETTER B -0063 LATIN SMALL LETTER C -0064 LATIN SMALL LETTER D -0065 LATIN SMALL LETTER E -0066 LATIN SMALL LETTER F -0067 LATIN SMALL LETTER G -0068 LATIN SMALL LETTER H -0069 LATIN SMALL LETTER I -006A LATIN SMALL LETTER J -006B LATIN SMALL LETTER K -006C LATIN SMALL LETTER L -006D LATIN SMALL LETTER M -006E LATIN SMALL LETTER N -006F LATIN SMALL LETTER O -0070 LATIN SMALL LETTER P -0071 LATIN SMALL LETTER Q -0072 LATIN SMALL LETTER R -0073 LATIN SMALL LETTER S -0074 LATIN SMALL LETTER T -0075 LATIN SMALL LETTER U -0076 LATIN SMALL LETTER V -0077 LATIN SMALL LETTER W -0078 LATIN SMALL LETTER X -0079 LATIN SMALL LETTER Y -007A LATIN SMALL LETTER Z -007B LEFT CURLY BRACKET -007C VERTICAL LINE -007D RIGHT CURLY BRACKET -007E TILDE -007F <control> -0080 <control> -0081 <control> -0082 <control> -0083 <control> -0084 <control> -0085 <control> -0086 <control> -0087 <control> -0088 <control> -0089 <control> -008A <control> -008B <control> -008C <control> -008D <control> -008E <control> -008F <control> -0090 <control> -0091 <control> -0092 <control> -0093 <control> -0094 <control> -0095 <control> -0096 <control> -0097 <control> -0098 <control> -0099 <control> -009A <control> -009B <control> -009C <control> -009D <control> -009E <control> -009F <control> -00A0 NO-BREAK SPACE -00A1 INVERTED EXCLAMATION MARK -00A2 CENT SIGN -00A3 POUND SIGN -00A4 CURRENCY SIGN -00A5 YEN SIGN -00A6 BROKEN BAR -00A7 SECTION SIGN -00A8 DIAERESIS -00A9 COPYRIGHT SIGN -00AA FEMININE ORDINAL INDICATOR -00AB LEFT-POINTING DOUBLE ANGLE QUOTATION MARK -00AC NOT SIGN -00AD SOFT HYPHEN -00AE REGISTERED SIGN 
-00AF MACRON -00B0 DEGREE SIGN -00B1 PLUS-MINUS SIGN -00B2 SUPERSCRIPT TWO -00B3 SUPERSCRIPT THREE -00B4 ACUTE ACCENT -00B5 MICRO SIGN -00B6 PILCROW SIGN -00B7 MIDDLE DOT -00B8 CEDILLA -00B9 SUPERSCRIPT ONE -00BA MASCULINE ORDINAL INDICATOR -00BB RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK -00BC VULGAR FRACTION ONE QUARTER -00BD VULGAR FRACTION ONE HALF -00BE VULGAR FRACTION THREE QUARTERS -00BF INVERTED QUESTION MARK -00C0 LATIN CAPITAL LETTER A WITH GRAVE -00C1 LATIN CAPITAL LETTER A WITH ACUTE -00C2 LATIN CAPITAL LETTER A WITH CIRCUMFLEX -00C3 LATIN CAPITAL LETTER A WITH TILDE -00C4 LATIN CAPITAL LETTER A WITH DIAERESIS -00C5 LATIN CAPITAL LETTER A WITH RING ABOVE -00C6 LATIN CAPITAL LETTER AE -00C7 LATIN CAPITAL LETTER C WITH CEDILLA -00C8 LATIN CAPITAL LETTER E WITH GRAVE -00C9 LATIN CAPITAL LETTER E WITH ACUTE -00CA LATIN CAPITAL LETTER E WITH CIRCUMFLEX -00CB LATIN CAPITAL LETTER E WITH DIAERESIS -00CC LATIN CAPITAL LETTER I WITH GRAVE -00CD LATIN CAPITAL LETTER I WITH ACUTE -00CE LATIN CAPITAL LETTER I WITH CIRCUMFLEX -00CF LATIN CAPITAL LETTER I WITH DIAERESIS -00D0 LATIN CAPITAL LETTER ETH -00D1 LATIN CAPITAL LETTER N WITH TILDE -00D2 LATIN CAPITAL LETTER O WITH GRAVE -00D3 LATIN CAPITAL LETTER O WITH ACUTE -00D4 LATIN CAPITAL LETTER O WITH CIRCUMFLEX -00D5 LATIN CAPITAL LETTER O WITH TILDE -00D6 LATIN CAPITAL LETTER O WITH DIAERESIS -00D7 MULTIPLICATION SIGN -00D8 LATIN CAPITAL LETTER O WITH STROKE -00D9 LATIN CAPITAL LETTER U WITH GRAVE -00DA LATIN CAPITAL LETTER U WITH ACUTE -00DB LATIN CAPITAL LETTER U WITH CIRCUMFLEX -00DC LATIN CAPITAL LETTER U WITH DIAERESIS -00DD LATIN CAPITAL LETTER Y WITH ACUTE -00DE LATIN CAPITAL LETTER THORN -00DF LATIN SMALL LETTER SHARP S -00E0 LATIN SMALL LETTER A WITH GRAVE -00E1 LATIN SMALL LETTER A WITH ACUTE -00E2 LATIN SMALL LETTER A WITH CIRCUMFLEX -00E3 LATIN SMALL LETTER A WITH TILDE -00E4 LATIN SMALL LETTER A WITH DIAERESIS -00E5 LATIN SMALL LETTER A WITH RING ABOVE -00E6 LATIN SMALL LETTER AE -00E7 LATIN SMALL 
LETTER C WITH CEDILLA -00E8 LATIN SMALL LETTER E WITH GRAVE -00E9 LATIN SMALL LETTER E WITH ACUTE -00EA LATIN SMALL LETTER E WITH CIRCUMFLEX -00EB LATIN SMALL LETTER E WITH DIAERESIS -00EC LATIN SMALL LETTER I WITH GRAVE -00ED LATIN SMALL LETTER I WITH ACUTE -00EE LATIN SMALL LETTER I WITH CIRCUMFLEX -00EF LATIN SMALL LETTER I WITH DIAERESIS -00F0 LATIN SMALL LETTER ETH -00F1 LATIN SMALL LETTER N WITH TILDE -00F2 LATIN SMALL LETTER O WITH GRAVE -00F3 LATIN SMALL LETTER O WITH ACUTE -00F4 LATIN SMALL LETTER O WITH CIRCUMFLEX -00F5 LATIN SMALL LETTER O WITH TILDE -00F6 LATIN SMALL LETTER O WITH DIAERESIS -00F7 DIVISION SIGN -00F8 LATIN SMALL LETTER O WITH STROKE -00F9 LATIN SMALL LETTER U WITH GRAVE -00FA LATIN SMALL LETTER U WITH ACUTE -00FB LATIN SMALL LETTER U WITH CIRCUMFLEX -00FC LATIN SMALL LETTER U WITH DIAERESIS -00FD LATIN SMALL LETTER Y WITH ACUTE -00FE LATIN SMALL LETTER THORN -00FF LATIN SMALL LETTER Y WITH DIAERESIS -0100 LATIN CAPITAL LETTER A WITH MACRON -0101 LATIN SMALL LETTER A WITH MACRON -0102 LATIN CAPITAL LETTER A WITH BREVE -0103 LATIN SMALL LETTER A WITH BREVE -0104 LATIN CAPITAL LETTER A WITH OGONEK -0105 LATIN SMALL LETTER A WITH OGONEK -0106 LATIN CAPITAL LETTER C WITH ACUTE -0107 LATIN SMALL LETTER C WITH ACUTE -0108 LATIN CAPITAL LETTER C WITH CIRCUMFLEX -0109 LATIN SMALL LETTER C WITH CIRCUMFLEX -010A LATIN CAPITAL LETTER C WITH DOT ABOVE -010B LATIN SMALL LETTER C WITH DOT ABOVE -010C LATIN CAPITAL LETTER C WITH CARON -010D LATIN SMALL LETTER C WITH CARON -010E LATIN CAPITAL LETTER D WITH CARON -010F LATIN SMALL LETTER D WITH CARON -0110 LATIN CAPITAL LETTER D WITH STROKE -0111 LATIN SMALL LETTER D WITH STROKE -0112 LATIN CAPITAL LETTER E WITH MACRON -0113 LATIN SMALL LETTER E WITH MACRON -0114 LATIN CAPITAL LETTER E WITH BREVE -0115 LATIN SMALL LETTER E WITH BREVE -0116 LATIN CAPITAL LETTER E WITH DOT ABOVE -0117 LATIN SMALL LETTER E WITH DOT ABOVE -0118 LATIN CAPITAL LETTER E WITH OGONEK -0119 LATIN SMALL LETTER E WITH OGONEK -011A 
LATIN CAPITAL LETTER E WITH CARON -011B LATIN SMALL LETTER E WITH CARON -011C LATIN CAPITAL LETTER G WITH CIRCUMFLEX -011D LATIN SMALL LETTER G WITH CIRCUMFLEX -011E LATIN CAPITAL LETTER G WITH BREVE -011F LATIN SMALL LETTER G WITH BREVE -0120 LATIN CAPITAL LETTER G WITH DOT ABOVE -0121 LATIN SMALL LETTER G WITH DOT ABOVE -0122 LATIN CAPITAL LETTER G WITH CEDILLA -0123 LATIN SMALL LETTER G WITH CEDILLA -0124 LATIN CAPITAL LETTER H WITH CIRCUMFLEX -0125 LATIN SMALL LETTER H WITH CIRCUMFLEX -0126 LATIN CAPITAL LETTER H WITH STROKE -0127 LATIN SMALL LETTER H WITH STROKE -0128 LATIN CAPITAL LETTER I WITH TILDE -0129 LATIN SMALL LETTER I WITH TILDE -012A LATIN CAPITAL LETTER I WITH MACRON -012B LATIN SMALL LETTER I WITH MACRON -012C LATIN CAPITAL LETTER I WITH BREVE -012D LATIN SMALL LETTER I WITH BREVE -012E LATIN CAPITAL LETTER I WITH OGONEK -012F LATIN SMALL LETTER I WITH OGONEK -0130 LATIN CAPITAL LETTER I WITH DOT ABOVE -0131 LATIN SMALL LETTER DOTLESS I -0132 LATIN CAPITAL LIGATURE IJ -0133 LATIN SMALL LIGATURE IJ -0134 LATIN CAPITAL LETTER J WITH CIRCUMFLEX -0135 LATIN SMALL LETTER J WITH CIRCUMFLEX -0136 LATIN CAPITAL LETTER K WITH CEDILLA -0137 LATIN SMALL LETTER K WITH CEDILLA -0138 LATIN SMALL LETTER KRA -0139 LATIN CAPITAL LETTER L WITH ACUTE -013A LATIN SMALL LETTER L WITH ACUTE -013B LATIN CAPITAL LETTER L WITH CEDILLA -013C LATIN SMALL LETTER L WITH CEDILLA -013D LATIN CAPITAL LETTER L WITH CARON -013E LATIN SMALL LETTER L WITH CARON -013F LATIN CAPITAL LETTER L WITH MIDDLE DOT -0140 LATIN SMALL LETTER L WITH MIDDLE DOT -0141 LATIN CAPITAL LETTER L WITH STROKE -0142 LATIN SMALL LETTER L WITH STROKE -0143 LATIN CAPITAL LETTER N WITH ACUTE -0144 LATIN SMALL LETTER N WITH ACUTE -0145 LATIN CAPITAL LETTER N WITH CEDILLA -0146 LATIN SMALL LETTER N WITH CEDILLA -0147 LATIN CAPITAL LETTER N WITH CARON -0148 LATIN SMALL LETTER N WITH CARON -0149 LATIN SMALL LETTER N PRECEDED BY APOSTROPHE -014A LATIN CAPITAL LETTER ENG -014B LATIN SMALL LETTER ENG -014C LATIN 
CAPITAL LETTER O WITH MACRON -014D LATIN SMALL LETTER O WITH MACRON -014E LATIN CAPITAL LETTER O WITH BREVE -014F LATIN SMALL LETTER O WITH BREVE -0150 LATIN CAPITAL LETTER O WITH DOUBLE ACUTE -0151 LATIN SMALL LETTER O WITH DOUBLE ACUTE -0152 LATIN CAPITAL LIGATURE OE -0153 LATIN SMALL LIGATURE OE -0154 LATIN CAPITAL LETTER R WITH ACUTE -0155 LATIN SMALL LETTER R WITH ACUTE -0156 LATIN CAPITAL LETTER R WITH CEDILLA -0157 LATIN SMALL LETTER R WITH CEDILLA -0158 LATIN CAPITAL LETTER R WITH CARON -0159 LATIN SMALL LETTER R WITH CARON -015A LATIN CAPITAL LETTER S WITH ACUTE -015B LATIN SMALL LETTER S WITH ACUTE -015C LATIN CAPITAL LETTER S WITH CIRCUMFLEX -015D LATIN SMALL LETTER S WITH CIRCUMFLEX -015E LATIN CAPITAL LETTER S WITH CEDILLA -015F LATIN SMALL LETTER S WITH CEDILLA -0160 LATIN CAPITAL LETTER S WITH CARON -0161 LATIN SMALL LETTER S WITH CARON -0162 LATIN CAPITAL LETTER T WITH CEDILLA -0163 LATIN SMALL LETTER T WITH CEDILLA -0164 LATIN CAPITAL LETTER T WITH CARON -0165 LATIN SMALL LETTER T WITH CARON -0166 LATIN CAPITAL LETTER T WITH STROKE -0167 LATIN SMALL LETTER T WITH STROKE -0168 LATIN CAPITAL LETTER U WITH TILDE -0169 LATIN SMALL LETTER U WITH TILDE -016A LATIN CAPITAL LETTER U WITH MACRON -016B LATIN SMALL LETTER U WITH MACRON -016C LATIN CAPITAL LETTER U WITH BREVE -016D LATIN SMALL LETTER U WITH BREVE -016E LATIN CAPITAL LETTER U WITH RING ABOVE -016F LATIN SMALL LETTER U WITH RING ABOVE -0170 LATIN CAPITAL LETTER U WITH DOUBLE ACUTE -0171 LATIN SMALL LETTER U WITH DOUBLE ACUTE -0172 LATIN CAPITAL LETTER U WITH OGONEK -0173 LATIN SMALL LETTER U WITH OGONEK -0174 LATIN CAPITAL LETTER W WITH CIRCUMFLEX -0175 LATIN SMALL LETTER W WITH CIRCUMFLEX -0176 LATIN CAPITAL LETTER Y WITH CIRCUMFLEX -0177 LATIN SMALL LETTER Y WITH CIRCUMFLEX -0178 LATIN CAPITAL LETTER Y WITH DIAERESIS -0179 LATIN CAPITAL LETTER Z WITH ACUTE -017A LATIN SMALL LETTER Z WITH ACUTE -017B LATIN CAPITAL LETTER Z WITH DOT ABOVE -017C LATIN SMALL LETTER Z WITH DOT ABOVE -017D LATIN 
CAPITAL LETTER Z WITH CARON -017E LATIN SMALL LETTER Z WITH CARON -017F LATIN SMALL LETTER LONG S -0180 LATIN SMALL LETTER B WITH STROKE -0181 LATIN CAPITAL LETTER B WITH HOOK -0182 LATIN CAPITAL LETTER B WITH TOPBAR -0183 LATIN SMALL LETTER B WITH TOPBAR -0184 LATIN CAPITAL LETTER TONE SIX -0185 LATIN SMALL LETTER TONE SIX -0186 LATIN CAPITAL LETTER OPEN O -0187 LATIN CAPITAL LETTER C WITH HOOK -0188 LATIN SMALL LETTER C WITH HOOK -0189 LATIN CAPITAL LETTER AFRICAN D -018A LATIN CAPITAL LETTER D WITH HOOK -018B LATIN CAPITAL LETTER D WITH TOPBAR -018C LATIN SMALL LETTER D WITH TOPBAR -018D LATIN SMALL LETTER TURNED DELTA -018E LATIN CAPITAL LETTER REVERSED E -018F LATIN CAPITAL LETTER SCHWA -0190 LATIN CAPITAL LETTER OPEN E -0191 LATIN CAPITAL LETTER F WITH HOOK -0192 LATIN SMALL LETTER F WITH HOOK -0193 LATIN CAPITAL LETTER G WITH HOOK -0194 LATIN CAPITAL LETTER GAMMA -0195 LATIN SMALL LETTER HV -0196 LATIN CAPITAL LETTER IOTA -0197 LATIN CAPITAL LETTER I WITH STROKE -0198 LATIN CAPITAL LETTER K WITH HOOK -0199 LATIN SMALL LETTER K WITH HOOK -019A LATIN SMALL LETTER L WITH BAR -019B LATIN SMALL LETTER LAMBDA WITH STROKE -019C LATIN CAPITAL LETTER TURNED M -019D LATIN CAPITAL LETTER N WITH LEFT HOOK -019E LATIN SMALL LETTER N WITH LONG RIGHT LEG -019F LATIN CAPITAL LETTER O WITH MIDDLE TILDE -01A0 LATIN CAPITAL LETTER O WITH HORN -01A1 LATIN SMALL LETTER O WITH HORN -01A2 LATIN CAPITAL LETTER OI -01A3 LATIN SMALL LETTER OI -01A4 LATIN CAPITAL LETTER P WITH HOOK -01A5 LATIN SMALL LETTER P WITH HOOK -01A6 LATIN LETTER YR -01A7 LATIN CAPITAL LETTER TONE TWO -01A8 LATIN SMALL LETTER TONE TWO -01A9 LATIN CAPITAL LETTER ESH -01AA LATIN LETTER REVERSED ESH LOOP -01AB LATIN SMALL LETTER T WITH PALATAL HOOK -01AC LATIN CAPITAL LETTER T WITH HOOK -01AD LATIN SMALL LETTER T WITH HOOK -01AE LATIN CAPITAL LETTER T WITH RETROFLEX HOOK -01AF LATIN CAPITAL LETTER U WITH HORN -01B0 LATIN SMALL LETTER U WITH HORN -01B1 LATIN CAPITAL LETTER UPSILON -01B2 LATIN CAPITAL LETTER V WITH 
HOOK -01B3 LATIN CAPITAL LETTER Y WITH HOOK -01B4 LATIN SMALL LETTER Y WITH HOOK -01B5 LATIN CAPITAL LETTER Z WITH STROKE -01B6 LATIN SMALL LETTER Z WITH STROKE -01B7 LATIN CAPITAL LETTER EZH -01B8 LATIN CAPITAL LETTER EZH REVERSED -01B9 LATIN SMALL LETTER EZH REVERSED -01BA LATIN SMALL LETTER EZH WITH TAIL -01BB LATIN LETTER TWO WITH STROKE -01BC LATIN CAPITAL LETTER TONE FIVE -01BD LATIN SMALL LETTER TONE FIVE -01BE LATIN LETTER INVERTED GLOTTAL STOP WITH STROKE -01BF LATIN LETTER WYNN -01C0 LATIN LETTER DENTAL CLICK -01C1 LATIN LETTER LATERAL CLICK -01C2 LATIN LETTER ALVEOLAR CLICK -01C3 LATIN LETTER RETROFLEX CLICK -01C4 LATIN CAPITAL LETTER DZ WITH CARON -01C5 LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON -01C6 LATIN SMALL LETTER DZ WITH CARON -01C7 LATIN CAPITAL LETTER LJ -01C8 LATIN CAPITAL LETTER L WITH SMALL LETTER J -01C9 LATIN SMALL LETTER LJ -01CA LATIN CAPITAL LETTER NJ -01CB LATIN CAPITAL LETTER N WITH SMALL LETTER J -01CC LATIN SMALL LETTER NJ -01CD LATIN CAPITAL LETTER A WITH CARON -01CE LATIN SMALL LETTER A WITH CARON -01CF LATIN CAPITAL LETTER I WITH CARON -01D0 LATIN SMALL LETTER I WITH CARON -01D1 LATIN CAPITAL LETTER O WITH CARON -01D2 LATIN SMALL LETTER O WITH CARON -01D3 LATIN CAPITAL LETTER U WITH CARON -01D4 LATIN SMALL LETTER U WITH CARON -01D5 LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON -01D6 LATIN SMALL LETTER U WITH DIAERESIS AND MACRON -01D7 LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE -01D8 LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE -01D9 LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON -01DA LATIN SMALL LETTER U WITH DIAERESIS AND CARON -01DB LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE -01DC LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE -01DD LATIN SMALL LETTER TURNED E -01DE LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON -01DF LATIN SMALL LETTER A WITH DIAERESIS AND MACRON -01E0 LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON -01E1 LATIN SMALL LETTER A WITH DOT ABOVE AND MACRON -01E2 LATIN CAPITAL LETTER AE 
WITH MACRON -01E3 LATIN SMALL LETTER AE WITH MACRON -01E4 LATIN CAPITAL LETTER G WITH STROKE -01E5 LATIN SMALL LETTER G WITH STROKE -01E6 LATIN CAPITAL LETTER G WITH CARON -01E7 LATIN SMALL LETTER G WITH CARON -01E8 LATIN CAPITAL LETTER K WITH CARON -01E9 LATIN SMALL LETTER K WITH CARON -01EA LATIN CAPITAL LETTER O WITH OGONEK -01EB LATIN SMALL LETTER O WITH OGONEK -01EC LATIN CAPITAL LETTER O WITH OGONEK AND MACRON -01ED LATIN SMALL LETTER O WITH OGONEK AND MACRON -01EE LATIN CAPITAL LETTER EZH WITH CARON -01EF LATIN SMALL LETTER EZH WITH CARON -01F0 LATIN SMALL LETTER J WITH CARON -01F1 LATIN CAPITAL LETTER DZ -01F2 LATIN CAPITAL LETTER D WITH SMALL LETTER Z -01F3 LATIN SMALL LETTER DZ -01F4 LATIN CAPITAL LETTER G WITH ACUTE -01F5 LATIN SMALL LETTER G WITH ACUTE -01F6 LATIN CAPITAL LETTER HWAIR -01F7 LATIN CAPITAL LETTER WYNN -01F8 LATIN CAPITAL LETTER N WITH GRAVE -01F9 LATIN SMALL LETTER N WITH GRAVE -01FA LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE -01FB LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE -01FC LATIN CAPITAL LETTER AE WITH ACUTE -01FD LATIN SMALL LETTER AE WITH ACUTE -01FE LATIN CAPITAL LETTER O WITH STROKE AND ACUTE -01FF LATIN SMALL LETTER O WITH STROKE AND ACUTE -0200 LATIN CAPITAL LETTER A WITH DOUBLE GRAVE -0201 LATIN SMALL LETTER A WITH DOUBLE GRAVE -0202 LATIN CAPITAL LETTER A WITH INVERTED BREVE -0203 LATIN SMALL LETTER A WITH INVERTED BREVE -0204 LATIN CAPITAL LETTER E WITH DOUBLE GRAVE -0205 LATIN SMALL LETTER E WITH DOUBLE GRAVE -0206 LATIN CAPITAL LETTER E WITH INVERTED BREVE -0207 LATIN SMALL LETTER E WITH INVERTED BREVE -0208 LATIN CAPITAL LETTER I WITH DOUBLE GRAVE -0209 LATIN SMALL LETTER I WITH DOUBLE GRAVE -020A LATIN CAPITAL LETTER I WITH INVERTED BREVE -020B LATIN SMALL LETTER I WITH INVERTED BREVE -020C LATIN CAPITAL LETTER O WITH DOUBLE GRAVE -020D LATIN SMALL LETTER O WITH DOUBLE GRAVE -020E LATIN CAPITAL LETTER O WITH INVERTED BREVE -020F LATIN SMALL LETTER O WITH INVERTED BREVE -0210 LATIN CAPITAL LETTER R WITH DOUBLE 
GRAVE -0211 LATIN SMALL LETTER R WITH DOUBLE GRAVE -0212 LATIN CAPITAL LETTER R WITH INVERTED BREVE -0213 LATIN SMALL LETTER R WITH INVERTED BREVE -0214 LATIN CAPITAL LETTER U WITH DOUBLE GRAVE -0215 LATIN SMALL LETTER U WITH DOUBLE GRAVE -0216 LATIN CAPITAL LETTER U WITH INVERTED BREVE -0217 LATIN SMALL LETTER U WITH INVERTED BREVE -0218 LATIN CAPITAL LETTER S WITH COMMA BELOW -0219 LATIN SMALL LETTER S WITH COMMA BELOW -021A LATIN CAPITAL LETTER T WITH COMMA BELOW -021B LATIN SMALL LETTER T WITH COMMA BELOW -021C LATIN CAPITAL LETTER YOGH -021D LATIN SMALL LETTER YOGH -021E LATIN CAPITAL LETTER H WITH CARON -021F LATIN SMALL LETTER H WITH CARON -0220 LATIN CAPITAL LETTER N WITH LONG RIGHT LEG -0221 LATIN SMALL LETTER D WITH CURL -0222 LATIN CAPITAL LETTER OU -0223 LATIN SMALL LETTER OU -0224 LATIN CAPITAL LETTER Z WITH HOOK -0225 LATIN SMALL LETTER Z WITH HOOK -0226 LATIN CAPITAL LETTER A WITH DOT ABOVE -0227 LATIN SMALL LETTER A WITH DOT ABOVE -0228 LATIN CAPITAL LETTER E WITH CEDILLA -0229 LATIN SMALL LETTER E WITH CEDILLA -022A LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON -022B LATIN SMALL LETTER O WITH DIAERESIS AND MACRON -022C LATIN CAPITAL LETTER O WITH TILDE AND MACRON -022D LATIN SMALL LETTER O WITH TILDE AND MACRON -022E LATIN CAPITAL LETTER O WITH DOT ABOVE -022F LATIN SMALL LETTER O WITH DOT ABOVE -0230 LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON -0231 LATIN SMALL LETTER O WITH DOT ABOVE AND MACRON -0232 LATIN CAPITAL LETTER Y WITH MACRON -0233 LATIN SMALL LETTER Y WITH MACRON -0234 LATIN SMALL LETTER L WITH CURL -0235 LATIN SMALL LETTER N WITH CURL -0236 LATIN SMALL LETTER T WITH CURL -0237 LATIN SMALL LETTER DOTLESS J -0238 LATIN SMALL LETTER DB DIGRAPH -0239 LATIN SMALL LETTER QP DIGRAPH -023A LATIN CAPITAL LETTER A WITH STROKE -023B LATIN CAPITAL LETTER C WITH STROKE -023C LATIN SMALL LETTER C WITH STROKE -023D LATIN CAPITAL LETTER L WITH BAR -023E LATIN CAPITAL LETTER T WITH DIAGONAL STROKE -023F LATIN SMALL LETTER S WITH SWASH TAIL 
-0240 LATIN SMALL LETTER Z WITH SWASH TAIL -0241 LATIN CAPITAL LETTER GLOTTAL STOP -0242 LATIN SMALL LETTER GLOTTAL STOP -0243 LATIN CAPITAL LETTER B WITH STROKE -0244 LATIN CAPITAL LETTER U BAR -0245 LATIN CAPITAL LETTER TURNED V -0246 LATIN CAPITAL LETTER E WITH STROKE -0247 LATIN SMALL LETTER E WITH STROKE -0248 LATIN CAPITAL LETTER J WITH STROKE -0249 LATIN SMALL LETTER J WITH STROKE -024A LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL -024B LATIN SMALL LETTER Q WITH HOOK TAIL -024C LATIN CAPITAL LETTER R WITH STROKE -024D LATIN SMALL LETTER R WITH STROKE -024E LATIN CAPITAL LETTER Y WITH STROKE -024F LATIN SMALL LETTER Y WITH STROKE -0250 LATIN SMALL LETTER TURNED A -0251 LATIN SMALL LETTER ALPHA -0252 LATIN SMALL LETTER TURNED ALPHA -0253 LATIN SMALL LETTER B WITH HOOK -0254 LATIN SMALL LETTER OPEN O -0255 LATIN SMALL LETTER C WITH CURL -0256 LATIN SMALL LETTER D WITH TAIL -0257 LATIN SMALL LETTER D WITH HOOK -0258 LATIN SMALL LETTER REVERSED E -0259 LATIN SMALL LETTER SCHWA -025A LATIN SMALL LETTER SCHWA WITH HOOK -025B LATIN SMALL LETTER OPEN E -025C LATIN SMALL LETTER REVERSED OPEN E -025D LATIN SMALL LETTER REVERSED OPEN E WITH HOOK -025E LATIN SMALL LETTER CLOSED REVERSED OPEN E -025F LATIN SMALL LETTER DOTLESS J WITH STROKE -0260 LATIN SMALL LETTER G WITH HOOK -0261 LATIN SMALL LETTER SCRIPT G -0262 LATIN LETTER SMALL CAPITAL G -0263 LATIN SMALL LETTER GAMMA -0264 LATIN SMALL LETTER RAMS HORN -0265 LATIN SMALL LETTER TURNED H -0266 LATIN SMALL LETTER H WITH HOOK -0267 LATIN SMALL LETTER HENG WITH HOOK -0268 LATIN SMALL LETTER I WITH STROKE -0269 LATIN SMALL LETTER IOTA -026A LATIN LETTER SMALL CAPITAL I -026B LATIN SMALL LETTER L WITH MIDDLE TILDE -026C LATIN SMALL LETTER L WITH BELT -026D LATIN SMALL LETTER L WITH RETROFLEX HOOK -026E LATIN SMALL LETTER LEZH -026F LATIN SMALL LETTER TURNED M -0270 LATIN SMALL LETTER TURNED M WITH LONG LEG -0271 LATIN SMALL LETTER M WITH HOOK -0272 LATIN SMALL LETTER N WITH LEFT HOOK -0273 LATIN SMALL LETTER N WITH 
RETROFLEX HOOK -0274 LATIN LETTER SMALL CAPITAL N -0275 LATIN SMALL LETTER BARRED O -0276 LATIN LETTER SMALL CAPITAL OE -0277 LATIN SMALL LETTER CLOSED OMEGA -0278 LATIN SMALL LETTER PHI -0279 LATIN SMALL LETTER TURNED R -027A LATIN SMALL LETTER TURNED R WITH LONG LEG -027B LATIN SMALL LETTER TURNED R WITH HOOK -027C LATIN SMALL LETTER R WITH LONG LEG -027D LATIN SMALL LETTER R WITH TAIL -027E LATIN SMALL LETTER R WITH FISHHOOK -027F LATIN SMALL LETTER REVERSED R WITH FISHHOOK -0280 LATIN LETTER SMALL CAPITAL R -0281 LATIN LETTER SMALL CAPITAL INVERTED R -0282 LATIN SMALL LETTER S WITH HOOK -0283 LATIN SMALL LETTER ESH -0284 LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK -0285 LATIN SMALL LETTER SQUAT REVERSED ESH -0286 LATIN SMALL LETTER ESH WITH CURL -0287 LATIN SMALL LETTER TURNED T -0288 LATIN SMALL LETTER T WITH RETROFLEX HOOK -0289 LATIN SMALL LETTER U BAR -028A LATIN SMALL LETTER UPSILON -028B LATIN SMALL LETTER V WITH HOOK -028C LATIN SMALL LETTER TURNED V -028D LATIN SMALL LETTER TURNED W -028E LATIN SMALL LETTER TURNED Y -028F LATIN LETTER SMALL CAPITAL Y -0290 LATIN SMALL LETTER Z WITH RETROFLEX HOOK -0291 LATIN SMALL LETTER Z WITH CURL -0292 LATIN SMALL LETTER EZH -0293 LATIN SMALL LETTER EZH WITH CURL -0294 LATIN LETTER GLOTTAL STOP -0295 LATIN LETTER PHARYNGEAL VOICED FRICATIVE -0296 LATIN LETTER INVERTED GLOTTAL STOP -0297 LATIN LETTER STRETCHED C -0298 LATIN LETTER BILABIAL CLICK -0299 LATIN LETTER SMALL CAPITAL B -029A LATIN SMALL LETTER CLOSED OPEN E -029B LATIN LETTER SMALL CAPITAL G WITH HOOK -029C LATIN LETTER SMALL CAPITAL H -029D LATIN SMALL LETTER J WITH CROSSED-TAIL -029E LATIN SMALL LETTER TURNED K -029F LATIN LETTER SMALL CAPITAL L -02A0 LATIN SMALL LETTER Q WITH HOOK -02A1 LATIN LETTER GLOTTAL STOP WITH STROKE -02A2 LATIN LETTER REVERSED GLOTTAL STOP WITH STROKE -02A3 LATIN SMALL LETTER DZ DIGRAPH -02A4 LATIN SMALL LETTER DEZH DIGRAPH -02A5 LATIN SMALL LETTER DZ DIGRAPH WITH CURL -02A6 LATIN SMALL LETTER TS DIGRAPH -02A7 LATIN SMALL 
LETTER TESH DIGRAPH -02A8 LATIN SMALL LETTER TC DIGRAPH WITH CURL -02A9 LATIN SMALL LETTER FENG DIGRAPH -02AA LATIN SMALL LETTER LS DIGRAPH -02AB LATIN SMALL LETTER LZ DIGRAPH -02AC LATIN LETTER BILABIAL PERCUSSIVE -02AD LATIN LETTER BIDENTAL PERCUSSIVE -02AE LATIN SMALL LETTER TURNED H WITH FISHHOOK -02AF LATIN SMALL LETTER TURNED H WITH FISHHOOK AND TAIL -02B0 MODIFIER LETTER SMALL H -02B1 MODIFIER LETTER SMALL H WITH HOOK -02B2 MODIFIER LETTER SMALL J -02B3 MODIFIER LETTER SMALL R -02B4 MODIFIER LETTER SMALL TURNED R -02B5 MODIFIER LETTER SMALL TURNED R WITH HOOK -02B6 MODIFIER LETTER SMALL CAPITAL INVERTED R -02B7 MODIFIER LETTER SMALL W -02B8 MODIFIER LETTER SMALL Y -02B9 MODIFIER LETTER PRIME -02BA MODIFIER LETTER DOUBLE PRIME -02BB MODIFIER LETTER TURNED COMMA -02BC MODIFIER LETTER APOSTROPHE -02BD MODIFIER LETTER REVERSED COMMA -02BE MODIFIER LETTER RIGHT HALF RING -02BF MODIFIER LETTER LEFT HALF RING -02C0 MODIFIER LETTER GLOTTAL STOP -02C1 MODIFIER LETTER REVERSED GLOTTAL STOP -02C2 MODIFIER LETTER LEFT ARROWHEAD -02C3 MODIFIER LETTER RIGHT ARROWHEAD -02C4 MODIFIER LETTER UP ARROWHEAD -02C5 MODIFIER LETTER DOWN ARROWHEAD -02C6 MODIFIER LETTER CIRCUMFLEX ACCENT -02C7 CARON -02C8 MODIFIER LETTER VERTICAL LINE -02C9 MODIFIER LETTER MACRON -02CA MODIFIER LETTER ACUTE ACCENT -02CB MODIFIER LETTER GRAVE ACCENT -02CC MODIFIER LETTER LOW VERTICAL LINE -02CD MODIFIER LETTER LOW MACRON -02CE MODIFIER LETTER LOW GRAVE ACCENT -02CF MODIFIER LETTER LOW ACUTE ACCENT -02D0 MODIFIER LETTER TRIANGULAR COLON -02D1 MODIFIER LETTER HALF TRIANGULAR COLON -02D2 MODIFIER LETTER CENTRED RIGHT HALF RING -02D3 MODIFIER LETTER CENTRED LEFT HALF RING -02D4 MODIFIER LETTER UP TACK -02D5 MODIFIER LETTER DOWN TACK -02D6 MODIFIER LETTER PLUS SIGN -02D7 MODIFIER LETTER MINUS SIGN -02D8 BREVE -02D9 DOT ABOVE -02DA RING ABOVE -02DB OGONEK -02DC SMALL TILDE -02DD DOUBLE ACUTE ACCENT -02DE MODIFIER LETTER RHOTIC HOOK -02DF MODIFIER LETTER CROSS ACCENT -02E0 MODIFIER LETTER SMALL GAMMA -02E1 
MODIFIER LETTER SMALL L -02E2 MODIFIER LETTER SMALL S -02E3 MODIFIER LETTER SMALL X -02E4 MODIFIER LETTER SMALL REVERSED GLOTTAL STOP -02E5 MODIFIER LETTER EXTRA-HIGH TONE BAR -02E6 MODIFIER LETTER HIGH TONE BAR -02E7 MODIFIER LETTER MID TONE BAR -02E8 MODIFIER LETTER LOW TONE BAR -02E9 MODIFIER LETTER EXTRA-LOW TONE BAR -02EA MODIFIER LETTER YIN DEPARTING TONE MARK -02EB MODIFIER LETTER YANG DEPARTING TONE MARK -02EC MODIFIER LETTER VOICING -02ED MODIFIER LETTER UNASPIRATED -02EE MODIFIER LETTER DOUBLE APOSTROPHE -02EF MODIFIER LETTER LOW DOWN ARROWHEAD -02F0 MODIFIER LETTER LOW UP ARROWHEAD -02F1 MODIFIER LETTER LOW LEFT ARROWHEAD -02F2 MODIFIER LETTER LOW RIGHT ARROWHEAD -02F3 MODIFIER LETTER LOW RING -02F4 MODIFIER LETTER MIDDLE GRAVE ACCENT -02F5 MODIFIER LETTER MIDDLE DOUBLE GRAVE ACCENT -02F6 MODIFIER LETTER MIDDLE DOUBLE ACUTE ACCENT -02F7 MODIFIER LETTER LOW TILDE -02F8 MODIFIER LETTER RAISED COLON -02F9 MODIFIER LETTER BEGIN HIGH TONE -02FA MODIFIER LETTER END HIGH TONE -02FB MODIFIER LETTER BEGIN LOW TONE -02FC MODIFIER LETTER END LOW TONE -02FD MODIFIER LETTER SHELF -02FE MODIFIER LETTER OPEN SHELF -02FF MODIFIER LETTER LOW LEFT ARROW -0300 COMBINING GRAVE ACCENT -0301 COMBINING ACUTE ACCENT -0302 COMBINING CIRCUMFLEX ACCENT -0303 COMBINING TILDE -0304 COMBINING MACRON -0305 COMBINING OVERLINE -0306 COMBINING BREVE -0307 COMBINING DOT ABOVE -0308 COMBINING DIAERESIS -0309 COMBINING HOOK ABOVE -030A COMBINING RING ABOVE -030B COMBINING DOUBLE ACUTE ACCENT -030C COMBINING CARON -030D COMBINING VERTICAL LINE ABOVE -030E COMBINING DOUBLE VERTICAL LINE ABOVE -030F COMBINING DOUBLE GRAVE ACCENT -0310 COMBINING CANDRABINDU -0311 COMBINING INVERTED BREVE -0312 COMBINING TURNED COMMA ABOVE -0313 COMBINING COMMA ABOVE -0314 COMBINING REVERSED COMMA ABOVE -0315 COMBINING COMMA ABOVE RIGHT -0316 COMBINING GRAVE ACCENT BELOW -0317 COMBINING ACUTE ACCENT BELOW -0318 COMBINING LEFT TACK BELOW -0319 COMBINING RIGHT TACK BELOW -031A COMBINING LEFT ANGLE ABOVE -031B 
COMBINING HORN -031C COMBINING LEFT HALF RING BELOW -031D COMBINING UP TACK BELOW -031E COMBINING DOWN TACK BELOW -031F COMBINING PLUS SIGN BELOW -0320 COMBINING MINUS SIGN BELOW -0321 COMBINING PALATALIZED HOOK BELOW -0322 COMBINING RETROFLEX HOOK BELOW -0323 COMBINING DOT BELOW -0324 COMBINING DIAERESIS BELOW -0325 COMBINING RING BELOW -0326 COMBINING COMMA BELOW -0327 COMBINING CEDILLA -0328 COMBINING OGONEK -0329 COMBINING VERTICAL LINE BELOW -032A COMBINING BRIDGE BELOW -032B COMBINING INVERTED DOUBLE ARCH BELOW -032C COMBINING CARON BELOW -032D COMBINING CIRCUMFLEX ACCENT BELOW -032E COMBINING BREVE BELOW -032F COMBINING INVERTED BREVE BELOW -0330 COMBINING TILDE BELOW -0331 COMBINING MACRON BELOW -0332 COMBINING LOW LINE -0333 COMBINING DOUBLE LOW LINE -0334 COMBINING TILDE OVERLAY -0335 COMBINING SHORT STROKE OVERLAY -0336 COMBINING LONG STROKE OVERLAY -0337 COMBINING SHORT SOLIDUS OVERLAY -0338 COMBINING LONG SOLIDUS OVERLAY -0339 COMBINING RIGHT HALF RING BELOW -033A COMBINING INVERTED BRIDGE BELOW -033B COMBINING SQUARE BELOW -033C COMBINING SEAGULL BELOW -033D COMBINING X ABOVE -033E COMBINING VERTICAL TILDE -033F COMBINING DOUBLE OVERLINE -0340 COMBINING GRAVE TONE MARK -0341 COMBINING ACUTE TONE MARK -0342 COMBINING GREEK PERISPOMENI -0343 COMBINING GREEK KORONIS -0344 COMBINING GREEK DIALYTIKA TONOS -0345 COMBINING GREEK YPOGEGRAMMENI -0346 COMBINING BRIDGE ABOVE -0347 COMBINING EQUALS SIGN BELOW -0348 COMBINING DOUBLE VERTICAL LINE BELOW -0349 COMBINING LEFT ANGLE BELOW -034A COMBINING NOT TILDE ABOVE -034B COMBINING HOMOTHETIC ABOVE -034C COMBINING ALMOST EQUAL TO ABOVE -034D COMBINING LEFT RIGHT ARROW BELOW -034E COMBINING UPWARDS ARROW BELOW -034F COMBINING GRAPHEME JOINER -0350 COMBINING RIGHT ARROWHEAD ABOVE -0351 COMBINING LEFT HALF RING ABOVE -0352 COMBINING FERMATA -0353 COMBINING X BELOW -0354 COMBINING LEFT ARROWHEAD BELOW -0355 COMBINING RIGHT ARROWHEAD BELOW -0356 COMBINING RIGHT ARROWHEAD AND UP ARROWHEAD BELOW -0357 COMBINING RIGHT 
HALF RING ABOVE -0358 COMBINING DOT ABOVE RIGHT -0359 COMBINING ASTERISK BELOW -035A COMBINING DOUBLE RING BELOW -035B COMBINING ZIGZAG ABOVE -035C COMBINING DOUBLE BREVE BELOW -035D COMBINING DOUBLE BREVE -035E COMBINING DOUBLE MACRON -035F COMBINING DOUBLE MACRON BELOW -0360 COMBINING DOUBLE TILDE -0361 COMBINING DOUBLE INVERTED BREVE -0362 COMBINING DOUBLE RIGHTWARDS ARROW BELOW -0363 COMBINING LATIN SMALL LETTER A -0364 COMBINING LATIN SMALL LETTER E -0365 COMBINING LATIN SMALL LETTER I -0366 COMBINING LATIN SMALL LETTER O -0367 COMBINING LATIN SMALL LETTER U -0368 COMBINING LATIN SMALL LETTER C -0369 COMBINING LATIN SMALL LETTER D -036A COMBINING LATIN SMALL LETTER H -036B COMBINING LATIN SMALL LETTER M -036C COMBINING LATIN SMALL LETTER R -036D COMBINING LATIN SMALL LETTER T -036E COMBINING LATIN SMALL LETTER V -036F COMBINING LATIN SMALL LETTER X -0370 GREEK CAPITAL LETTER HETA -0371 GREEK SMALL LETTER HETA -0372 GREEK CAPITAL LETTER ARCHAIC SAMPI -0373 GREEK SMALL LETTER ARCHAIC SAMPI -0374 GREEK NUMERAL SIGN -0375 GREEK LOWER NUMERAL SIGN -0376 GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA -0377 GREEK SMALL LETTER PAMPHYLIAN DIGAMMA -037A GREEK YPOGEGRAMMENI -037B GREEK SMALL REVERSED LUNATE SIGMA SYMBOL -037C GREEK SMALL DOTTED LUNATE SIGMA SYMBOL -037D GREEK SMALL REVERSED DOTTED LUNATE SIGMA SYMBOL -037E GREEK QUESTION MARK -0384 GREEK TONOS -0385 GREEK DIALYTIKA TONOS -0386 GREEK CAPITAL LETTER ALPHA WITH TONOS -0387 GREEK ANO TELEIA -0388 GREEK CAPITAL LETTER EPSILON WITH TONOS -0389 GREEK CAPITAL LETTER ETA WITH TONOS -038A GREEK CAPITAL LETTER IOTA WITH TONOS -038C GREEK CAPITAL LETTER OMICRON WITH TONOS -038E GREEK CAPITAL LETTER UPSILON WITH TONOS -038F GREEK CAPITAL LETTER OMEGA WITH TONOS -0390 GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS -0391 GREEK CAPITAL LETTER ALPHA -0392 GREEK CAPITAL LETTER BETA -0393 GREEK CAPITAL LETTER GAMMA -0394 GREEK CAPITAL LETTER DELTA -0395 GREEK CAPITAL LETTER EPSILON -0396 GREEK CAPITAL LETTER ZETA -0397 GREEK 
CAPITAL LETTER ETA -0398 GREEK CAPITAL LETTER THETA -0399 GREEK CAPITAL LETTER IOTA -039A GREEK CAPITAL LETTER KAPPA -039B GREEK CAPITAL LETTER LAMDA -039C GREEK CAPITAL LETTER MU -039D GREEK CAPITAL LETTER NU -039E GREEK CAPITAL LETTER XI -039F GREEK CAPITAL LETTER OMICRON -03A0 GREEK CAPITAL LETTER PI -03A1 GREEK CAPITAL LETTER RHO -03A3 GREEK CAPITAL LETTER SIGMA -03A4 GREEK CAPITAL LETTER TAU -03A5 GREEK CAPITAL LETTER UPSILON -03A6 GREEK CAPITAL LETTER PHI -03A7 GREEK CAPITAL LETTER CHI -03A8 GREEK CAPITAL LETTER PSI -03A9 GREEK CAPITAL LETTER OMEGA -03AA GREEK CAPITAL LETTER IOTA WITH DIALYTIKA -03AB GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA -03AC GREEK SMALL LETTER ALPHA WITH TONOS -03AD GREEK SMALL LETTER EPSILON WITH TONOS -03AE GREEK SMALL LETTER ETA WITH TONOS -03AF GREEK SMALL LETTER IOTA WITH TONOS -03B0 GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS -03B1 GREEK SMALL LETTER ALPHA -03B2 GREEK SMALL LETTER BETA -03B3 GREEK SMALL LETTER GAMMA -03B4 GREEK SMALL LETTER DELTA -03B5 GREEK SMALL LETTER EPSILON -03B6 GREEK SMALL LETTER ZETA -03B7 GREEK SMALL LETTER ETA -03B8 GREEK SMALL LETTER THETA -03B9 GREEK SMALL LETTER IOTA -03BA GREEK SMALL LETTER KAPPA -03BB GREEK SMALL LETTER LAMDA -03BC GREEK SMALL LETTER MU -03BD GREEK SMALL LETTER NU -03BE GREEK SMALL LETTER XI -03BF GREEK SMALL LETTER OMICRON -03C0 GREEK SMALL LETTER PI -03C1 GREEK SMALL LETTER RHO -03C2 GREEK SMALL LETTER FINAL SIGMA -03C3 GREEK SMALL LETTER SIGMA -03C4 GREEK SMALL LETTER TAU -03C5 GREEK SMALL LETTER UPSILON -03C6 GREEK SMALL LETTER PHI -03C7 GREEK SMALL LETTER CHI -03C8 GREEK SMALL LETTER PSI -03C9 GREEK SMALL LETTER OMEGA -03CA GREEK SMALL LETTER IOTA WITH DIALYTIKA -03CB GREEK SMALL LETTER UPSILON WITH DIALYTIKA -03CC GREEK SMALL LETTER OMICRON WITH TONOS -03CD GREEK SMALL LETTER UPSILON WITH TONOS -03CE GREEK SMALL LETTER OMEGA WITH TONOS -03CF GREEK CAPITAL KAI SYMBOL -03D0 GREEK BETA SYMBOL -03D1 GREEK THETA SYMBOL -03D2 GREEK UPSILON WITH HOOK SYMBOL -03D3 GREEK 
UPSILON WITH ACUTE AND HOOK SYMBOL -03D4 GREEK UPSILON WITH DIAERESIS AND HOOK SYMBOL -03D5 GREEK PHI SYMBOL -03D6 GREEK PI SYMBOL -03D7 GREEK KAI SYMBOL -03D8 GREEK LETTER ARCHAIC KOPPA -03D9 GREEK SMALL LETTER ARCHAIC KOPPA -03DA GREEK LETTER STIGMA -03DB GREEK SMALL LETTER STIGMA -03DC GREEK LETTER DIGAMMA -03DD GREEK SMALL LETTER DIGAMMA -03DE GREEK LETTER KOPPA -03DF GREEK SMALL LETTER KOPPA -03E0 GREEK LETTER SAMPI -03E1 GREEK SMALL LETTER SAMPI -03E2 COPTIC CAPITAL LETTER SHEI -03E3 COPTIC SMALL LETTER SHEI -03E4 COPTIC CAPITAL LETTER FEI -03E5 COPTIC SMALL LETTER FEI -03E6 COPTIC CAPITAL LETTER KHEI -03E7 COPTIC SMALL LETTER KHEI -03E8 COPTIC CAPITAL LETTER HORI -03E9 COPTIC SMALL LETTER HORI -03EA COPTIC CAPITAL LETTER GANGIA -03EB COPTIC SMALL LETTER GANGIA -03EC COPTIC CAPITAL LETTER SHIMA -03ED COPTIC SMALL LETTER SHIMA -03EE COPTIC CAPITAL LETTER DEI -03EF COPTIC SMALL LETTER DEI -03F0 GREEK KAPPA SYMBOL -03F1 GREEK RHO SYMBOL -03F2 GREEK LUNATE SIGMA SYMBOL -03F3 GREEK LETTER YOT -03F4 GREEK CAPITAL THETA SYMBOL -03F5 GREEK LUNATE EPSILON SYMBOL -03F6 GREEK REVERSED LUNATE EPSILON SYMBOL -03F7 GREEK CAPITAL LETTER SHO -03F8 GREEK SMALL LETTER SHO -03F9 GREEK CAPITAL LUNATE SIGMA SYMBOL -03FA GREEK CAPITAL LETTER SAN -03FB GREEK SMALL LETTER SAN -03FC GREEK RHO WITH STROKE SYMBOL -03FD GREEK CAPITAL REVERSED LUNATE SIGMA SYMBOL -03FE GREEK CAPITAL DOTTED LUNATE SIGMA SYMBOL -03FF GREEK CAPITAL REVERSED DOTTED LUNATE SIGMA SYMBOL -0400 CYRILLIC CAPITAL LETTER IE WITH GRAVE -0401 CYRILLIC CAPITAL LETTER IO -0402 CYRILLIC CAPITAL LETTER DJE -0403 CYRILLIC CAPITAL LETTER GJE -0404 CYRILLIC CAPITAL LETTER UKRAINIAN IE -0405 CYRILLIC CAPITAL LETTER DZE -0406 CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I -0407 CYRILLIC CAPITAL LETTER YI -0408 CYRILLIC CAPITAL LETTER JE -0409 CYRILLIC CAPITAL LETTER LJE -040A CYRILLIC CAPITAL LETTER NJE -040B CYRILLIC CAPITAL LETTER TSHE -040C CYRILLIC CAPITAL LETTER KJE -040D CYRILLIC CAPITAL LETTER I WITH GRAVE -040E 
CYRILLIC CAPITAL LETTER SHORT U -040F CYRILLIC CAPITAL LETTER DZHE -0410 CYRILLIC CAPITAL LETTER A -0411 CYRILLIC CAPITAL LETTER BE -0412 CYRILLIC CAPITAL LETTER VE -0413 CYRILLIC CAPITAL LETTER GHE -0414 CYRILLIC CAPITAL LETTER DE -0415 CYRILLIC CAPITAL LETTER IE -0416 CYRILLIC CAPITAL LETTER ZHE -0417 CYRILLIC CAPITAL LETTER ZE -0418 CYRILLIC CAPITAL LETTER I -0419 CYRILLIC CAPITAL LETTER SHORT I -041A CYRILLIC CAPITAL LETTER KA -041B CYRILLIC CAPITAL LETTER EL -041C CYRILLIC CAPITAL LETTER EM -041D CYRILLIC CAPITAL LETTER EN -041E CYRILLIC CAPITAL LETTER O -041F CYRILLIC CAPITAL LETTER PE -0420 CYRILLIC CAPITAL LETTER ER -0421 CYRILLIC CAPITAL LETTER ES -0422 CYRILLIC CAPITAL LETTER TE -0423 CYRILLIC CAPITAL LETTER U -0424 CYRILLIC CAPITAL LETTER EF -0425 CYRILLIC CAPITAL LETTER HA -0426 CYRILLIC CAPITAL LETTER TSE -0427 CYRILLIC CAPITAL LETTER CHE -0428 CYRILLIC CAPITAL LETTER SHA -0429 CYRILLIC CAPITAL LETTER SHCHA -042A CYRILLIC CAPITAL LETTER HARD SIGN -042B CYRILLIC CAPITAL LETTER YERU -042C CYRILLIC CAPITAL LETTER SOFT SIGN -042D CYRILLIC CAPITAL LETTER E -042E CYRILLIC CAPITAL LETTER YU -042F CYRILLIC CAPITAL LETTER YA -0430 CYRILLIC SMALL LETTER A -0431 CYRILLIC SMALL LETTER BE -0432 CYRILLIC SMALL LETTER VE -0433 CYRILLIC SMALL LETTER GHE -0434 CYRILLIC SMALL LETTER DE -0435 CYRILLIC SMALL LETTER IE -0436 CYRILLIC SMALL LETTER ZHE -0437 CYRILLIC SMALL LETTER ZE -0438 CYRILLIC SMALL LETTER I -0439 CYRILLIC SMALL LETTER SHORT I -043A CYRILLIC SMALL LETTER KA -043B CYRILLIC SMALL LETTER EL -043C CYRILLIC SMALL LETTER EM -043D CYRILLIC SMALL LETTER EN -043E CYRILLIC SMALL LETTER O -043F CYRILLIC SMALL LETTER PE -0440 CYRILLIC SMALL LETTER ER -0441 CYRILLIC SMALL LETTER ES -0442 CYRILLIC SMALL LETTER TE -0443 CYRILLIC SMALL LETTER U -0444 CYRILLIC SMALL LETTER EF -0445 CYRILLIC SMALL LETTER HA -0446 CYRILLIC SMALL LETTER TSE -0447 CYRILLIC SMALL LETTER CHE -0448 CYRILLIC SMALL LETTER SHA -0449 CYRILLIC SMALL LETTER SHCHA -044A CYRILLIC SMALL LETTER HARD SIGN 
-044B CYRILLIC SMALL LETTER YERU -044C CYRILLIC SMALL LETTER SOFT SIGN -044D CYRILLIC SMALL LETTER E -044E CYRILLIC SMALL LETTER YU -044F CYRILLIC SMALL LETTER YA -0450 CYRILLIC SMALL LETTER IE WITH GRAVE -0451 CYRILLIC SMALL LETTER IO -0452 CYRILLIC SMALL LETTER DJE -0453 CYRILLIC SMALL LETTER GJE -0454 CYRILLIC SMALL LETTER UKRAINIAN IE -0455 CYRILLIC SMALL LETTER DZE -0456 CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I -0457 CYRILLIC SMALL LETTER YI -0458 CYRILLIC SMALL LETTER JE -0459 CYRILLIC SMALL LETTER LJE -045A CYRILLIC SMALL LETTER NJE -045B CYRILLIC SMALL LETTER TSHE -045C CYRILLIC SMALL LETTER KJE -045D CYRILLIC SMALL LETTER I WITH GRAVE -045E CYRILLIC SMALL LETTER SHORT U -045F CYRILLIC SMALL LETTER DZHE -0460 CYRILLIC CAPITAL LETTER OMEGA -0461 CYRILLIC SMALL LETTER OMEGA -0462 CYRILLIC CAPITAL LETTER YAT -0463 CYRILLIC SMALL LETTER YAT -0464 CYRILLIC CAPITAL LETTER IOTIFIED E -0465 CYRILLIC SMALL LETTER IOTIFIED E -0466 CYRILLIC CAPITAL LETTER LITTLE YUS -0467 CYRILLIC SMALL LETTER LITTLE YUS -0468 CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS -0469 CYRILLIC SMALL LETTER IOTIFIED LITTLE YUS -046A CYRILLIC CAPITAL LETTER BIG YUS -046B CYRILLIC SMALL LETTER BIG YUS -046C CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS -046D CYRILLIC SMALL LETTER IOTIFIED BIG YUS -046E CYRILLIC CAPITAL LETTER KSI -046F CYRILLIC SMALL LETTER KSI -0470 CYRILLIC CAPITAL LETTER PSI -0471 CYRILLIC SMALL LETTER PSI -0472 CYRILLIC CAPITAL LETTER FITA -0473 CYRILLIC SMALL LETTER FITA -0474 CYRILLIC CAPITAL LETTER IZHITSA -0475 CYRILLIC SMALL LETTER IZHITSA -0476 CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT -0477 CYRILLIC SMALL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT -0478 CYRILLIC CAPITAL LETTER UK -0479 CYRILLIC SMALL LETTER UK -047A CYRILLIC CAPITAL LETTER ROUND OMEGA -047B CYRILLIC SMALL LETTER ROUND OMEGA -047C CYRILLIC CAPITAL LETTER OMEGA WITH TITLO -047D CYRILLIC SMALL LETTER OMEGA WITH TITLO -047E CYRILLIC CAPITAL LETTER OT -047F CYRILLIC SMALL LETTER OT -0480 
CYRILLIC CAPITAL LETTER KOPPA -0481 CYRILLIC SMALL LETTER KOPPA -0482 CYRILLIC THOUSANDS SIGN -0483 COMBINING CYRILLIC TITLO -0484 COMBINING CYRILLIC PALATALIZATION -0485 COMBINING CYRILLIC DASIA PNEUMATA -0486 COMBINING CYRILLIC PSILI PNEUMATA -0487 COMBINING CYRILLIC POKRYTIE -0488 COMBINING CYRILLIC HUNDRED THOUSANDS SIGN -0489 COMBINING CYRILLIC MILLIONS SIGN -048A CYRILLIC CAPITAL LETTER SHORT I WITH TAIL -048B CYRILLIC SMALL LETTER SHORT I WITH TAIL -048C CYRILLIC CAPITAL LETTER SEMISOFT SIGN -048D CYRILLIC SMALL LETTER SEMISOFT SIGN -048E CYRILLIC CAPITAL LETTER ER WITH TICK -048F CYRILLIC SMALL LETTER ER WITH TICK -0490 CYRILLIC CAPITAL LETTER GHE WITH UPTURN -0491 CYRILLIC SMALL LETTER GHE WITH UPTURN -0492 CYRILLIC CAPITAL LETTER GHE WITH STROKE -0493 CYRILLIC SMALL LETTER GHE WITH STROKE -0494 CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK -0495 CYRILLIC SMALL LETTER GHE WITH MIDDLE HOOK -0496 CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER -0497 CYRILLIC SMALL LETTER ZHE WITH DESCENDER -0498 CYRILLIC CAPITAL LETTER ZE WITH DESCENDER -0499 CYRILLIC SMALL LETTER ZE WITH DESCENDER -049A CYRILLIC CAPITAL LETTER KA WITH DESCENDER -049B CYRILLIC SMALL LETTER KA WITH DESCENDER -049C CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE -049D CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE -049E CYRILLIC CAPITAL LETTER KA WITH STROKE -049F CYRILLIC SMALL LETTER KA WITH STROKE -04A0 CYRILLIC CAPITAL LETTER BASHKIR KA -04A1 CYRILLIC SMALL LETTER BASHKIR KA -04A2 CYRILLIC CAPITAL LETTER EN WITH DESCENDER -04A3 CYRILLIC SMALL LETTER EN WITH DESCENDER -04A4 CYRILLIC CAPITAL LIGATURE EN GHE -04A5 CYRILLIC SMALL LIGATURE EN GHE -04A6 CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK -04A7 CYRILLIC SMALL LETTER PE WITH MIDDLE HOOK -04A8 CYRILLIC CAPITAL LETTER ABKHASIAN HA -04A9 CYRILLIC SMALL LETTER ABKHASIAN HA -04AA CYRILLIC CAPITAL LETTER ES WITH DESCENDER -04AB CYRILLIC SMALL LETTER ES WITH DESCENDER -04AC CYRILLIC CAPITAL LETTER TE WITH DESCENDER -04AD CYRILLIC SMALL LETTER TE WITH 
DESCENDER -04AE CYRILLIC CAPITAL LETTER STRAIGHT U -04AF CYRILLIC SMALL LETTER STRAIGHT U -04B0 CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE -04B1 CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE -04B2 CYRILLIC CAPITAL LETTER HA WITH DESCENDER -04B3 CYRILLIC SMALL LETTER HA WITH DESCENDER -04B4 CYRILLIC CAPITAL LIGATURE TE TSE -04B5 CYRILLIC SMALL LIGATURE TE TSE -04B6 CYRILLIC CAPITAL LETTER CHE WITH DESCENDER -04B7 CYRILLIC SMALL LETTER CHE WITH DESCENDER -04B8 CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE -04B9 CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE -04BA CYRILLIC CAPITAL LETTER SHHA -04BB CYRILLIC SMALL LETTER SHHA -04BC CYRILLIC CAPITAL LETTER ABKHASIAN CHE -04BD CYRILLIC SMALL LETTER ABKHASIAN CHE -04BE CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH DESCENDER -04BF CYRILLIC SMALL LETTER ABKHASIAN CHE WITH DESCENDER -04C0 CYRILLIC LETTER PALOCHKA -04C1 CYRILLIC CAPITAL LETTER ZHE WITH BREVE -04C2 CYRILLIC SMALL LETTER ZHE WITH BREVE -04C3 CYRILLIC CAPITAL LETTER KA WITH HOOK -04C4 CYRILLIC SMALL LETTER KA WITH HOOK -04C5 CYRILLIC CAPITAL LETTER EL WITH TAIL -04C6 CYRILLIC SMALL LETTER EL WITH TAIL -04C7 CYRILLIC CAPITAL LETTER EN WITH HOOK -04C8 CYRILLIC SMALL LETTER EN WITH HOOK -04C9 CYRILLIC CAPITAL LETTER EN WITH TAIL -04CA CYRILLIC SMALL LETTER EN WITH TAIL -04CB CYRILLIC CAPITAL LETTER KHAKASSIAN CHE -04CC CYRILLIC SMALL LETTER KHAKASSIAN CHE -04CD CYRILLIC CAPITAL LETTER EM WITH TAIL -04CE CYRILLIC SMALL LETTER EM WITH TAIL -04CF CYRILLIC SMALL LETTER PALOCHKA -04D0 CYRILLIC CAPITAL LETTER A WITH BREVE -04D1 CYRILLIC SMALL LETTER A WITH BREVE -04D2 CYRILLIC CAPITAL LETTER A WITH DIAERESIS -04D3 CYRILLIC SMALL LETTER A WITH DIAERESIS -04D4 CYRILLIC CAPITAL LIGATURE A IE -04D5 CYRILLIC SMALL LIGATURE A IE -04D6 CYRILLIC CAPITAL LETTER IE WITH BREVE -04D7 CYRILLIC SMALL LETTER IE WITH BREVE -04D8 CYRILLIC CAPITAL LETTER SCHWA -04D9 CYRILLIC SMALL LETTER SCHWA -04DA CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS -04DB CYRILLIC SMALL LETTER SCHWA WITH 
DIAERESIS -04DC CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS -04DD CYRILLIC SMALL LETTER ZHE WITH DIAERESIS -04DE CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS -04DF CYRILLIC SMALL LETTER ZE WITH DIAERESIS -04E0 CYRILLIC CAPITAL LETTER ABKHASIAN DZE -04E1 CYRILLIC SMALL LETTER ABKHASIAN DZE -04E2 CYRILLIC CAPITAL LETTER I WITH MACRON -04E3 CYRILLIC SMALL LETTER I WITH MACRON -04E4 CYRILLIC CAPITAL LETTER I WITH DIAERESIS -04E5 CYRILLIC SMALL LETTER I WITH DIAERESIS -04E6 CYRILLIC CAPITAL LETTER O WITH DIAERESIS -04E7 CYRILLIC SMALL LETTER O WITH DIAERESIS -04E8 CYRILLIC CAPITAL LETTER BARRED O -04E9 CYRILLIC SMALL LETTER BARRED O -04EA CYRILLIC CAPITAL LETTER BARRED O WITH DIAERESIS -04EB CYRILLIC SMALL LETTER BARRED O WITH DIAERESIS -04EC CYRILLIC CAPITAL LETTER E WITH DIAERESIS -04ED CYRILLIC SMALL LETTER E WITH DIAERESIS -04EE CYRILLIC CAPITAL LETTER U WITH MACRON -04EF CYRILLIC SMALL LETTER U WITH MACRON -04F0 CYRILLIC CAPITAL LETTER U WITH DIAERESIS -04F1 CYRILLIC SMALL LETTER U WITH DIAERESIS -04F2 CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE -04F3 CYRILLIC SMALL LETTER U WITH DOUBLE ACUTE -04F4 CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS -04F5 CYRILLIC SMALL LETTER CHE WITH DIAERESIS -04F6 CYRILLIC CAPITAL LETTER GHE WITH DESCENDER -04F7 CYRILLIC SMALL LETTER GHE WITH DESCENDER -04F8 CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS -04F9 CYRILLIC SMALL LETTER YERU WITH DIAERESIS -04FA CYRILLIC CAPITAL LETTER GHE WITH STROKE AND HOOK -04FB CYRILLIC SMALL LETTER GHE WITH STROKE AND HOOK -04FC CYRILLIC CAPITAL LETTER HA WITH HOOK -04FD CYRILLIC SMALL LETTER HA WITH HOOK -04FE CYRILLIC CAPITAL LETTER HA WITH STROKE -04FF CYRILLIC SMALL LETTER HA WITH STROKE -0500 CYRILLIC CAPITAL LETTER KOMI DE -0501 CYRILLIC SMALL LETTER KOMI DE -0502 CYRILLIC CAPITAL LETTER KOMI DJE -0503 CYRILLIC SMALL LETTER KOMI DJE -0504 CYRILLIC CAPITAL LETTER KOMI ZJE -0505 CYRILLIC SMALL LETTER KOMI ZJE -0506 CYRILLIC CAPITAL LETTER KOMI DZJE -0507 CYRILLIC SMALL LETTER KOMI DZJE -0508 CYRILLIC 
CAPITAL LETTER KOMI LJE -0509 CYRILLIC SMALL LETTER KOMI LJE -050A CYRILLIC CAPITAL LETTER KOMI NJE -050B CYRILLIC SMALL LETTER KOMI NJE -050C CYRILLIC CAPITAL LETTER KOMI SJE -050D CYRILLIC SMALL LETTER KOMI SJE -050E CYRILLIC CAPITAL LETTER KOMI TJE -050F CYRILLIC SMALL LETTER KOMI TJE -0510 CYRILLIC CAPITAL LETTER REVERSED ZE -0511 CYRILLIC SMALL LETTER REVERSED ZE -0512 CYRILLIC CAPITAL LETTER EL WITH HOOK -0513 CYRILLIC SMALL LETTER EL WITH HOOK -0514 CYRILLIC CAPITAL LETTER LHA -0515 CYRILLIC SMALL LETTER LHA -0516 CYRILLIC CAPITAL LETTER RHA -0517 CYRILLIC SMALL LETTER RHA -0518 CYRILLIC CAPITAL LETTER YAE -0519 CYRILLIC SMALL LETTER YAE -051A CYRILLIC CAPITAL LETTER QA -051B CYRILLIC SMALL LETTER QA -051C CYRILLIC CAPITAL LETTER WE -051D CYRILLIC SMALL LETTER WE -051E CYRILLIC CAPITAL LETTER ALEUT KA -051F CYRILLIC SMALL LETTER ALEUT KA -0520 CYRILLIC CAPITAL LETTER EL WITH MIDDLE HOOK -0521 CYRILLIC SMALL LETTER EL WITH MIDDLE HOOK -0522 CYRILLIC CAPITAL LETTER EN WITH MIDDLE HOOK -0523 CYRILLIC SMALL LETTER EN WITH MIDDLE HOOK -0524 CYRILLIC CAPITAL LETTER PE WITH DESCENDER -0525 CYRILLIC SMALL LETTER PE WITH DESCENDER -0531 ARMENIAN CAPITAL LETTER AYB -0532 ARMENIAN CAPITAL LETTER BEN -0533 ARMENIAN CAPITAL LETTER GIM -0534 ARMENIAN CAPITAL LETTER DA -0535 ARMENIAN CAPITAL LETTER ECH -0536 ARMENIAN CAPITAL LETTER ZA -0537 ARMENIAN CAPITAL LETTER EH -0538 ARMENIAN CAPITAL LETTER ET -0539 ARMENIAN CAPITAL LETTER TO -053A ARMENIAN CAPITAL LETTER ZHE -053B ARMENIAN CAPITAL LETTER INI -053C ARMENIAN CAPITAL LETTER LIWN -053D ARMENIAN CAPITAL LETTER XEH -053E ARMENIAN CAPITAL LETTER CA -053F ARMENIAN CAPITAL LETTER KEN -0540 ARMENIAN CAPITAL LETTER HO -0541 ARMENIAN CAPITAL LETTER JA -0542 ARMENIAN CAPITAL LETTER GHAD -0543 ARMENIAN CAPITAL LETTER CHEH -0544 ARMENIAN CAPITAL LETTER MEN -0545 ARMENIAN CAPITAL LETTER YI -0546 ARMENIAN CAPITAL LETTER NOW -0547 ARMENIAN CAPITAL LETTER SHA -0548 ARMENIAN CAPITAL LETTER VO -0549 ARMENIAN CAPITAL LETTER CHA -054A 
ARMENIAN CAPITAL LETTER PEH -054B ARMENIAN CAPITAL LETTER JHEH -054C ARMENIAN CAPITAL LETTER RA -054D ARMENIAN CAPITAL LETTER SEH -054E ARMENIAN CAPITAL LETTER VEW -054F ARMENIAN CAPITAL LETTER TIWN -0550 ARMENIAN CAPITAL LETTER REH -0551 ARMENIAN CAPITAL LETTER CO -0552 ARMENIAN CAPITAL LETTER YIWN -0553 ARMENIAN CAPITAL LETTER PIWR -0554 ARMENIAN CAPITAL LETTER KEH -0555 ARMENIAN CAPITAL LETTER OH -0556 ARMENIAN CAPITAL LETTER FEH -0559 ARMENIAN MODIFIER LETTER LEFT HALF RING -055A ARMENIAN APOSTROPHE -055B ARMENIAN EMPHASIS MARK -055C ARMENIAN EXCLAMATION MARK -055D ARMENIAN COMMA -055E ARMENIAN QUESTION MARK -055F ARMENIAN ABBREVIATION MARK -0561 ARMENIAN SMALL LETTER AYB -0562 ARMENIAN SMALL LETTER BEN -0563 ARMENIAN SMALL LETTER GIM -0564 ARMENIAN SMALL LETTER DA -0565 ARMENIAN SMALL LETTER ECH -0566 ARMENIAN SMALL LETTER ZA -0567 ARMENIAN SMALL LETTER EH -0568 ARMENIAN SMALL LETTER ET -0569 ARMENIAN SMALL LETTER TO -056A ARMENIAN SMALL LETTER ZHE -056B ARMENIAN SMALL LETTER INI -056C ARMENIAN SMALL LETTER LIWN -056D ARMENIAN SMALL LETTER XEH -056E ARMENIAN SMALL LETTER CA -056F ARMENIAN SMALL LETTER KEN -0570 ARMENIAN SMALL LETTER HO -0571 ARMENIAN SMALL LETTER JA -0572 ARMENIAN SMALL LETTER GHAD -0573 ARMENIAN SMALL LETTER CHEH -0574 ARMENIAN SMALL LETTER MEN -0575 ARMENIAN SMALL LETTER YI -0576 ARMENIAN SMALL LETTER NOW -0577 ARMENIAN SMALL LETTER SHA -0578 ARMENIAN SMALL LETTER VO -0579 ARMENIAN SMALL LETTER CHA -057A ARMENIAN SMALL LETTER PEH -057B ARMENIAN SMALL LETTER JHEH -057C ARMENIAN SMALL LETTER RA -057D ARMENIAN SMALL LETTER SEH -057E ARMENIAN SMALL LETTER VEW -057F ARMENIAN SMALL LETTER TIWN -0580 ARMENIAN SMALL LETTER REH -0581 ARMENIAN SMALL LETTER CO -0582 ARMENIAN SMALL LETTER YIWN -0583 ARMENIAN SMALL LETTER PIWR -0584 ARMENIAN SMALL LETTER KEH -0585 ARMENIAN SMALL LETTER OH -0586 ARMENIAN SMALL LETTER FEH -0587 ARMENIAN SMALL LIGATURE ECH YIWN -0589 ARMENIAN FULL STOP -058A ARMENIAN HYPHEN -0591 HEBREW ACCENT ETNAHTA -0592 HEBREW ACCENT 
SEGOL -0593 HEBREW ACCENT SHALSHELET -0594 HEBREW ACCENT ZAQEF QATAN -0595 HEBREW ACCENT ZAQEF GADOL -0596 HEBREW ACCENT TIPEHA -0597 HEBREW ACCENT REVIA -0598 HEBREW ACCENT ZARQA -0599 HEBREW ACCENT PASHTA -059A HEBREW ACCENT YETIV -059B HEBREW ACCENT TEVIR -059C HEBREW ACCENT GERESH -059D HEBREW ACCENT GERESH MUQDAM -059E HEBREW ACCENT GERSHAYIM -059F HEBREW ACCENT QARNEY PARA -05A0 HEBREW ACCENT TELISHA GEDOLA -05A1 HEBREW ACCENT PAZER -05A2 HEBREW ACCENT ATNAH HAFUKH -05A3 HEBREW ACCENT MUNAH -05A4 HEBREW ACCENT MAHAPAKH -05A5 HEBREW ACCENT MERKHA -05A6 HEBREW ACCENT MERKHA KEFULA -05A7 HEBREW ACCENT DARGA -05A8 HEBREW ACCENT QADMA -05A9 HEBREW ACCENT TELISHA QETANA -05AA HEBREW ACCENT YERAH BEN YOMO -05AB HEBREW ACCENT OLE -05AC HEBREW ACCENT ILUY -05AD HEBREW ACCENT DEHI -05AE HEBREW ACCENT ZINOR -05AF HEBREW MARK MASORA CIRCLE -05B0 HEBREW POINT SHEVA -05B1 HEBREW POINT HATAF SEGOL -05B2 HEBREW POINT HATAF PATAH -05B3 HEBREW POINT HATAF QAMATS -05B4 HEBREW POINT HIRIQ -05B5 HEBREW POINT TSERE -05B6 HEBREW POINT SEGOL -05B7 HEBREW POINT PATAH -05B8 HEBREW POINT QAMATS -05B9 HEBREW POINT HOLAM -05BA HEBREW POINT HOLAM HASER FOR VAV -05BB HEBREW POINT QUBUTS -05BC HEBREW POINT DAGESH OR MAPIQ -05BD HEBREW POINT METEG -05BE HEBREW PUNCTUATION MAQAF -05BF HEBREW POINT RAFE -05C0 HEBREW PUNCTUATION PASEQ -05C1 HEBREW POINT SHIN DOT -05C2 HEBREW POINT SIN DOT -05C3 HEBREW PUNCTUATION SOF PASUQ -05C4 HEBREW MARK UPPER DOT -05C5 HEBREW MARK LOWER DOT -05C6 HEBREW PUNCTUATION NUN HAFUKHA -05C7 HEBREW POINT QAMATS QATAN -05D0 HEBREW LETTER ALEF -05D1 HEBREW LETTER BET -05D2 HEBREW LETTER GIMEL -05D3 HEBREW LETTER DALET -05D4 HEBREW LETTER HE -05D5 HEBREW LETTER VAV -05D6 HEBREW LETTER ZAYIN -05D7 HEBREW LETTER HET -05D8 HEBREW LETTER TET -05D9 HEBREW LETTER YOD -05DA HEBREW LETTER FINAL KAF -05DB HEBREW LETTER KAF -05DC HEBREW LETTER LAMED -05DD HEBREW LETTER FINAL MEM -05DE HEBREW LETTER MEM -05DF HEBREW LETTER FINAL NUN -05E0 HEBREW LETTER NUN -05E1 HEBREW LETTER 
SAMEKH -05E2 HEBREW LETTER AYIN -05E3 HEBREW LETTER FINAL PE -05E4 HEBREW LETTER PE -05E5 HEBREW LETTER FINAL TSADI -05E6 HEBREW LETTER TSADI -05E7 HEBREW LETTER QOF -05E8 HEBREW LETTER RESH -05E9 HEBREW LETTER SHIN -05EA HEBREW LETTER TAV -05F0 HEBREW LIGATURE YIDDISH DOUBLE VAV -05F1 HEBREW LIGATURE YIDDISH VAV YOD -05F2 HEBREW LIGATURE YIDDISH DOUBLE YOD -05F3 HEBREW PUNCTUATION GERESH -05F4 HEBREW PUNCTUATION GERSHAYIM -0600 ARABIC NUMBER SIGN -0601 ARABIC SIGN SANAH -0602 ARABIC FOOTNOTE MARKER -0603 ARABIC SIGN SAFHA -0606 ARABIC-INDIC CUBE ROOT -0607 ARABIC-INDIC FOURTH ROOT -0608 ARABIC RAY -0609 ARABIC-INDIC PER MILLE SIGN -060A ARABIC-INDIC PER TEN THOUSAND SIGN -060B AFGHANI SIGN -060C ARABIC COMMA -060D ARABIC DATE SEPARATOR -060E ARABIC POETIC VERSE SIGN -060F ARABIC SIGN MISRA -0610 ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM -0611 ARABIC SIGN ALAYHE ASSALLAM -0612 ARABIC SIGN RAHMATULLAH ALAYHE -0613 ARABIC SIGN RADI ALLAHOU ANHU -0614 ARABIC SIGN TAKHALLUS -0615 ARABIC SMALL HIGH TAH -0616 ARABIC SMALL HIGH LIGATURE ALEF WITH LAM WITH YEH -0617 ARABIC SMALL HIGH ZAIN -0618 ARABIC SMALL FATHA -0619 ARABIC SMALL DAMMA -061A ARABIC SMALL KASRA -061B ARABIC SEMICOLON -061E ARABIC TRIPLE DOT PUNCTUATION MARK -061F ARABIC QUESTION MARK -0621 ARABIC LETTER HAMZA -0622 ARABIC LETTER ALEF WITH MADDA ABOVE -0623 ARABIC LETTER ALEF WITH HAMZA ABOVE -0624 ARABIC LETTER WAW WITH HAMZA ABOVE -0625 ARABIC LETTER ALEF WITH HAMZA BELOW -0626 ARABIC LETTER YEH WITH HAMZA ABOVE -0627 ARABIC LETTER ALEF -0628 ARABIC LETTER BEH -0629 ARABIC LETTER TEH MARBUTA -062A ARABIC LETTER TEH -062B ARABIC LETTER THEH -062C ARABIC LETTER JEEM -062D ARABIC LETTER HAH -062E ARABIC LETTER KHAH -062F ARABIC LETTER DAL -0630 ARABIC LETTER THAL -0631 ARABIC LETTER REH -0632 ARABIC LETTER ZAIN -0633 ARABIC LETTER SEEN -0634 ARABIC LETTER SHEEN -0635 ARABIC LETTER SAD -0636 ARABIC LETTER DAD -0637 ARABIC LETTER TAH -0638 ARABIC LETTER ZAH -0639 ARABIC LETTER AIN -063A ARABIC LETTER GHAIN 
-063B ARABIC LETTER KEHEH WITH TWO DOTS ABOVE -063C ARABIC LETTER KEHEH WITH THREE DOTS BELOW -063D ARABIC LETTER FARSI YEH WITH INVERTED V -063E ARABIC LETTER FARSI YEH WITH TWO DOTS ABOVE -063F ARABIC LETTER FARSI YEH WITH THREE DOTS ABOVE -0640 ARABIC TATWEEL -0641 ARABIC LETTER FEH -0642 ARABIC LETTER QAF -0643 ARABIC LETTER KAF -0644 ARABIC LETTER LAM -0645 ARABIC LETTER MEEM -0646 ARABIC LETTER NOON -0647 ARABIC LETTER HEH -0648 ARABIC LETTER WAW -0649 ARABIC LETTER ALEF MAKSURA -064A ARABIC LETTER YEH -064B ARABIC FATHATAN -064C ARABIC DAMMATAN -064D ARABIC KASRATAN -064E ARABIC FATHA -064F ARABIC DAMMA -0650 ARABIC KASRA -0651 ARABIC SHADDA -0652 ARABIC SUKUN -0653 ARABIC MADDAH ABOVE -0654 ARABIC HAMZA ABOVE -0655 ARABIC HAMZA BELOW -0656 ARABIC SUBSCRIPT ALEF -0657 ARABIC INVERTED DAMMA -0658 ARABIC MARK NOON GHUNNA -0659 ARABIC ZWARAKAY -065A ARABIC VOWEL SIGN SMALL V ABOVE -065B ARABIC VOWEL SIGN INVERTED SMALL V ABOVE -065C ARABIC VOWEL SIGN DOT BELOW -065D ARABIC REVERSED DAMMA -065E ARABIC FATHA WITH TWO DOTS -0660 ARABIC-INDIC DIGIT ZERO -0661 ARABIC-INDIC DIGIT ONE -0662 ARABIC-INDIC DIGIT TWO -0663 ARABIC-INDIC DIGIT THREE -0664 ARABIC-INDIC DIGIT FOUR -0665 ARABIC-INDIC DIGIT FIVE -0666 ARABIC-INDIC DIGIT SIX -0667 ARABIC-INDIC DIGIT SEVEN -0668 ARABIC-INDIC DIGIT EIGHT -0669 ARABIC-INDIC DIGIT NINE -066A ARABIC PERCENT SIGN -066B ARABIC DECIMAL SEPARATOR -066C ARABIC THOUSANDS SEPARATOR -066D ARABIC FIVE POINTED STAR -066E ARABIC LETTER DOTLESS BEH -066F ARABIC LETTER DOTLESS QAF -0670 ARABIC LETTER SUPERSCRIPT ALEF -0671 ARABIC LETTER ALEF WASLA -0672 ARABIC LETTER ALEF WITH WAVY HAMZA ABOVE -0673 ARABIC LETTER ALEF WITH WAVY HAMZA BELOW -0674 ARABIC LETTER HIGH HAMZA -0675 ARABIC LETTER HIGH HAMZA ALEF -0676 ARABIC LETTER HIGH HAMZA WAW -0677 ARABIC LETTER U WITH HAMZA ABOVE -0678 ARABIC LETTER HIGH HAMZA YEH -0679 ARABIC LETTER TTEH -067A ARABIC LETTER TTEHEH -067B ARABIC LETTER BEEH -067C ARABIC LETTER TEH WITH RING -067D ARABIC LETTER TEH 
WITH THREE DOTS ABOVE DOWNWARDS -067E ARABIC LETTER PEH -067F ARABIC LETTER TEHEH -0680 ARABIC LETTER BEHEH -0681 ARABIC LETTER HAH WITH HAMZA ABOVE -0682 ARABIC LETTER HAH WITH TWO DOTS VERTICAL ABOVE -0683 ARABIC LETTER NYEH -0684 ARABIC LETTER DYEH -0685 ARABIC LETTER HAH WITH THREE DOTS ABOVE -0686 ARABIC LETTER TCHEH -0687 ARABIC LETTER TCHEHEH -0688 ARABIC LETTER DDAL -0689 ARABIC LETTER DAL WITH RING -068A ARABIC LETTER DAL WITH DOT BELOW -068B ARABIC LETTER DAL WITH DOT BELOW AND SMALL TAH -068C ARABIC LETTER DAHAL -068D ARABIC LETTER DDAHAL -068E ARABIC LETTER DUL -068F ARABIC LETTER DAL WITH THREE DOTS ABOVE DOWNWARDS -0690 ARABIC LETTER DAL WITH FOUR DOTS ABOVE -0691 ARABIC LETTER RREH -0692 ARABIC LETTER REH WITH SMALL V -0693 ARABIC LETTER REH WITH RING -0694 ARABIC LETTER REH WITH DOT BELOW -0695 ARABIC LETTER REH WITH SMALL V BELOW -0696 ARABIC LETTER REH WITH DOT BELOW AND DOT ABOVE -0697 ARABIC LETTER REH WITH TWO DOTS ABOVE -0698 ARABIC LETTER JEH -0699 ARABIC LETTER REH WITH FOUR DOTS ABOVE -069A ARABIC LETTER SEEN WITH DOT BELOW AND DOT ABOVE -069B ARABIC LETTER SEEN WITH THREE DOTS BELOW -069C ARABIC LETTER SEEN WITH THREE DOTS BELOW AND THREE DOTS ABOVE -069D ARABIC LETTER SAD WITH TWO DOTS BELOW -069E ARABIC LETTER SAD WITH THREE DOTS ABOVE -069F ARABIC LETTER TAH WITH THREE DOTS ABOVE -06A0 ARABIC LETTER AIN WITH THREE DOTS ABOVE -06A1 ARABIC LETTER DOTLESS FEH -06A2 ARABIC LETTER FEH WITH DOT MOVED BELOW -06A3 ARABIC LETTER FEH WITH DOT BELOW -06A4 ARABIC LETTER VEH -06A5 ARABIC LETTER FEH WITH THREE DOTS BELOW -06A6 ARABIC LETTER PEHEH -06A7 ARABIC LETTER QAF WITH DOT ABOVE -06A8 ARABIC LETTER QAF WITH THREE DOTS ABOVE -06A9 ARABIC LETTER KEHEH -06AA ARABIC LETTER SWASH KAF -06AB ARABIC LETTER KAF WITH RING -06AC ARABIC LETTER KAF WITH DOT ABOVE -06AD ARABIC LETTER NG -06AE ARABIC LETTER KAF WITH THREE DOTS BELOW -06AF ARABIC LETTER GAF -06B0 ARABIC LETTER GAF WITH RING -06B1 ARABIC LETTER NGOEH -06B2 ARABIC LETTER GAF WITH TWO DOTS BELOW 
-06B3 ARABIC LETTER GUEH -06B4 ARABIC LETTER GAF WITH THREE DOTS ABOVE -06B5 ARABIC LETTER LAM WITH SMALL V -06B6 ARABIC LETTER LAM WITH DOT ABOVE -06B7 ARABIC LETTER LAM WITH THREE DOTS ABOVE -06B8 ARABIC LETTER LAM WITH THREE DOTS BELOW -06B9 ARABIC LETTER NOON WITH DOT BELOW -06BA ARABIC LETTER NOON GHUNNA -06BB ARABIC LETTER RNOON -06BC ARABIC LETTER NOON WITH RING -06BD ARABIC LETTER NOON WITH THREE DOTS ABOVE -06BE ARABIC LETTER HEH DOACHASHMEE -06BF ARABIC LETTER TCHEH WITH DOT ABOVE -06C0 ARABIC LETTER HEH WITH YEH ABOVE -06C1 ARABIC LETTER HEH GOAL -06C2 ARABIC LETTER HEH GOAL WITH HAMZA ABOVE -06C3 ARABIC LETTER TEH MARBUTA GOAL -06C4 ARABIC LETTER WAW WITH RING -06C5 ARABIC LETTER KIRGHIZ OE -06C6 ARABIC LETTER OE -06C7 ARABIC LETTER U -06C8 ARABIC LETTER YU -06C9 ARABIC LETTER KIRGHIZ YU -06CA ARABIC LETTER WAW WITH TWO DOTS ABOVE -06CB ARABIC LETTER VE -06CC ARABIC LETTER FARSI YEH -06CD ARABIC LETTER YEH WITH TAIL -06CE ARABIC LETTER YEH WITH SMALL V -06CF ARABIC LETTER WAW WITH DOT ABOVE -06D0 ARABIC LETTER E -06D1 ARABIC LETTER YEH WITH THREE DOTS BELOW -06D2 ARABIC LETTER YEH BARREE -06D3 ARABIC LETTER YEH BARREE WITH HAMZA ABOVE -06D4 ARABIC FULL STOP -06D5 ARABIC LETTER AE -06D6 ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA -06D7 ARABIC SMALL HIGH LIGATURE QAF WITH LAM WITH ALEF MAKSURA -06D8 ARABIC SMALL HIGH MEEM INITIAL FORM -06D9 ARABIC SMALL HIGH LAM ALEF -06DA ARABIC SMALL HIGH JEEM -06DB ARABIC SMALL HIGH THREE DOTS -06DC ARABIC SMALL HIGH SEEN -06DD ARABIC END OF AYAH -06DE ARABIC START OF RUB EL HIZB -06DF ARABIC SMALL HIGH ROUNDED ZERO -06E0 ARABIC SMALL HIGH UPRIGHT RECTANGULAR ZERO -06E1 ARABIC SMALL HIGH DOTLESS HEAD OF KHAH -06E2 ARABIC SMALL HIGH MEEM ISOLATED FORM -06E3 ARABIC SMALL LOW SEEN -06E4 ARABIC SMALL HIGH MADDA -06E5 ARABIC SMALL WAW -06E6 ARABIC SMALL YEH -06E7 ARABIC SMALL HIGH YEH -06E8 ARABIC SMALL HIGH NOON -06E9 ARABIC PLACE OF SAJDAH -06EA ARABIC EMPTY CENTRE LOW STOP -06EB ARABIC EMPTY CENTRE HIGH 
STOP -06EC ARABIC ROUNDED HIGH STOP WITH FILLED CENTRE -06ED ARABIC SMALL LOW MEEM -06EE ARABIC LETTER DAL WITH INVERTED V -06EF ARABIC LETTER REH WITH INVERTED V -06F0 EXTENDED ARABIC-INDIC DIGIT ZERO -06F1 EXTENDED ARABIC-INDIC DIGIT ONE -06F2 EXTENDED ARABIC-INDIC DIGIT TWO -06F3 EXTENDED ARABIC-INDIC DIGIT THREE -06F4 EXTENDED ARABIC-INDIC DIGIT FOUR -06F5 EXTENDED ARABIC-INDIC DIGIT FIVE -06F6 EXTENDED ARABIC-INDIC DIGIT SIX -06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN -06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT -06F9 EXTENDED ARABIC-INDIC DIGIT NINE -06FA ARABIC LETTER SHEEN WITH DOT BELOW -06FB ARABIC LETTER DAD WITH DOT BELOW -06FC ARABIC LETTER GHAIN WITH DOT BELOW -06FD ARABIC SIGN SINDHI AMPERSAND -06FE ARABIC SIGN SINDHI POSTPOSITION MEN -06FF ARABIC LETTER HEH WITH INVERTED V -0700 SYRIAC END OF PARAGRAPH -0701 SYRIAC SUPRALINEAR FULL STOP -0702 SYRIAC SUBLINEAR FULL STOP -0703 SYRIAC SUPRALINEAR COLON -0704 SYRIAC SUBLINEAR COLON -0705 SYRIAC HORIZONTAL COLON -0706 SYRIAC COLON SKEWED LEFT -0707 SYRIAC COLON SKEWED RIGHT -0708 SYRIAC SUPRALINEAR COLON SKEWED LEFT -0709 SYRIAC SUBLINEAR COLON SKEWED RIGHT -070A SYRIAC CONTRACTION -070B SYRIAC HARKLEAN OBELUS -070C SYRIAC HARKLEAN METOBELUS -070D SYRIAC HARKLEAN ASTERISCUS -070F SYRIAC ABBREVIATION MARK -0710 SYRIAC LETTER ALAPH -0711 SYRIAC LETTER SUPERSCRIPT ALAPH -0712 SYRIAC LETTER BETH -0713 SYRIAC LETTER GAMAL -0714 SYRIAC LETTER GAMAL GARSHUNI -0715 SYRIAC LETTER DALATH -0716 SYRIAC LETTER DOTLESS DALATH RISH -0717 SYRIAC LETTER HE -0718 SYRIAC LETTER WAW -0719 SYRIAC LETTER ZAIN -071A SYRIAC LETTER HETH -071B SYRIAC LETTER TETH -071C SYRIAC LETTER TETH GARSHUNI -071D SYRIAC LETTER YUDH -071E SYRIAC LETTER YUDH HE -071F SYRIAC LETTER KAPH -0720 SYRIAC LETTER LAMADH -0721 SYRIAC LETTER MIM -0722 SYRIAC LETTER NUN -0723 SYRIAC LETTER SEMKATH -0724 SYRIAC LETTER FINAL SEMKATH -0725 SYRIAC LETTER E -0726 SYRIAC LETTER PE -0727 SYRIAC LETTER REVERSED PE -0728 SYRIAC LETTER SADHE -0729 SYRIAC LETTER QAPH -072A 
SYRIAC LETTER RISH -072B SYRIAC LETTER SHIN -072C SYRIAC LETTER TAW -072D SYRIAC LETTER PERSIAN BHETH -072E SYRIAC LETTER PERSIAN GHAMAL -072F SYRIAC LETTER PERSIAN DHALATH -0730 SYRIAC PTHAHA ABOVE -0731 SYRIAC PTHAHA BELOW -0732 SYRIAC PTHAHA DOTTED -0733 SYRIAC ZQAPHA ABOVE -0734 SYRIAC ZQAPHA BELOW -0735 SYRIAC ZQAPHA DOTTED -0736 SYRIAC RBASA ABOVE -0737 SYRIAC RBASA BELOW -0738 SYRIAC DOTTED ZLAMA HORIZONTAL -0739 SYRIAC DOTTED ZLAMA ANGULAR -073A SYRIAC HBASA ABOVE -073B SYRIAC HBASA BELOW -073C SYRIAC HBASA-ESASA DOTTED -073D SYRIAC ESASA ABOVE -073E SYRIAC ESASA BELOW -073F SYRIAC RWAHA -0740 SYRIAC FEMININE DOT -0741 SYRIAC QUSHSHAYA -0742 SYRIAC RUKKAKHA -0743 SYRIAC TWO VERTICAL DOTS ABOVE -0744 SYRIAC TWO VERTICAL DOTS BELOW -0745 SYRIAC THREE DOTS ABOVE -0746 SYRIAC THREE DOTS BELOW -0747 SYRIAC OBLIQUE LINE ABOVE -0748 SYRIAC OBLIQUE LINE BELOW -0749 SYRIAC MUSIC -074A SYRIAC BARREKH -074D SYRIAC LETTER SOGDIAN ZHAIN -074E SYRIAC LETTER SOGDIAN KHAPH -074F SYRIAC LETTER SOGDIAN FE -0750 ARABIC LETTER BEH WITH THREE DOTS HORIZONTALLY BELOW -0751 ARABIC LETTER BEH WITH DOT BELOW AND THREE DOTS ABOVE -0752 ARABIC LETTER BEH WITH THREE DOTS POINTING UPWARDS BELOW -0753 ARABIC LETTER BEH WITH THREE DOTS POINTING UPWARDS BELOW AND TWO DOTS ABOVE -0754 ARABIC LETTER BEH WITH TWO DOTS BELOW AND DOT ABOVE -0755 ARABIC LETTER BEH WITH INVERTED SMALL V BELOW -0756 ARABIC LETTER BEH WITH SMALL V -0757 ARABIC LETTER HAH WITH TWO DOTS ABOVE -0758 ARABIC LETTER HAH WITH THREE DOTS POINTING UPWARDS BELOW -0759 ARABIC LETTER DAL WITH TWO DOTS VERTICALLY BELOW AND SMALL TAH -075A ARABIC LETTER DAL WITH INVERTED SMALL V BELOW -075B ARABIC LETTER REH WITH STROKE -075C ARABIC LETTER SEEN WITH FOUR DOTS ABOVE -075D ARABIC LETTER AIN WITH TWO DOTS ABOVE -075E ARABIC LETTER AIN WITH THREE DOTS POINTING DOWNWARDS ABOVE -075F ARABIC LETTER AIN WITH TWO DOTS VERTICALLY ABOVE -0760 ARABIC LETTER FEH WITH TWO DOTS BELOW -0761 ARABIC LETTER FEH WITH THREE DOTS POINTING UPWARDS 
BELOW -0762 ARABIC LETTER KEHEH WITH DOT ABOVE -0763 ARABIC LETTER KEHEH WITH THREE DOTS ABOVE -0764 ARABIC LETTER KEHEH WITH THREE DOTS POINTING UPWARDS BELOW -0765 ARABIC LETTER MEEM WITH DOT ABOVE -0766 ARABIC LETTER MEEM WITH DOT BELOW -0767 ARABIC LETTER NOON WITH TWO DOTS BELOW -0768 ARABIC LETTER NOON WITH SMALL TAH -0769 ARABIC LETTER NOON WITH SMALL V -076A ARABIC LETTER LAM WITH BAR -076B ARABIC LETTER REH WITH TWO DOTS VERTICALLY ABOVE -076C ARABIC LETTER REH WITH HAMZA ABOVE -076D ARABIC LETTER SEEN WITH TWO DOTS VERTICALLY ABOVE -076E ARABIC LETTER HAH WITH SMALL ARABIC LETTER TAH BELOW -076F ARABIC LETTER HAH WITH SMALL ARABIC LETTER TAH AND TWO DOTS -0770 ARABIC LETTER SEEN WITH SMALL ARABIC LETTER TAH AND TWO DOTS -0771 ARABIC LETTER REH WITH SMALL ARABIC LETTER TAH AND TWO DOTS -0772 ARABIC LETTER HAH WITH SMALL ARABIC LETTER TAH ABOVE -0773 ARABIC LETTER ALEF WITH EXTENDED ARABIC-INDIC DIGIT TWO ABOVE -0774 ARABIC LETTER ALEF WITH EXTENDED ARABIC-INDIC DIGIT THREE ABOVE -0775 ARABIC LETTER FARSI YEH WITH EXTENDED ARABIC-INDIC DIGIT TWO ABOVE -0776 ARABIC LETTER FARSI YEH WITH EXTENDED ARABIC-INDIC DIGIT THREE ABOVE -0777 ARABIC LETTER FARSI YEH WITH EXTENDED ARABIC-INDIC DIGIT FOUR BELOW -0778 ARABIC LETTER WAW WITH EXTENDED ARABIC-INDIC DIGIT TWO ABOVE -0779 ARABIC LETTER WAW WITH EXTENDED ARABIC-INDIC DIGIT THREE ABOVE -077A ARABIC LETTER YEH BARREE WITH EXTENDED ARABIC-INDIC DIGIT TWO ABOVE -077B ARABIC LETTER YEH BARREE WITH EXTENDED ARABIC-INDIC DIGIT THREE ABOVE -077C ARABIC LETTER HAH WITH EXTENDED ARABIC-INDIC DIGIT FOUR BELOW -077D ARABIC LETTER SEEN WITH EXTENDED ARABIC-INDIC DIGIT FOUR ABOVE -077E ARABIC LETTER SEEN WITH INVERTED V -077F ARABIC LETTER KAF WITH TWO DOTS ABOVE -0780 THAANA LETTER HAA -0781 THAANA LETTER SHAVIYANI -0782 THAANA LETTER NOONU -0783 THAANA LETTER RAA -0784 THAANA LETTER BAA -0785 THAANA LETTER LHAVIYANI -0786 THAANA LETTER KAAFU -0787 THAANA LETTER ALIFU -0788 THAANA LETTER VAAVU -0789 THAANA LETTER MEEMU 
-078A THAANA LETTER FAAFU -078B THAANA LETTER DHAALU -078C THAANA LETTER THAA -078D THAANA LETTER LAAMU -078E THAANA LETTER GAAFU -078F THAANA LETTER GNAVIYANI -0790 THAANA LETTER SEENU -0791 THAANA LETTER DAVIYANI -0792 THAANA LETTER ZAVIYANI -0793 THAANA LETTER TAVIYANI -0794 THAANA LETTER YAA -0795 THAANA LETTER PAVIYANI -0796 THAANA LETTER JAVIYANI -0797 THAANA LETTER CHAVIYANI -0798 THAANA LETTER TTAA -0799 THAANA LETTER HHAA -079A THAANA LETTER KHAA -079B THAANA LETTER THAALU -079C THAANA LETTER ZAA -079D THAANA LETTER SHEENU -079E THAANA LETTER SAADHU -079F THAANA LETTER DAADHU -07A0 THAANA LETTER TO -07A1 THAANA LETTER ZO -07A2 THAANA LETTER AINU -07A3 THAANA LETTER GHAINU -07A4 THAANA LETTER QAAFU -07A5 THAANA LETTER WAAVU -07A6 THAANA ABAFILI -07A7 THAANA AABAAFILI -07A8 THAANA IBIFILI -07A9 THAANA EEBEEFILI -07AA THAANA UBUFILI -07AB THAANA OOBOOFILI -07AC THAANA EBEFILI -07AD THAANA EYBEYFILI -07AE THAANA OBOFILI -07AF THAANA OABOAFILI -07B0 THAANA SUKUN -07B1 THAANA LETTER NAA -07C0 NKO DIGIT ZERO -07C1 NKO DIGIT ONE -07C2 NKO DIGIT TWO -07C3 NKO DIGIT THREE -07C4 NKO DIGIT FOUR -07C5 NKO DIGIT FIVE -07C6 NKO DIGIT SIX -07C7 NKO DIGIT SEVEN -07C8 NKO DIGIT EIGHT -07C9 NKO DIGIT NINE -07CA NKO LETTER A -07CB NKO LETTER EE -07CC NKO LETTER I -07CD NKO LETTER E -07CE NKO LETTER U -07CF NKO LETTER OO -07D0 NKO LETTER O -07D1 NKO LETTER DAGBASINNA -07D2 NKO LETTER N -07D3 NKO LETTER BA -07D4 NKO LETTER PA -07D5 NKO LETTER TA -07D6 NKO LETTER JA -07D7 NKO LETTER CHA -07D8 NKO LETTER DA -07D9 NKO LETTER RA -07DA NKO LETTER RRA -07DB NKO LETTER SA -07DC NKO LETTER GBA -07DD NKO LETTER FA -07DE NKO LETTER KA -07DF NKO LETTER LA -07E0 NKO LETTER NA WOLOSO -07E1 NKO LETTER MA -07E2 NKO LETTER NYA -07E3 NKO LETTER NA -07E4 NKO LETTER HA -07E5 NKO LETTER WA -07E6 NKO LETTER YA -07E7 NKO LETTER NYA WOLOSO -07E8 NKO LETTER JONA JA -07E9 NKO LETTER JONA CHA -07EA NKO LETTER JONA RA -07EB NKO COMBINING SHORT HIGH TONE -07EC NKO COMBINING SHORT LOW TONE -07ED NKO 
COMBINING SHORT RISING TONE -07EE NKO COMBINING LONG DESCENDING TONE -07EF NKO COMBINING LONG HIGH TONE -07F0 NKO COMBINING LONG LOW TONE -07F1 NKO COMBINING LONG RISING TONE -07F2 NKO COMBINING NASALIZATION MARK -07F3 NKO COMBINING DOUBLE DOT ABOVE -07F4 NKO HIGH TONE APOSTROPHE -07F5 NKO LOW TONE APOSTROPHE -07F6 NKO SYMBOL OO DENNEN -07F7 NKO SYMBOL GBAKURUNEN -07F8 NKO COMMA -07F9 NKO EXCLAMATION MARK -07FA NKO LAJANYALAN -0800 SAMARITAN LETTER ALAF -0801 SAMARITAN LETTER BIT -0802 SAMARITAN LETTER GAMAN -0803 SAMARITAN LETTER DALAT -0804 SAMARITAN LETTER IY -0805 SAMARITAN LETTER BAA -0806 SAMARITAN LETTER ZEN -0807 SAMARITAN LETTER IT -0808 SAMARITAN LETTER TIT -0809 SAMARITAN LETTER YUT -080A SAMARITAN LETTER KAAF -080B SAMARITAN LETTER LABAT -080C SAMARITAN LETTER MIM -080D SAMARITAN LETTER NUN -080E SAMARITAN LETTER SINGAAT -080F SAMARITAN LETTER IN -0810 SAMARITAN LETTER FI -0811 SAMARITAN LETTER TSAADIY -0812 SAMARITAN LETTER QUF -0813 SAMARITAN LETTER RISH -0814 SAMARITAN LETTER SHAN -0815 SAMARITAN LETTER TAAF -0816 SAMARITAN MARK IN -0817 SAMARITAN MARK IN-ALAF -0818 SAMARITAN MARK OCCLUSION -0819 SAMARITAN MARK DAGESH -081A SAMARITAN MODIFIER LETTER EPENTHETIC YUT -081B SAMARITAN MARK EPENTHETIC YUT -081C SAMARITAN VOWEL SIGN LONG E -081D SAMARITAN VOWEL SIGN E -081E SAMARITAN VOWEL SIGN OVERLONG AA -081F SAMARITAN VOWEL SIGN LONG AA -0820 SAMARITAN VOWEL SIGN AA -0821 SAMARITAN VOWEL SIGN OVERLONG A -0822 SAMARITAN VOWEL SIGN LONG A -0823 SAMARITAN VOWEL SIGN A -0824 SAMARITAN MODIFIER LETTER SHORT A -0825 SAMARITAN VOWEL SIGN SHORT A -0826 SAMARITAN VOWEL SIGN LONG U -0827 SAMARITAN VOWEL SIGN U -0828 SAMARITAN MODIFIER LETTER I -0829 SAMARITAN VOWEL SIGN LONG I -082A SAMARITAN VOWEL SIGN I -082B SAMARITAN VOWEL SIGN O -082C SAMARITAN VOWEL SIGN SUKUN -082D SAMARITAN MARK NEQUDAA -0830 SAMARITAN PUNCTUATION NEQUDAA -0831 SAMARITAN PUNCTUATION AFSAAQ -0832 SAMARITAN PUNCTUATION ANGED -0833 SAMARITAN PUNCTUATION BAU -0834 SAMARITAN PUNCTUATION ATMAAU 
-0835 SAMARITAN PUNCTUATION SHIYYAALAA -0836 SAMARITAN ABBREVIATION MARK -0837 SAMARITAN PUNCTUATION MELODIC QITSA -0838 SAMARITAN PUNCTUATION ZIQAA -0839 SAMARITAN PUNCTUATION QITSA -083A SAMARITAN PUNCTUATION ZAEF -083B SAMARITAN PUNCTUATION TURU -083C SAMARITAN PUNCTUATION ARKAANU -083D SAMARITAN PUNCTUATION SOF MASHFAAT -083E SAMARITAN PUNCTUATION ANNAAU -0900 DEVANAGARI SIGN INVERTED CANDRABINDU -0901 DEVANAGARI SIGN CANDRABINDU -0902 DEVANAGARI SIGN ANUSVARA -0903 DEVANAGARI SIGN VISARGA -0904 DEVANAGARI LETTER SHORT A -0905 DEVANAGARI LETTER A -0906 DEVANAGARI LETTER AA -0907 DEVANAGARI LETTER I -0908 DEVANAGARI LETTER II -0909 DEVANAGARI LETTER U -090A DEVANAGARI LETTER UU -090B DEVANAGARI LETTER VOCALIC R -090C DEVANAGARI LETTER VOCALIC L -090D DEVANAGARI LETTER CANDRA E -090E DEVANAGARI LETTER SHORT E -090F DEVANAGARI LETTER E -0910 DEVANAGARI LETTER AI -0911 DEVANAGARI LETTER CANDRA O -0912 DEVANAGARI LETTER SHORT O -0913 DEVANAGARI LETTER O -0914 DEVANAGARI LETTER AU -0915 DEVANAGARI LETTER KA -0916 DEVANAGARI LETTER KHA -0917 DEVANAGARI LETTER GA -0918 DEVANAGARI LETTER GHA -0919 DEVANAGARI LETTER NGA -091A DEVANAGARI LETTER CA -091B DEVANAGARI LETTER CHA -091C DEVANAGARI LETTER JA -091D DEVANAGARI LETTER JHA -091E DEVANAGARI LETTER NYA -091F DEVANAGARI LETTER TTA -0920 DEVANAGARI LETTER TTHA -0921 DEVANAGARI LETTER DDA -0922 DEVANAGARI LETTER DDHA -0923 DEVANAGARI LETTER NNA -0924 DEVANAGARI LETTER TA -0925 DEVANAGARI LETTER THA -0926 DEVANAGARI LETTER DA -0927 DEVANAGARI LETTER DHA -0928 DEVANAGARI LETTER NA -0929 DEVANAGARI LETTER NNNA -092A DEVANAGARI LETTER PA -092B DEVANAGARI LETTER PHA -092C DEVANAGARI LETTER BA -092D DEVANAGARI LETTER BHA -092E DEVANAGARI LETTER MA -092F DEVANAGARI LETTER YA -0930 DEVANAGARI LETTER RA -0931 DEVANAGARI LETTER RRA -0932 DEVANAGARI LETTER LA -0933 DEVANAGARI LETTER LLA -0934 DEVANAGARI LETTER LLLA -0935 DEVANAGARI LETTER VA -0936 DEVANAGARI LETTER SHA -0937 DEVANAGARI LETTER SSA -0938 DEVANAGARI LETTER SA -0939 
DEVANAGARI LETTER HA -093C DEVANAGARI SIGN NUKTA -093D DEVANAGARI SIGN AVAGRAHA -093E DEVANAGARI VOWEL SIGN AA -093F DEVANAGARI VOWEL SIGN I -0940 DEVANAGARI VOWEL SIGN II -0941 DEVANAGARI VOWEL SIGN U -0942 DEVANAGARI VOWEL SIGN UU -0943 DEVANAGARI VOWEL SIGN VOCALIC R -0944 DEVANAGARI VOWEL SIGN VOCALIC RR -0945 DEVANAGARI VOWEL SIGN CANDRA E -0946 DEVANAGARI VOWEL SIGN SHORT E -0947 DEVANAGARI VOWEL SIGN E -0948 DEVANAGARI VOWEL SIGN AI -0949 DEVANAGARI VOWEL SIGN CANDRA O -094A DEVANAGARI VOWEL SIGN SHORT O -094B DEVANAGARI VOWEL SIGN O -094C DEVANAGARI VOWEL SIGN AU -094D DEVANAGARI SIGN VIRAMA -094E DEVANAGARI VOWEL SIGN PRISHTHAMATRA E -0950 DEVANAGARI OM -0951 DEVANAGARI STRESS SIGN UDATTA -0952 DEVANAGARI STRESS SIGN ANUDATTA -0953 DEVANAGARI GRAVE ACCENT -0954 DEVANAGARI ACUTE ACCENT -0955 DEVANAGARI VOWEL SIGN CANDRA LONG E -0958 DEVANAGARI LETTER QA -0959 DEVANAGARI LETTER KHHA -095A DEVANAGARI LETTER GHHA -095B DEVANAGARI LETTER ZA -095C DEVANAGARI LETTER DDDHA -095D DEVANAGARI LETTER RHA -095E DEVANAGARI LETTER FA -095F DEVANAGARI LETTER YYA -0960 DEVANAGARI LETTER VOCALIC RR -0961 DEVANAGARI LETTER VOCALIC LL -0962 DEVANAGARI VOWEL SIGN VOCALIC L -0963 DEVANAGARI VOWEL SIGN VOCALIC LL -0964 DEVANAGARI DANDA -0965 DEVANAGARI DOUBLE DANDA -0966 DEVANAGARI DIGIT ZERO -0967 DEVANAGARI DIGIT ONE -0968 DEVANAGARI DIGIT TWO -0969 DEVANAGARI DIGIT THREE -096A DEVANAGARI DIGIT FOUR -096B DEVANAGARI DIGIT FIVE -096C DEVANAGARI DIGIT SIX -096D DEVANAGARI DIGIT SEVEN -096E DEVANAGARI DIGIT EIGHT -096F DEVANAGARI DIGIT NINE -0970 DEVANAGARI ABBREVIATION SIGN -0971 DEVANAGARI SIGN HIGH SPACING DOT -0972 DEVANAGARI LETTER CANDRA A -0979 DEVANAGARI LETTER ZHA -097A DEVANAGARI LETTER HEAVY YA -097B DEVANAGARI LETTER GGA -097C DEVANAGARI LETTER JJA -097D DEVANAGARI LETTER GLOTTAL STOP -097E DEVANAGARI LETTER DDDA -097F DEVANAGARI LETTER BBA -0981 BENGALI SIGN CANDRABINDU -0982 BENGALI SIGN ANUSVARA -0983 BENGALI SIGN VISARGA -0985 BENGALI LETTER A -0986 BENGALI LETTER 
AA -0987 BENGALI LETTER I -0988 BENGALI LETTER II -0989 BENGALI LETTER U -098A BENGALI LETTER UU -098B BENGALI LETTER VOCALIC R -098C BENGALI LETTER VOCALIC L -098F BENGALI LETTER E -0990 BENGALI LETTER AI -0993 BENGALI LETTER O -0994 BENGALI LETTER AU -0995 BENGALI LETTER KA -0996 BENGALI LETTER KHA -0997 BENGALI LETTER GA -0998 BENGALI LETTER GHA -0999 BENGALI LETTER NGA -099A BENGALI LETTER CA -099B BENGALI LETTER CHA -099C BENGALI LETTER JA -099D BENGALI LETTER JHA -099E BENGALI LETTER NYA -099F BENGALI LETTER TTA -09A0 BENGALI LETTER TTHA -09A1 BENGALI LETTER DDA -09A2 BENGALI LETTER DDHA -09A3 BENGALI LETTER NNA -09A4 BENGALI LETTER TA -09A5 BENGALI LETTER THA -09A6 BENGALI LETTER DA -09A7 BENGALI LETTER DHA -09A8 BENGALI LETTER NA -09AA BENGALI LETTER PA -09AB BENGALI LETTER PHA -09AC BENGALI LETTER BA -09AD BENGALI LETTER BHA -09AE BENGALI LETTER MA -09AF BENGALI LETTER YA -09B0 BENGALI LETTER RA -09B2 BENGALI LETTER LA -09B6 BENGALI LETTER SHA -09B7 BENGALI LETTER SSA -09B8 BENGALI LETTER SA -09B9 BENGALI LETTER HA -09BC BENGALI SIGN NUKTA -09BD BENGALI SIGN AVAGRAHA -09BE BENGALI VOWEL SIGN AA -09BF BENGALI VOWEL SIGN I -09C0 BENGALI VOWEL SIGN II -09C1 BENGALI VOWEL SIGN U -09C2 BENGALI VOWEL SIGN UU -09C3 BENGALI VOWEL SIGN VOCALIC R -09C4 BENGALI VOWEL SIGN VOCALIC RR -09C7 BENGALI VOWEL SIGN E -09C8 BENGALI VOWEL SIGN AI -09CB BENGALI VOWEL SIGN O -09CC BENGALI VOWEL SIGN AU -09CD BENGALI SIGN VIRAMA -09CE BENGALI LETTER KHANDA TA -09D7 BENGALI AU LENGTH MARK -09DC BENGALI LETTER RRA -09DD BENGALI LETTER RHA -09DF BENGALI LETTER YYA -09E0 BENGALI LETTER VOCALIC RR -09E1 BENGALI LETTER VOCALIC LL -09E2 BENGALI VOWEL SIGN VOCALIC L -09E3 BENGALI VOWEL SIGN VOCALIC LL -09E6 BENGALI DIGIT ZERO -09E7 BENGALI DIGIT ONE -09E8 BENGALI DIGIT TWO -09E9 BENGALI DIGIT THREE -09EA BENGALI DIGIT FOUR -09EB BENGALI DIGIT FIVE -09EC BENGALI DIGIT SIX -09ED BENGALI DIGIT SEVEN -09EE BENGALI DIGIT EIGHT -09EF BENGALI DIGIT NINE -09F0 BENGALI LETTER RA WITH MIDDLE 
DIAGONAL -09F1 BENGALI LETTER RA WITH LOWER DIAGONAL -09F2 BENGALI RUPEE MARK -09F3 BENGALI RUPEE SIGN -09F4 BENGALI CURRENCY NUMERATOR ONE -09F5 BENGALI CURRENCY NUMERATOR TWO -09F6 BENGALI CURRENCY NUMERATOR THREE -09F7 BENGALI CURRENCY NUMERATOR FOUR -09F8 BENGALI CURRENCY NUMERATOR ONE LESS THAN THE DENOMINATOR -09F9 BENGALI CURRENCY DENOMINATOR SIXTEEN -09FA BENGALI ISSHAR -09FB BENGALI GANDA MARK -0A01 GURMUKHI SIGN ADAK BINDI -0A02 GURMUKHI SIGN BINDI -0A03 GURMUKHI SIGN VISARGA -0A05 GURMUKHI LETTER A -0A06 GURMUKHI LETTER AA -0A07 GURMUKHI LETTER I -0A08 GURMUKHI LETTER II -0A09 GURMUKHI LETTER U -0A0A GURMUKHI LETTER UU -0A0F GURMUKHI LETTER EE -0A10 GURMUKHI LETTER AI -0A13 GURMUKHI LETTER OO -0A14 GURMUKHI LETTER AU -0A15 GURMUKHI LETTER KA -0A16 GURMUKHI LETTER KHA -0A17 GURMUKHI LETTER GA -0A18 GURMUKHI LETTER GHA -0A19 GURMUKHI LETTER NGA -0A1A GURMUKHI LETTER CA -0A1B GURMUKHI LETTER CHA -0A1C GURMUKHI LETTER JA -0A1D GURMUKHI LETTER JHA -0A1E GURMUKHI LETTER NYA -0A1F GURMUKHI LETTER TTA -0A20 GURMUKHI LETTER TTHA -0A21 GURMUKHI LETTER DDA -0A22 GURMUKHI LETTER DDHA -0A23 GURMUKHI LETTER NNA -0A24 GURMUKHI LETTER TA -0A25 GURMUKHI LETTER THA -0A26 GURMUKHI LETTER DA -0A27 GURMUKHI LETTER DHA -0A28 GURMUKHI LETTER NA -0A2A GURMUKHI LETTER PA -0A2B GURMUKHI LETTER PHA -0A2C GURMUKHI LETTER BA -0A2D GURMUKHI LETTER BHA -0A2E GURMUKHI LETTER MA -0A2F GURMUKHI LETTER YA -0A30 GURMUKHI LETTER RA -0A32 GURMUKHI LETTER LA -0A33 GURMUKHI LETTER LLA -0A35 GURMUKHI LETTER VA -0A36 GURMUKHI LETTER SHA -0A38 GURMUKHI LETTER SA -0A39 GURMUKHI LETTER HA -0A3C GURMUKHI SIGN NUKTA -0A3E GURMUKHI VOWEL SIGN AA -0A3F GURMUKHI VOWEL SIGN I -0A40 GURMUKHI VOWEL SIGN II -0A41 GURMUKHI VOWEL SIGN U -0A42 GURMUKHI VOWEL SIGN UU -0A47 GURMUKHI VOWEL SIGN EE -0A48 GURMUKHI VOWEL SIGN AI -0A4B GURMUKHI VOWEL SIGN OO -0A4C GURMUKHI VOWEL SIGN AU -0A4D GURMUKHI SIGN VIRAMA -0A51 GURMUKHI SIGN UDAAT -0A59 GURMUKHI LETTER KHHA -0A5A GURMUKHI LETTER GHHA -0A5B GURMUKHI LETTER ZA 
-0A5C GURMUKHI LETTER RRA -0A5E GURMUKHI LETTER FA -0A66 GURMUKHI DIGIT ZERO -0A67 GURMUKHI DIGIT ONE -0A68 GURMUKHI DIGIT TWO -0A69 GURMUKHI DIGIT THREE -0A6A GURMUKHI DIGIT FOUR -0A6B GURMUKHI DIGIT FIVE -0A6C GURMUKHI DIGIT SIX -0A6D GURMUKHI DIGIT SEVEN -0A6E GURMUKHI DIGIT EIGHT -0A6F GURMUKHI DIGIT NINE -0A70 GURMUKHI TIPPI -0A71 GURMUKHI ADDAK -0A72 GURMUKHI IRI -0A73 GURMUKHI URA -0A74 GURMUKHI EK ONKAR -0A75 GURMUKHI SIGN YAKASH -0A81 GUJARATI SIGN CANDRABINDU -0A82 GUJARATI SIGN ANUSVARA -0A83 GUJARATI SIGN VISARGA -0A85 GUJARATI LETTER A -0A86 GUJARATI LETTER AA -0A87 GUJARATI LETTER I -0A88 GUJARATI LETTER II -0A89 GUJARATI LETTER U -0A8A GUJARATI LETTER UU -0A8B GUJARATI LETTER VOCALIC R -0A8C GUJARATI LETTER VOCALIC L -0A8D GUJARATI VOWEL CANDRA E -0A8F GUJARATI LETTER E -0A90 GUJARATI LETTER AI -0A91 GUJARATI VOWEL CANDRA O -0A93 GUJARATI LETTER O -0A94 GUJARATI LETTER AU -0A95 GUJARATI LETTER KA -0A96 GUJARATI LETTER KHA -0A97 GUJARATI LETTER GA -0A98 GUJARATI LETTER GHA -0A99 GUJARATI LETTER NGA -0A9A GUJARATI LETTER CA -0A9B GUJARATI LETTER CHA -0A9C GUJARATI LETTER JA -0A9D GUJARATI LETTER JHA -0A9E GUJARATI LETTER NYA -0A9F GUJARATI LETTER TTA -0AA0 GUJARATI LETTER TTHA -0AA1 GUJARATI LETTER DDA -0AA2 GUJARATI LETTER DDHA -0AA3 GUJARATI LETTER NNA -0AA4 GUJARATI LETTER TA -0AA5 GUJARATI LETTER THA -0AA6 GUJARATI LETTER DA -0AA7 GUJARATI LETTER DHA -0AA8 GUJARATI LETTER NA -0AAA GUJARATI LETTER PA -0AAB GUJARATI LETTER PHA -0AAC GUJARATI LETTER BA -0AAD GUJARATI LETTER BHA -0AAE GUJARATI LETTER MA -0AAF GUJARATI LETTER YA -0AB0 GUJARATI LETTER RA -0AB2 GUJARATI LETTER LA -0AB3 GUJARATI LETTER LLA -0AB5 GUJARATI LETTER VA -0AB6 GUJARATI LETTER SHA -0AB7 GUJARATI LETTER SSA -0AB8 GUJARATI LETTER SA -0AB9 GUJARATI LETTER HA -0ABC GUJARATI SIGN NUKTA -0ABD GUJARATI SIGN AVAGRAHA -0ABE GUJARATI VOWEL SIGN AA -0ABF GUJARATI VOWEL SIGN I -0AC0 GUJARATI VOWEL SIGN II -0AC1 GUJARATI VOWEL SIGN U -0AC2 GUJARATI VOWEL SIGN UU -0AC3 GUJARATI VOWEL SIGN 
VOCALIC R -0AC4 GUJARATI VOWEL SIGN VOCALIC RR -0AC5 GUJARATI VOWEL SIGN CANDRA E -0AC7 GUJARATI VOWEL SIGN E -0AC8 GUJARATI VOWEL SIGN AI -0AC9 GUJARATI VOWEL SIGN CANDRA O -0ACB GUJARATI VOWEL SIGN O -0ACC GUJARATI VOWEL SIGN AU -0ACD GUJARATI SIGN VIRAMA -0AD0 GUJARATI OM -0AE0 GUJARATI LETTER VOCALIC RR -0AE1 GUJARATI LETTER VOCALIC LL -0AE2 GUJARATI VOWEL SIGN VOCALIC L -0AE3 GUJARATI VOWEL SIGN VOCALIC LL -0AE6 GUJARATI DIGIT ZERO -0AE7 GUJARATI DIGIT ONE -0AE8 GUJARATI DIGIT TWO -0AE9 GUJARATI DIGIT THREE -0AEA GUJARATI DIGIT FOUR -0AEB GUJARATI DIGIT FIVE -0AEC GUJARATI DIGIT SIX -0AED GUJARATI DIGIT SEVEN -0AEE GUJARATI DIGIT EIGHT -0AEF GUJARATI DIGIT NINE -0AF1 GUJARATI RUPEE SIGN -0B01 ORIYA SIGN CANDRABINDU -0B02 ORIYA SIGN ANUSVARA -0B03 ORIYA SIGN VISARGA -0B05 ORIYA LETTER A -0B06 ORIYA LETTER AA -0B07 ORIYA LETTER I -0B08 ORIYA LETTER II -0B09 ORIYA LETTER U -0B0A ORIYA LETTER UU -0B0B ORIYA LETTER VOCALIC R -0B0C ORIYA LETTER VOCALIC L -0B0F ORIYA LETTER E -0B10 ORIYA LETTER AI -0B13 ORIYA LETTER O -0B14 ORIYA LETTER AU -0B15 ORIYA LETTER KA -0B16 ORIYA LETTER KHA -0B17 ORIYA LETTER GA -0B18 ORIYA LETTER GHA -0B19 ORIYA LETTER NGA -0B1A ORIYA LETTER CA -0B1B ORIYA LETTER CHA -0B1C ORIYA LETTER JA -0B1D ORIYA LETTER JHA -0B1E ORIYA LETTER NYA -0B1F ORIYA LETTER TTA -0B20 ORIYA LETTER TTHA -0B21 ORIYA LETTER DDA -0B22 ORIYA LETTER DDHA -0B23 ORIYA LETTER NNA -0B24 ORIYA LETTER TA -0B25 ORIYA LETTER THA -0B26 ORIYA LETTER DA -0B27 ORIYA LETTER DHA -0B28 ORIYA LETTER NA -0B2A ORIYA LETTER PA -0B2B ORIYA LETTER PHA -0B2C ORIYA LETTER BA -0B2D ORIYA LETTER BHA -0B2E ORIYA LETTER MA -0B2F ORIYA LETTER YA -0B30 ORIYA LETTER RA -0B32 ORIYA LETTER LA -0B33 ORIYA LETTER LLA -0B35 ORIYA LETTER VA -0B36 ORIYA LETTER SHA -0B37 ORIYA LETTER SSA -0B38 ORIYA LETTER SA -0B39 ORIYA LETTER HA -0B3C ORIYA SIGN NUKTA -0B3D ORIYA SIGN AVAGRAHA -0B3E ORIYA VOWEL SIGN AA -0B3F ORIYA VOWEL SIGN I -0B40 ORIYA VOWEL SIGN II -0B41 ORIYA VOWEL SIGN U -0B42 ORIYA VOWEL SIGN UU 
-0B43 ORIYA VOWEL SIGN VOCALIC R -0B44 ORIYA VOWEL SIGN VOCALIC RR -0B47 ORIYA VOWEL SIGN E -0B48 ORIYA VOWEL SIGN AI -0B4B ORIYA VOWEL SIGN O -0B4C ORIYA VOWEL SIGN AU -0B4D ORIYA SIGN VIRAMA -0B56 ORIYA AI LENGTH MARK -0B57 ORIYA AU LENGTH MARK -0B5C ORIYA LETTER RRA -0B5D ORIYA LETTER RHA -0B5F ORIYA LETTER YYA -0B60 ORIYA LETTER VOCALIC RR -0B61 ORIYA LETTER VOCALIC LL -0B62 ORIYA VOWEL SIGN VOCALIC L -0B63 ORIYA VOWEL SIGN VOCALIC LL -0B66 ORIYA DIGIT ZERO -0B67 ORIYA DIGIT ONE -0B68 ORIYA DIGIT TWO -0B69 ORIYA DIGIT THREE -0B6A ORIYA DIGIT FOUR -0B6B ORIYA DIGIT FIVE -0B6C ORIYA DIGIT SIX -0B6D ORIYA DIGIT SEVEN -0B6E ORIYA DIGIT EIGHT -0B6F ORIYA DIGIT NINE -0B70 ORIYA ISSHAR -0B71 ORIYA LETTER WA -0B82 TAMIL SIGN ANUSVARA -0B83 TAMIL SIGN VISARGA -0B85 TAMIL LETTER A -0B86 TAMIL LETTER AA -0B87 TAMIL LETTER I -0B88 TAMIL LETTER II -0B89 TAMIL LETTER U -0B8A TAMIL LETTER UU -0B8E TAMIL LETTER E -0B8F TAMIL LETTER EE -0B90 TAMIL LETTER AI -0B92 TAMIL LETTER O -0B93 TAMIL LETTER OO -0B94 TAMIL LETTER AU -0B95 TAMIL LETTER KA -0B99 TAMIL LETTER NGA -0B9A TAMIL LETTER CA -0B9C TAMIL LETTER JA -0B9E TAMIL LETTER NYA -0B9F TAMIL LETTER TTA -0BA3 TAMIL LETTER NNA -0BA4 TAMIL LETTER TA -0BA8 TAMIL LETTER NA -0BA9 TAMIL LETTER NNNA -0BAA TAMIL LETTER PA -0BAE TAMIL LETTER MA -0BAF TAMIL LETTER YA -0BB0 TAMIL LETTER RA -0BB1 TAMIL LETTER RRA -0BB2 TAMIL LETTER LA -0BB3 TAMIL LETTER LLA -0BB4 TAMIL LETTER LLLA -0BB5 TAMIL LETTER VA -0BB6 TAMIL LETTER SHA -0BB7 TAMIL LETTER SSA -0BB8 TAMIL LETTER SA -0BB9 TAMIL LETTER HA -0BBE TAMIL VOWEL SIGN AA -0BBF TAMIL VOWEL SIGN I -0BC0 TAMIL VOWEL SIGN II -0BC1 TAMIL VOWEL SIGN U -0BC2 TAMIL VOWEL SIGN UU -0BC6 TAMIL VOWEL SIGN E -0BC7 TAMIL VOWEL SIGN EE -0BC8 TAMIL VOWEL SIGN AI -0BCA TAMIL VOWEL SIGN O -0BCB TAMIL VOWEL SIGN OO -0BCC TAMIL VOWEL SIGN AU -0BCD TAMIL SIGN VIRAMA -0BD0 TAMIL OM -0BD7 TAMIL AU LENGTH MARK -0BE6 TAMIL DIGIT ZERO -0BE7 TAMIL DIGIT ONE -0BE8 TAMIL DIGIT TWO -0BE9 TAMIL DIGIT THREE -0BEA TAMIL DIGIT 
FOUR -0BEB TAMIL DIGIT FIVE -0BEC TAMIL DIGIT SIX -0BED TAMIL DIGIT SEVEN -0BEE TAMIL DIGIT EIGHT -0BEF TAMIL DIGIT NINE -0BF0 TAMIL NUMBER TEN -0BF1 TAMIL NUMBER ONE HUNDRED -0BF2 TAMIL NUMBER ONE THOUSAND -0BF3 TAMIL DAY SIGN -0BF4 TAMIL MONTH SIGN -0BF5 TAMIL YEAR SIGN -0BF6 TAMIL DEBIT SIGN -0BF7 TAMIL CREDIT SIGN -0BF8 TAMIL AS ABOVE SIGN -0BF9 TAMIL RUPEE SIGN -0BFA TAMIL NUMBER SIGN -0C01 TELUGU SIGN CANDRABINDU -0C02 TELUGU SIGN ANUSVARA -0C03 TELUGU SIGN VISARGA -0C05 TELUGU LETTER A -0C06 TELUGU LETTER AA -0C07 TELUGU LETTER I -0C08 TELUGU LETTER II -0C09 TELUGU LETTER U -0C0A TELUGU LETTER UU -0C0B TELUGU LETTER VOCALIC R -0C0C TELUGU LETTER VOCALIC L -0C0E TELUGU LETTER E -0C0F TELUGU LETTER EE -0C10 TELUGU LETTER AI -0C12 TELUGU LETTER O -0C13 TELUGU LETTER OO -0C14 TELUGU LETTER AU -0C15 TELUGU LETTER KA -0C16 TELUGU LETTER KHA -0C17 TELUGU LETTER GA -0C18 TELUGU LETTER GHA -0C19 TELUGU LETTER NGA -0C1A TELUGU LETTER CA -0C1B TELUGU LETTER CHA -0C1C TELUGU LETTER JA -0C1D TELUGU LETTER JHA -0C1E TELUGU LETTER NYA -0C1F TELUGU LETTER TTA -0C20 TELUGU LETTER TTHA -0C21 TELUGU LETTER DDA -0C22 TELUGU LETTER DDHA -0C23 TELUGU LETTER NNA -0C24 TELUGU LETTER TA -0C25 TELUGU LETTER THA -0C26 TELUGU LETTER DA -0C27 TELUGU LETTER DHA -0C28 TELUGU LETTER NA -0C2A TELUGU LETTER PA -0C2B TELUGU LETTER PHA -0C2C TELUGU LETTER BA -0C2D TELUGU LETTER BHA -0C2E TELUGU LETTER MA -0C2F TELUGU LETTER YA -0C30 TELUGU LETTER RA -0C31 TELUGU LETTER RRA -0C32 TELUGU LETTER LA -0C33 TELUGU LETTER LLA -0C35 TELUGU LETTER VA -0C36 TELUGU LETTER SHA -0C37 TELUGU LETTER SSA -0C38 TELUGU LETTER SA -0C39 TELUGU LETTER HA -0C3D TELUGU SIGN AVAGRAHA -0C3E TELUGU VOWEL SIGN AA -0C3F TELUGU VOWEL SIGN I -0C40 TELUGU VOWEL SIGN II -0C41 TELUGU VOWEL SIGN U -0C42 TELUGU VOWEL SIGN UU -0C43 TELUGU VOWEL SIGN VOCALIC R -0C44 TELUGU VOWEL SIGN VOCALIC RR -0C46 TELUGU VOWEL SIGN E -0C47 TELUGU VOWEL SIGN EE -0C48 TELUGU VOWEL SIGN AI -0C4A TELUGU VOWEL SIGN O -0C4B TELUGU VOWEL SIGN OO 
-0C4C TELUGU VOWEL SIGN AU -0C4D TELUGU SIGN VIRAMA -0C55 TELUGU LENGTH MARK -0C56 TELUGU AI LENGTH MARK -0C58 TELUGU LETTER TSA -0C59 TELUGU LETTER DZA -0C60 TELUGU LETTER VOCALIC RR -0C61 TELUGU LETTER VOCALIC LL -0C62 TELUGU VOWEL SIGN VOCALIC L -0C63 TELUGU VOWEL SIGN VOCALIC LL -0C66 TELUGU DIGIT ZERO -0C67 TELUGU DIGIT ONE -0C68 TELUGU DIGIT TWO -0C69 TELUGU DIGIT THREE -0C6A TELUGU DIGIT FOUR -0C6B TELUGU DIGIT FIVE -0C6C TELUGU DIGIT SIX -0C6D TELUGU DIGIT SEVEN -0C6E TELUGU DIGIT EIGHT -0C6F TELUGU DIGIT NINE -0C78 TELUGU FRACTION DIGIT ZERO FOR ODD POWERS OF FOUR -0C79 TELUGU FRACTION DIGIT ONE FOR ODD POWERS OF FOUR -0C7A TELUGU FRACTION DIGIT TWO FOR ODD POWERS OF FOUR -0C7B TELUGU FRACTION DIGIT THREE FOR ODD POWERS OF FOUR -0C7C TELUGU FRACTION DIGIT ONE FOR EVEN POWERS OF FOUR -0C7D TELUGU FRACTION DIGIT TWO FOR EVEN POWERS OF FOUR -0C7E TELUGU FRACTION DIGIT THREE FOR EVEN POWERS OF FOUR -0C7F TELUGU SIGN TUUMU -0C82 KANNADA SIGN ANUSVARA -0C83 KANNADA SIGN VISARGA -0C85 KANNADA LETTER A -0C86 KANNADA LETTER AA -0C87 KANNADA LETTER I -0C88 KANNADA LETTER II -0C89 KANNADA LETTER U -0C8A KANNADA LETTER UU -0C8B KANNADA LETTER VOCALIC R -0C8C KANNADA LETTER VOCALIC L -0C8E KANNADA LETTER E -0C8F KANNADA LETTER EE -0C90 KANNADA LETTER AI -0C92 KANNADA LETTER O -0C93 KANNADA LETTER OO -0C94 KANNADA LETTER AU -0C95 KANNADA LETTER KA -0C96 KANNADA LETTER KHA -0C97 KANNADA LETTER GA -0C98 KANNADA LETTER GHA -0C99 KANNADA LETTER NGA -0C9A KANNADA LETTER CA -0C9B KANNADA LETTER CHA -0C9C KANNADA LETTER JA -0C9D KANNADA LETTER JHA -0C9E KANNADA LETTER NYA -0C9F KANNADA LETTER TTA -0CA0 KANNADA LETTER TTHA -0CA1 KANNADA LETTER DDA -0CA2 KANNADA LETTER DDHA -0CA3 KANNADA LETTER NNA -0CA4 KANNADA LETTER TA -0CA5 KANNADA LETTER THA -0CA6 KANNADA LETTER DA -0CA7 KANNADA LETTER DHA -0CA8 KANNADA LETTER NA -0CAA KANNADA LETTER PA -0CAB KANNADA LETTER PHA -0CAC KANNADA LETTER BA -0CAD KANNADA LETTER BHA -0CAE KANNADA LETTER MA -0CAF KANNADA LETTER YA -0CB0 KANNADA 
LETTER RA -0CB1 KANNADA LETTER RRA -0CB2 KANNADA LETTER LA -0CB3 KANNADA LETTER LLA -0CB5 KANNADA LETTER VA -0CB6 KANNADA LETTER SHA -0CB7 KANNADA LETTER SSA -0CB8 KANNADA LETTER SA -0CB9 KANNADA LETTER HA -0CBC KANNADA SIGN NUKTA -0CBD KANNADA SIGN AVAGRAHA -0CBE KANNADA VOWEL SIGN AA -0CBF KANNADA VOWEL SIGN I -0CC0 KANNADA VOWEL SIGN II -0CC1 KANNADA VOWEL SIGN U -0CC2 KANNADA VOWEL SIGN UU -0CC3 KANNADA VOWEL SIGN VOCALIC R -0CC4 KANNADA VOWEL SIGN VOCALIC RR -0CC6 KANNADA VOWEL SIGN E -0CC7 KANNADA VOWEL SIGN EE -0CC8 KANNADA VOWEL SIGN AI -0CCA KANNADA VOWEL SIGN O -0CCB KANNADA VOWEL SIGN OO -0CCC KANNADA VOWEL SIGN AU -0CCD KANNADA SIGN VIRAMA -0CD5 KANNADA LENGTH MARK -0CD6 KANNADA AI LENGTH MARK -0CDE KANNADA LETTER FA -0CE0 KANNADA LETTER VOCALIC RR -0CE1 KANNADA LETTER VOCALIC LL -0CE2 KANNADA VOWEL SIGN VOCALIC L -0CE3 KANNADA VOWEL SIGN VOCALIC LL -0CE6 KANNADA DIGIT ZERO -0CE7 KANNADA DIGIT ONE -0CE8 KANNADA DIGIT TWO -0CE9 KANNADA DIGIT THREE -0CEA KANNADA DIGIT FOUR -0CEB KANNADA DIGIT FIVE -0CEC KANNADA DIGIT SIX -0CED KANNADA DIGIT SEVEN -0CEE KANNADA DIGIT EIGHT -0CEF KANNADA DIGIT NINE -0CF1 KANNADA SIGN JIHVAMULIYA -0CF2 KANNADA SIGN UPADHMANIYA -0D02 MALAYALAM SIGN ANUSVARA -0D03 MALAYALAM SIGN VISARGA -0D05 MALAYALAM LETTER A -0D06 MALAYALAM LETTER AA -0D07 MALAYALAM LETTER I -0D08 MALAYALAM LETTER II -0D09 MALAYALAM LETTER U -0D0A MALAYALAM LETTER UU -0D0B MALAYALAM LETTER VOCALIC R -0D0C MALAYALAM LETTER VOCALIC L -0D0E MALAYALAM LETTER E -0D0F MALAYALAM LETTER EE -0D10 MALAYALAM LETTER AI -0D12 MALAYALAM LETTER O -0D13 MALAYALAM LETTER OO -0D14 MALAYALAM LETTER AU -0D15 MALAYALAM LETTER KA -0D16 MALAYALAM LETTER KHA -0D17 MALAYALAM LETTER GA -0D18 MALAYALAM LETTER GHA -0D19 MALAYALAM LETTER NGA -0D1A MALAYALAM LETTER CA -0D1B MALAYALAM LETTER CHA -0D1C MALAYALAM LETTER JA -0D1D MALAYALAM LETTER JHA -0D1E MALAYALAM LETTER NYA -0D1F MALAYALAM LETTER TTA -0D20 MALAYALAM LETTER TTHA -0D21 MALAYALAM LETTER DDA -0D22 MALAYALAM LETTER DDHA -0D23 
MALAYALAM LETTER NNA -0D24 MALAYALAM LETTER TA -0D25 MALAYALAM LETTER THA -0D26 MALAYALAM LETTER DA -0D27 MALAYALAM LETTER DHA -0D28 MALAYALAM LETTER NA -0D2A MALAYALAM LETTER PA -0D2B MALAYALAM LETTER PHA -0D2C MALAYALAM LETTER BA -0D2D MALAYALAM LETTER BHA -0D2E MALAYALAM LETTER MA -0D2F MALAYALAM LETTER YA -0D30 MALAYALAM LETTER RA -0D31 MALAYALAM LETTER RRA -0D32 MALAYALAM LETTER LA -0D33 MALAYALAM LETTER LLA -0D34 MALAYALAM LETTER LLLA -0D35 MALAYALAM LETTER VA -0D36 MALAYALAM LETTER SHA -0D37 MALAYALAM LETTER SSA -0D38 MALAYALAM LETTER SA -0D39 MALAYALAM LETTER HA -0D3D MALAYALAM SIGN AVAGRAHA -0D3E MALAYALAM VOWEL SIGN AA -0D3F MALAYALAM VOWEL SIGN I -0D40 MALAYALAM VOWEL SIGN II -0D41 MALAYALAM VOWEL SIGN U -0D42 MALAYALAM VOWEL SIGN UU -0D43 MALAYALAM VOWEL SIGN VOCALIC R -0D44 MALAYALAM VOWEL SIGN VOCALIC RR -0D46 MALAYALAM VOWEL SIGN E -0D47 MALAYALAM VOWEL SIGN EE -0D48 MALAYALAM VOWEL SIGN AI -0D4A MALAYALAM VOWEL SIGN O -0D4B MALAYALAM VOWEL SIGN OO -0D4C MALAYALAM VOWEL SIGN AU -0D4D MALAYALAM SIGN VIRAMA -0D57 MALAYALAM AU LENGTH MARK -0D60 MALAYALAM LETTER VOCALIC RR -0D61 MALAYALAM LETTER VOCALIC LL -0D62 MALAYALAM VOWEL SIGN VOCALIC L -0D63 MALAYALAM VOWEL SIGN VOCALIC LL -0D66 MALAYALAM DIGIT ZERO -0D67 MALAYALAM DIGIT ONE -0D68 MALAYALAM DIGIT TWO -0D69 MALAYALAM DIGIT THREE -0D6A MALAYALAM DIGIT FOUR -0D6B MALAYALAM DIGIT FIVE -0D6C MALAYALAM DIGIT SIX -0D6D MALAYALAM DIGIT SEVEN -0D6E MALAYALAM DIGIT EIGHT -0D6F MALAYALAM DIGIT NINE -0D70 MALAYALAM NUMBER TEN -0D71 MALAYALAM NUMBER ONE HUNDRED -0D72 MALAYALAM NUMBER ONE THOUSAND -0D73 MALAYALAM FRACTION ONE QUARTER -0D74 MALAYALAM FRACTION ONE HALF -0D75 MALAYALAM FRACTION THREE QUARTERS -0D79 MALAYALAM DATE MARK -0D7A MALAYALAM LETTER CHILLU NN -0D7B MALAYALAM LETTER CHILLU N -0D7C MALAYALAM LETTER CHILLU RR -0D7D MALAYALAM LETTER CHILLU L -0D7E MALAYALAM LETTER CHILLU LL -0D7F MALAYALAM LETTER CHILLU K -0D82 SINHALA SIGN ANUSVARAYA -0D83 SINHALA SIGN VISARGAYA -0D85 SINHALA LETTER AYANNA 
-0D86 SINHALA LETTER AAYANNA -0D87 SINHALA LETTER AEYANNA -0D88 SINHALA LETTER AEEYANNA -0D89 SINHALA LETTER IYANNA -0D8A SINHALA LETTER IIYANNA -0D8B SINHALA LETTER UYANNA -0D8C SINHALA LETTER UUYANNA -0D8D SINHALA LETTER IRUYANNA -0D8E SINHALA LETTER IRUUYANNA -0D8F SINHALA LETTER ILUYANNA -0D90 SINHALA LETTER ILUUYANNA -0D91 SINHALA LETTER EYANNA -0D92 SINHALA LETTER EEYANNA -0D93 SINHALA LETTER AIYANNA -0D94 SINHALA LETTER OYANNA -0D95 SINHALA LETTER OOYANNA -0D96 SINHALA LETTER AUYANNA -0D9A SINHALA LETTER ALPAPRAANA KAYANNA -0D9B SINHALA LETTER MAHAAPRAANA KAYANNA -0D9C SINHALA LETTER ALPAPRAANA GAYANNA -0D9D SINHALA LETTER MAHAAPRAANA GAYANNA -0D9E SINHALA LETTER KANTAJA NAASIKYAYA -0D9F SINHALA LETTER SANYAKA GAYANNA -0DA0 SINHALA LETTER ALPAPRAANA CAYANNA -0DA1 SINHALA LETTER MAHAAPRAANA CAYANNA -0DA2 SINHALA LETTER ALPAPRAANA JAYANNA -0DA3 SINHALA LETTER MAHAAPRAANA JAYANNA -0DA4 SINHALA LETTER TAALUJA NAASIKYAYA -0DA5 SINHALA LETTER TAALUJA SANYOOGA NAAKSIKYAYA -0DA6 SINHALA LETTER SANYAKA JAYANNA -0DA7 SINHALA LETTER ALPAPRAANA TTAYANNA -0DA8 SINHALA LETTER MAHAAPRAANA TTAYANNA -0DA9 SINHALA LETTER ALPAPRAANA DDAYANNA -0DAA SINHALA LETTER MAHAAPRAANA DDAYANNA -0DAB SINHALA LETTER MUURDHAJA NAYANNA -0DAC SINHALA LETTER SANYAKA DDAYANNA -0DAD SINHALA LETTER ALPAPRAANA TAYANNA -0DAE SINHALA LETTER MAHAAPRAANA TAYANNA -0DAF SINHALA LETTER ALPAPRAANA DAYANNA -0DB0 SINHALA LETTER MAHAAPRAANA DAYANNA -0DB1 SINHALA LETTER DANTAJA NAYANNA -0DB3 SINHALA LETTER SANYAKA DAYANNA -0DB4 SINHALA LETTER ALPAPRAANA PAYANNA -0DB5 SINHALA LETTER MAHAAPRAANA PAYANNA -0DB6 SINHALA LETTER ALPAPRAANA BAYANNA -0DB7 SINHALA LETTER MAHAAPRAANA BAYANNA -0DB8 SINHALA LETTER MAYANNA -0DB9 SINHALA LETTER AMBA BAYANNA -0DBA SINHALA LETTER YAYANNA -0DBB SINHALA LETTER RAYANNA -0DBD SINHALA LETTER DANTAJA LAYANNA -0DC0 SINHALA LETTER VAYANNA -0DC1 SINHALA LETTER TAALUJA SAYANNA -0DC2 SINHALA LETTER MUURDHAJA SAYANNA -0DC3 SINHALA LETTER DANTAJA SAYANNA -0DC4 SINHALA LETTER HAYANNA -0DC5 
SINHALA LETTER MUURDHAJA LAYANNA -0DC6 SINHALA LETTER FAYANNA -0DCA SINHALA SIGN AL-LAKUNA -0DCF SINHALA VOWEL SIGN AELA-PILLA -0DD0 SINHALA VOWEL SIGN KETTI AEDA-PILLA -0DD1 SINHALA VOWEL SIGN DIGA AEDA-PILLA -0DD2 SINHALA VOWEL SIGN KETTI IS-PILLA -0DD3 SINHALA VOWEL SIGN DIGA IS-PILLA -0DD4 SINHALA VOWEL SIGN KETTI PAA-PILLA -0DD6 SINHALA VOWEL SIGN DIGA PAA-PILLA -0DD8 SINHALA VOWEL SIGN GAETTA-PILLA -0DD9 SINHALA VOWEL SIGN KOMBUVA -0DDA SINHALA VOWEL SIGN DIGA KOMBUVA -0DDB SINHALA VOWEL SIGN KOMBU DEKA -0DDC SINHALA VOWEL SIGN KOMBUVA HAA AELA-PILLA -0DDD SINHALA VOWEL SIGN KOMBUVA HAA DIGA AELA-PILLA -0DDE SINHALA VOWEL SIGN KOMBUVA HAA GAYANUKITTA -0DDF SINHALA VOWEL SIGN GAYANUKITTA -0DF2 SINHALA VOWEL SIGN DIGA GAETTA-PILLA -0DF3 SINHALA VOWEL SIGN DIGA GAYANUKITTA -0DF4 SINHALA PUNCTUATION KUNDDALIYA -0E01 THAI CHARACTER KO KAI -0E02 THAI CHARACTER KHO KHAI -0E03 THAI CHARACTER KHO KHUAT -0E04 THAI CHARACTER KHO KHWAI -0E05 THAI CHARACTER KHO KHON -0E06 THAI CHARACTER KHO RAKHANG -0E07 THAI CHARACTER NGO NGU -0E08 THAI CHARACTER CHO CHAN -0E09 THAI CHARACTER CHO CHING -0E0A THAI CHARACTER CHO CHANG -0E0B THAI CHARACTER SO SO -0E0C THAI CHARACTER CHO CHOE -0E0D THAI CHARACTER YO YING -0E0E THAI CHARACTER DO CHADA -0E0F THAI CHARACTER TO PATAK -0E10 THAI CHARACTER THO THAN -0E11 THAI CHARACTER THO NANGMONTHO -0E12 THAI CHARACTER THO PHUTHAO -0E13 THAI CHARACTER NO NEN -0E14 THAI CHARACTER DO DEK -0E15 THAI CHARACTER TO TAO -0E16 THAI CHARACTER THO THUNG -0E17 THAI CHARACTER THO THAHAN -0E18 THAI CHARACTER THO THONG -0E19 THAI CHARACTER NO NU -0E1A THAI CHARACTER BO BAIMAI -0E1B THAI CHARACTER PO PLA -0E1C THAI CHARACTER PHO PHUNG -0E1D THAI CHARACTER FO FA -0E1E THAI CHARACTER PHO PHAN -0E1F THAI CHARACTER FO FAN -0E20 THAI CHARACTER PHO SAMPHAO -0E21 THAI CHARACTER MO MA -0E22 THAI CHARACTER YO YAK -0E23 THAI CHARACTER RO RUA -0E24 THAI CHARACTER RU -0E25 THAI CHARACTER LO LING -0E26 THAI CHARACTER LU -0E27 THAI CHARACTER WO WAEN -0E28 THAI CHARACTER SO 
SALA -0E29 THAI CHARACTER SO RUSI -0E2A THAI CHARACTER SO SUA -0E2B THAI CHARACTER HO HIP -0E2C THAI CHARACTER LO CHULA -0E2D THAI CHARACTER O ANG -0E2E THAI CHARACTER HO NOKHUK -0E2F THAI CHARACTER PAIYANNOI -0E30 THAI CHARACTER SARA A -0E31 THAI CHARACTER MAI HAN-AKAT -0E32 THAI CHARACTER SARA AA -0E33 THAI CHARACTER SARA AM -0E34 THAI CHARACTER SARA I -0E35 THAI CHARACTER SARA II -0E36 THAI CHARACTER SARA UE -0E37 THAI CHARACTER SARA UEE -0E38 THAI CHARACTER SARA U -0E39 THAI CHARACTER SARA UU -0E3A THAI CHARACTER PHINTHU -0E3F THAI CURRENCY SYMBOL BAHT -0E40 THAI CHARACTER SARA E -0E41 THAI CHARACTER SARA AE -0E42 THAI CHARACTER SARA O -0E43 THAI CHARACTER SARA AI MAIMUAN -0E44 THAI CHARACTER SARA AI MAIMALAI -0E45 THAI CHARACTER LAKKHANGYAO -0E46 THAI CHARACTER MAIYAMOK -0E47 THAI CHARACTER MAITAIKHU -0E48 THAI CHARACTER MAI EK -0E49 THAI CHARACTER MAI THO -0E4A THAI CHARACTER MAI TRI -0E4B THAI CHARACTER MAI CHATTAWA -0E4C THAI CHARACTER THANTHAKHAT -0E4D THAI CHARACTER NIKHAHIT -0E4E THAI CHARACTER YAMAKKAN -0E4F THAI CHARACTER FONGMAN -0E50 THAI DIGIT ZERO -0E51 THAI DIGIT ONE -0E52 THAI DIGIT TWO -0E53 THAI DIGIT THREE -0E54 THAI DIGIT FOUR -0E55 THAI DIGIT FIVE -0E56 THAI DIGIT SIX -0E57 THAI DIGIT SEVEN -0E58 THAI DIGIT EIGHT -0E59 THAI DIGIT NINE -0E5A THAI CHARACTER ANGKHANKHU -0E5B THAI CHARACTER KHOMUT -0E81 LAO LETTER KO -0E82 LAO LETTER KHO SUNG -0E84 LAO LETTER KHO TAM -0E87 LAO LETTER NGO -0E88 LAO LETTER CO -0E8A LAO LETTER SO TAM -0E8D LAO LETTER NYO -0E94 LAO LETTER DO -0E95 LAO LETTER TO -0E96 LAO LETTER THO SUNG -0E97 LAO LETTER THO TAM -0E99 LAO LETTER NO -0E9A LAO LETTER BO -0E9B LAO LETTER PO -0E9C LAO LETTER PHO SUNG -0E9D LAO LETTER FO TAM -0E9E LAO LETTER PHO TAM -0E9F LAO LETTER FO SUNG -0EA1 LAO LETTER MO -0EA2 LAO LETTER YO -0EA3 LAO LETTER LO LING -0EA5 LAO LETTER LO LOOT -0EA7 LAO LETTER WO -0EAA LAO LETTER SO SUNG -0EAB LAO LETTER HO SUNG -0EAD LAO LETTER O -0EAE LAO LETTER HO TAM -0EAF LAO ELLIPSIS -0EB0 LAO VOWEL SIGN A -0EB1 
LAO VOWEL SIGN MAI KAN -0EB2 LAO VOWEL SIGN AA -0EB3 LAO VOWEL SIGN AM -0EB4 LAO VOWEL SIGN I -0EB5 LAO VOWEL SIGN II -0EB6 LAO VOWEL SIGN Y -0EB7 LAO VOWEL SIGN YY -0EB8 LAO VOWEL SIGN U -0EB9 LAO VOWEL SIGN UU -0EBB LAO VOWEL SIGN MAI KON -0EBC LAO SEMIVOWEL SIGN LO -0EBD LAO SEMIVOWEL SIGN NYO -0EC0 LAO VOWEL SIGN E -0EC1 LAO VOWEL SIGN EI -0EC2 LAO VOWEL SIGN O -0EC3 LAO VOWEL SIGN AY -0EC4 LAO VOWEL SIGN AI -0EC6 LAO KO LA -0EC8 LAO TONE MAI EK -0EC9 LAO TONE MAI THO -0ECA LAO TONE MAI TI -0ECB LAO TONE MAI CATAWA -0ECC LAO CANCELLATION MARK -0ECD LAO NIGGAHITA -0ED0 LAO DIGIT ZERO -0ED1 LAO DIGIT ONE -0ED2 LAO DIGIT TWO -0ED3 LAO DIGIT THREE -0ED4 LAO DIGIT FOUR -0ED5 LAO DIGIT FIVE -0ED6 LAO DIGIT SIX -0ED7 LAO DIGIT SEVEN -0ED8 LAO DIGIT EIGHT -0ED9 LAO DIGIT NINE -0EDC LAO HO NO -0EDD LAO HO MO -0F00 TIBETAN SYLLABLE OM -0F01 TIBETAN MARK GTER YIG MGO TRUNCATED A -0F02 TIBETAN MARK GTER YIG MGO -UM RNAM BCAD MA -0F03 TIBETAN MARK GTER YIG MGO -UM GTER TSHEG MA -0F04 TIBETAN MARK INITIAL YIG MGO MDUN MA -0F05 TIBETAN MARK CLOSING YIG MGO SGAB MA -0F06 TIBETAN MARK CARET YIG MGO PHUR SHAD MA -0F07 TIBETAN MARK YIG MGO TSHEG SHAD MA -0F08 TIBETAN MARK SBRUL SHAD -0F09 TIBETAN MARK BSKUR YIG MGO -0F0A TIBETAN MARK BKA- SHOG YIG MGO -0F0B TIBETAN MARK INTERSYLLABIC TSHEG -0F0C TIBETAN MARK DELIMITER TSHEG BSTAR -0F0D TIBETAN MARK SHAD -0F0E TIBETAN MARK NYIS SHAD -0F0F TIBETAN MARK TSHEG SHAD -0F10 TIBETAN MARK NYIS TSHEG SHAD -0F11 TIBETAN MARK RIN CHEN SPUNGS SHAD -0F12 TIBETAN MARK RGYA GRAM SHAD -0F13 TIBETAN MARK CARET -DZUD RTAGS ME LONG CAN -0F14 TIBETAN MARK GTER TSHEG -0F15 TIBETAN LOGOTYPE SIGN CHAD RTAGS -0F16 TIBETAN LOGOTYPE SIGN LHAG RTAGS -0F17 TIBETAN ASTROLOGICAL SIGN SGRA GCAN -CHAR RTAGS -0F18 TIBETAN ASTROLOGICAL SIGN -KHYUD PA -0F19 TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS -0F1A TIBETAN SIGN RDEL DKAR GCIG -0F1B TIBETAN SIGN RDEL DKAR GNYIS -0F1C TIBETAN SIGN RDEL DKAR GSUM -0F1D TIBETAN SIGN RDEL NAG GCIG -0F1E TIBETAN SIGN RDEL NAG GNYIS 
-0F1F TIBETAN SIGN RDEL DKAR RDEL NAG -0F20 TIBETAN DIGIT ZERO -0F21 TIBETAN DIGIT ONE -0F22 TIBETAN DIGIT TWO -0F23 TIBETAN DIGIT THREE -0F24 TIBETAN DIGIT FOUR -0F25 TIBETAN DIGIT FIVE -0F26 TIBETAN DIGIT SIX -0F27 TIBETAN DIGIT SEVEN -0F28 TIBETAN DIGIT EIGHT -0F29 TIBETAN DIGIT NINE -0F2A TIBETAN DIGIT HALF ONE -0F2B TIBETAN DIGIT HALF TWO -0F2C TIBETAN DIGIT HALF THREE -0F2D TIBETAN DIGIT HALF FOUR -0F2E TIBETAN DIGIT HALF FIVE -0F2F TIBETAN DIGIT HALF SIX -0F30 TIBETAN DIGIT HALF SEVEN -0F31 TIBETAN DIGIT HALF EIGHT -0F32 TIBETAN DIGIT HALF NINE -0F33 TIBETAN DIGIT HALF ZERO -0F34 TIBETAN MARK BSDUS RTAGS -0F35 TIBETAN MARK NGAS BZUNG NYI ZLA -0F36 TIBETAN MARK CARET -DZUD RTAGS BZHI MIG CAN -0F37 TIBETAN MARK NGAS BZUNG SGOR RTAGS -0F38 TIBETAN MARK CHE MGO -0F39 TIBETAN MARK TSA -PHRU -0F3A TIBETAN MARK GUG RTAGS GYON -0F3B TIBETAN MARK GUG RTAGS GYAS -0F3C TIBETAN MARK ANG KHANG GYON -0F3D TIBETAN MARK ANG KHANG GYAS -0F3E TIBETAN SIGN YAR TSHES -0F3F TIBETAN SIGN MAR TSHES -0F40 TIBETAN LETTER KA -0F41 TIBETAN LETTER KHA -0F42 TIBETAN LETTER GA -0F43 TIBETAN LETTER GHA -0F44 TIBETAN LETTER NGA -0F45 TIBETAN LETTER CA -0F46 TIBETAN LETTER CHA -0F47 TIBETAN LETTER JA -0F49 TIBETAN LETTER NYA -0F4A TIBETAN LETTER TTA -0F4B TIBETAN LETTER TTHA -0F4C TIBETAN LETTER DDA -0F4D TIBETAN LETTER DDHA -0F4E TIBETAN LETTER NNA -0F4F TIBETAN LETTER TA -0F50 TIBETAN LETTER THA -0F51 TIBETAN LETTER DA -0F52 TIBETAN LETTER DHA -0F53 TIBETAN LETTER NA -0F54 TIBETAN LETTER PA -0F55 TIBETAN LETTER PHA -0F56 TIBETAN LETTER BA -0F57 TIBETAN LETTER BHA -0F58 TIBETAN LETTER MA -0F59 TIBETAN LETTER TSA -0F5A TIBETAN LETTER TSHA -0F5B TIBETAN LETTER DZA -0F5C TIBETAN LETTER DZHA -0F5D TIBETAN LETTER WA -0F5E TIBETAN LETTER ZHA -0F5F TIBETAN LETTER ZA -0F60 TIBETAN LETTER -A -0F61 TIBETAN LETTER YA -0F62 TIBETAN LETTER RA -0F63 TIBETAN LETTER LA -0F64 TIBETAN LETTER SHA -0F65 TIBETAN LETTER SSA -0F66 TIBETAN LETTER SA -0F67 TIBETAN LETTER HA -0F68 TIBETAN LETTER A -0F69 TIBETAN 
LETTER KSSA -0F6A TIBETAN LETTER FIXED-FORM RA -0F6B TIBETAN LETTER KKA -0F6C TIBETAN LETTER RRA -0F71 TIBETAN VOWEL SIGN AA -0F72 TIBETAN VOWEL SIGN I -0F73 TIBETAN VOWEL SIGN II -0F74 TIBETAN VOWEL SIGN U -0F75 TIBETAN VOWEL SIGN UU -0F76 TIBETAN VOWEL SIGN VOCALIC R -0F77 TIBETAN VOWEL SIGN VOCALIC RR -0F78 TIBETAN VOWEL SIGN VOCALIC L -0F79 TIBETAN VOWEL SIGN VOCALIC LL -0F7A TIBETAN VOWEL SIGN E -0F7B TIBETAN VOWEL SIGN EE -0F7C TIBETAN VOWEL SIGN O -0F7D TIBETAN VOWEL SIGN OO -0F7E TIBETAN SIGN RJES SU NGA RO -0F7F TIBETAN SIGN RNAM BCAD -0F80 TIBETAN VOWEL SIGN REVERSED I -0F81 TIBETAN VOWEL SIGN REVERSED II -0F82 TIBETAN SIGN NYI ZLA NAA DA -0F83 TIBETAN SIGN SNA LDAN -0F84 TIBETAN MARK HALANTA -0F85 TIBETAN MARK PALUTA -0F86 TIBETAN SIGN LCI RTAGS -0F87 TIBETAN SIGN YANG RTAGS -0F88 TIBETAN SIGN LCE TSA CAN -0F89 TIBETAN SIGN MCHU CAN -0F8A TIBETAN SIGN GRU CAN RGYINGS -0F8B TIBETAN SIGN GRU MED RGYINGS -0F90 TIBETAN SUBJOINED LETTER KA -0F91 TIBETAN SUBJOINED LETTER KHA -0F92 TIBETAN SUBJOINED LETTER GA -0F93 TIBETAN SUBJOINED LETTER GHA -0F94 TIBETAN SUBJOINED LETTER NGA -0F95 TIBETAN SUBJOINED LETTER CA -0F96 TIBETAN SUBJOINED LETTER CHA -0F97 TIBETAN SUBJOINED LETTER JA -0F99 TIBETAN SUBJOINED LETTER NYA -0F9A TIBETAN SUBJOINED LETTER TTA -0F9B TIBETAN SUBJOINED LETTER TTHA -0F9C TIBETAN SUBJOINED LETTER DDA -0F9D TIBETAN SUBJOINED LETTER DDHA -0F9E TIBETAN SUBJOINED LETTER NNA -0F9F TIBETAN SUBJOINED LETTER TA -0FA0 TIBETAN SUBJOINED LETTER THA -0FA1 TIBETAN SUBJOINED LETTER DA -0FA2 TIBETAN SUBJOINED LETTER DHA -0FA3 TIBETAN SUBJOINED LETTER NA -0FA4 TIBETAN SUBJOINED LETTER PA -0FA5 TIBETAN SUBJOINED LETTER PHA -0FA6 TIBETAN SUBJOINED LETTER BA -0FA7 TIBETAN SUBJOINED LETTER BHA -0FA8 TIBETAN SUBJOINED LETTER MA -0FA9 TIBETAN SUBJOINED LETTER TSA -0FAA TIBETAN SUBJOINED LETTER TSHA -0FAB TIBETAN SUBJOINED LETTER DZA -0FAC TIBETAN SUBJOINED LETTER DZHA -0FAD TIBETAN SUBJOINED LETTER WA -0FAE TIBETAN SUBJOINED LETTER ZHA -0FAF TIBETAN SUBJOINED LETTER 
ZA -0FB0 TIBETAN SUBJOINED LETTER -A -0FB1 TIBETAN SUBJOINED LETTER YA -0FB2 TIBETAN SUBJOINED LETTER RA -0FB3 TIBETAN SUBJOINED LETTER LA -0FB4 TIBETAN SUBJOINED LETTER SHA -0FB5 TIBETAN SUBJOINED LETTER SSA -0FB6 TIBETAN SUBJOINED LETTER SA -0FB7 TIBETAN SUBJOINED LETTER HA -0FB8 TIBETAN SUBJOINED LETTER A -0FB9 TIBETAN SUBJOINED LETTER KSSA -0FBA TIBETAN SUBJOINED LETTER FIXED-FORM WA -0FBB TIBETAN SUBJOINED LETTER FIXED-FORM YA -0FBC TIBETAN SUBJOINED LETTER FIXED-FORM RA -0FBE TIBETAN KU RU KHA -0FBF TIBETAN KU RU KHA BZHI MIG CAN -0FC0 TIBETAN CANTILLATION SIGN HEAVY BEAT -0FC1 TIBETAN CANTILLATION SIGN LIGHT BEAT -0FC2 TIBETAN CANTILLATION SIGN CANG TE-U -0FC3 TIBETAN CANTILLATION SIGN SBUB -CHAL -0FC4 TIBETAN SYMBOL DRIL BU -0FC5 TIBETAN SYMBOL RDO RJE -0FC6 TIBETAN SYMBOL PADMA GDAN -0FC7 TIBETAN SYMBOL RDO RJE RGYA GRAM -0FC8 TIBETAN SYMBOL PHUR PA -0FC9 TIBETAN SYMBOL NOR BU -0FCA TIBETAN SYMBOL NOR BU NYIS -KHYIL -0FCB TIBETAN SYMBOL NOR BU GSUM -KHYIL -0FCC TIBETAN SYMBOL NOR BU BZHI -KHYIL -0FCE TIBETAN SIGN RDEL NAG RDEL DKAR -0FCF TIBETAN SIGN RDEL NAG GSUM -0FD0 TIBETAN MARK BSKA- SHOG GI MGO RGYAN -0FD1 TIBETAN MARK MNYAM YIG GI MGO RGYAN -0FD2 TIBETAN MARK NYIS TSHEG -0FD3 TIBETAN MARK INITIAL BRDA RNYING YIG MGO MDUN MA -0FD4 TIBETAN MARK CLOSING BRDA RNYING YIG MGO SGAB MA -0FD5 RIGHT-FACING SVASTI SIGN -0FD6 LEFT-FACING SVASTI SIGN -0FD7 RIGHT-FACING SVASTI SIGN WITH DOTS -0FD8 LEFT-FACING SVASTI SIGN WITH DOTS -1000 MYANMAR LETTER KA -1001 MYANMAR LETTER KHA -1002 MYANMAR LETTER GA -1003 MYANMAR LETTER GHA -1004 MYANMAR LETTER NGA -1005 MYANMAR LETTER CA -1006 MYANMAR LETTER CHA -1007 MYANMAR LETTER JA -1008 MYANMAR LETTER JHA -1009 MYANMAR LETTER NYA -100A MYANMAR LETTER NNYA -100B MYANMAR LETTER TTA -100C MYANMAR LETTER TTHA -100D MYANMAR LETTER DDA -100E MYANMAR LETTER DDHA -100F MYANMAR LETTER NNA -1010 MYANMAR LETTER TA -1011 MYANMAR LETTER THA -1012 MYANMAR LETTER DA -1013 MYANMAR LETTER DHA -1014 MYANMAR LETTER NA -1015 MYANMAR LETTER 
PA -1016 MYANMAR LETTER PHA -1017 MYANMAR LETTER BA -1018 MYANMAR LETTER BHA -1019 MYANMAR LETTER MA -101A MYANMAR LETTER YA -101B MYANMAR LETTER RA -101C MYANMAR LETTER LA -101D MYANMAR LETTER WA -101E MYANMAR LETTER SA -101F MYANMAR LETTER HA -1020 MYANMAR LETTER LLA -1021 MYANMAR LETTER A -1022 MYANMAR LETTER SHAN A -1023 MYANMAR LETTER I -1024 MYANMAR LETTER II -1025 MYANMAR LETTER U -1026 MYANMAR LETTER UU -1027 MYANMAR LETTER E -1028 MYANMAR LETTER MON E -1029 MYANMAR LETTER O -102A MYANMAR LETTER AU -102B MYANMAR VOWEL SIGN TALL AA -102C MYANMAR VOWEL SIGN AA -102D MYANMAR VOWEL SIGN I -102E MYANMAR VOWEL SIGN II -102F MYANMAR VOWEL SIGN U -1030 MYANMAR VOWEL SIGN UU -1031 MYANMAR VOWEL SIGN E -1032 MYANMAR VOWEL SIGN AI -1033 MYANMAR VOWEL SIGN MON II -1034 MYANMAR VOWEL SIGN MON O -1035 MYANMAR VOWEL SIGN E ABOVE -1036 MYANMAR SIGN ANUSVARA -1037 MYANMAR SIGN DOT BELOW -1038 MYANMAR SIGN VISARGA -1039 MYANMAR SIGN VIRAMA -103A MYANMAR SIGN ASAT -103B MYANMAR CONSONANT SIGN MEDIAL YA -103C MYANMAR CONSONANT SIGN MEDIAL RA -103D MYANMAR CONSONANT SIGN MEDIAL WA -103E MYANMAR CONSONANT SIGN MEDIAL HA -103F MYANMAR LETTER GREAT SA -1040 MYANMAR DIGIT ZERO -1041 MYANMAR DIGIT ONE -1042 MYANMAR DIGIT TWO -1043 MYANMAR DIGIT THREE -1044 MYANMAR DIGIT FOUR -1045 MYANMAR DIGIT FIVE -1046 MYANMAR DIGIT SIX -1047 MYANMAR DIGIT SEVEN -1048 MYANMAR DIGIT EIGHT -1049 MYANMAR DIGIT NINE -104A MYANMAR SIGN LITTLE SECTION -104B MYANMAR SIGN SECTION -104C MYANMAR SYMBOL LOCATIVE -104D MYANMAR SYMBOL COMPLETED -104E MYANMAR SYMBOL AFOREMENTIONED -104F MYANMAR SYMBOL GENITIVE -1050 MYANMAR LETTER SHA -1051 MYANMAR LETTER SSA -1052 MYANMAR LETTER VOCALIC R -1053 MYANMAR LETTER VOCALIC RR -1054 MYANMAR LETTER VOCALIC L -1055 MYANMAR LETTER VOCALIC LL -1056 MYANMAR VOWEL SIGN VOCALIC R -1057 MYANMAR VOWEL SIGN VOCALIC RR -1058 MYANMAR VOWEL SIGN VOCALIC L -1059 MYANMAR VOWEL SIGN VOCALIC LL -105A MYANMAR LETTER MON NGA -105B MYANMAR LETTER MON JHA -105C MYANMAR LETTER MON BBA 
-105D MYANMAR LETTER MON BBE -105E MYANMAR CONSONANT SIGN MON MEDIAL NA -105F MYANMAR CONSONANT SIGN MON MEDIAL MA -1060 MYANMAR CONSONANT SIGN MON MEDIAL LA -1061 MYANMAR LETTER SGAW KAREN SHA -1062 MYANMAR VOWEL SIGN SGAW KAREN EU -1063 MYANMAR TONE MARK SGAW KAREN HATHI -1064 MYANMAR TONE MARK SGAW KAREN KE PHO -1065 MYANMAR LETTER WESTERN PWO KAREN THA -1066 MYANMAR LETTER WESTERN PWO KAREN PWA -1067 MYANMAR VOWEL SIGN WESTERN PWO KAREN EU -1068 MYANMAR VOWEL SIGN WESTERN PWO KAREN UE -1069 MYANMAR SIGN WESTERN PWO KAREN TONE-1 -106A MYANMAR SIGN WESTERN PWO KAREN TONE-2 -106B MYANMAR SIGN WESTERN PWO KAREN TONE-3 -106C MYANMAR SIGN WESTERN PWO KAREN TONE-4 -106D MYANMAR SIGN WESTERN PWO KAREN TONE-5 -106E MYANMAR LETTER EASTERN PWO KAREN NNA -106F MYANMAR LETTER EASTERN PWO KAREN YWA -1070 MYANMAR LETTER EASTERN PWO KAREN GHWA -1071 MYANMAR VOWEL SIGN GEBA KAREN I -1072 MYANMAR VOWEL SIGN KAYAH OE -1073 MYANMAR VOWEL SIGN KAYAH U -1074 MYANMAR VOWEL SIGN KAYAH EE -1075 MYANMAR LETTER SHAN KA -1076 MYANMAR LETTER SHAN KHA -1077 MYANMAR LETTER SHAN GA -1078 MYANMAR LETTER SHAN CA -1079 MYANMAR LETTER SHAN ZA -107A MYANMAR LETTER SHAN NYA -107B MYANMAR LETTER SHAN DA -107C MYANMAR LETTER SHAN NA -107D MYANMAR LETTER SHAN PHA -107E MYANMAR LETTER SHAN FA -107F MYANMAR LETTER SHAN BA -1080 MYANMAR LETTER SHAN THA -1081 MYANMAR LETTER SHAN HA -1082 MYANMAR CONSONANT SIGN SHAN MEDIAL WA -1083 MYANMAR VOWEL SIGN SHAN AA -1084 MYANMAR VOWEL SIGN SHAN E -1085 MYANMAR VOWEL SIGN SHAN E ABOVE -1086 MYANMAR VOWEL SIGN SHAN FINAL Y -1087 MYANMAR SIGN SHAN TONE-2 -1088 MYANMAR SIGN SHAN TONE-3 -1089 MYANMAR SIGN SHAN TONE-5 -108A MYANMAR SIGN SHAN TONE-6 -108B MYANMAR SIGN SHAN COUNCIL TONE-2 -108C MYANMAR SIGN SHAN COUNCIL TONE-3 -108D MYANMAR SIGN SHAN COUNCIL EMPHATIC TONE -108E MYANMAR LETTER RUMAI PALAUNG FA -108F MYANMAR SIGN RUMAI PALAUNG TONE-5 -1090 MYANMAR SHAN DIGIT ZERO -1091 MYANMAR SHAN DIGIT ONE -1092 MYANMAR SHAN DIGIT TWO -1093 MYANMAR SHAN DIGIT THREE -1094 
MYANMAR SHAN DIGIT FOUR -1095 MYANMAR SHAN DIGIT FIVE -1096 MYANMAR SHAN DIGIT SIX -1097 MYANMAR SHAN DIGIT SEVEN -1098 MYANMAR SHAN DIGIT EIGHT -1099 MYANMAR SHAN DIGIT NINE -109A MYANMAR SIGN KHAMTI TONE-1 -109B MYANMAR SIGN KHAMTI TONE-3 -109C MYANMAR VOWEL SIGN AITON A -109D MYANMAR VOWEL SIGN AITON AI -109E MYANMAR SYMBOL SHAN ONE -109F MYANMAR SYMBOL SHAN EXCLAMATION -10A0 GEORGIAN CAPITAL LETTER AN -10A1 GEORGIAN CAPITAL LETTER BAN -10A2 GEORGIAN CAPITAL LETTER GAN -10A3 GEORGIAN CAPITAL LETTER DON -10A4 GEORGIAN CAPITAL LETTER EN -10A5 GEORGIAN CAPITAL LETTER VIN -10A6 GEORGIAN CAPITAL LETTER ZEN -10A7 GEORGIAN CAPITAL LETTER TAN -10A8 GEORGIAN CAPITAL LETTER IN -10A9 GEORGIAN CAPITAL LETTER KAN -10AA GEORGIAN CAPITAL LETTER LAS -10AB GEORGIAN CAPITAL LETTER MAN -10AC GEORGIAN CAPITAL LETTER NAR -10AD GEORGIAN CAPITAL LETTER ON -10AE GEORGIAN CAPITAL LETTER PAR -10AF GEORGIAN CAPITAL LETTER ZHAR -10B0 GEORGIAN CAPITAL LETTER RAE -10B1 GEORGIAN CAPITAL LETTER SAN -10B2 GEORGIAN CAPITAL LETTER TAR -10B3 GEORGIAN CAPITAL LETTER UN -10B4 GEORGIAN CAPITAL LETTER PHAR -10B5 GEORGIAN CAPITAL LETTER KHAR -10B6 GEORGIAN CAPITAL LETTER GHAN -10B7 GEORGIAN CAPITAL LETTER QAR -10B8 GEORGIAN CAPITAL LETTER SHIN -10B9 GEORGIAN CAPITAL LETTER CHIN -10BA GEORGIAN CAPITAL LETTER CAN -10BB GEORGIAN CAPITAL LETTER JIL -10BC GEORGIAN CAPITAL LETTER CIL -10BD GEORGIAN CAPITAL LETTER CHAR -10BE GEORGIAN CAPITAL LETTER XAN -10BF GEORGIAN CAPITAL LETTER JHAN -10C0 GEORGIAN CAPITAL LETTER HAE -10C1 GEORGIAN CAPITAL LETTER HE -10C2 GEORGIAN CAPITAL LETTER HIE -10C3 GEORGIAN CAPITAL LETTER WE -10C4 GEORGIAN CAPITAL LETTER HAR -10C5 GEORGIAN CAPITAL LETTER HOE -10D0 GEORGIAN LETTER AN -10D1 GEORGIAN LETTER BAN -10D2 GEORGIAN LETTER GAN -10D3 GEORGIAN LETTER DON -10D4 GEORGIAN LETTER EN -10D5 GEORGIAN LETTER VIN -10D6 GEORGIAN LETTER ZEN -10D7 GEORGIAN LETTER TAN -10D8 GEORGIAN LETTER IN -10D9 GEORGIAN LETTER KAN -10DA GEORGIAN LETTER LAS -10DB GEORGIAN LETTER MAN -10DC GEORGIAN LETTER 
NAR -10DD GEORGIAN LETTER ON -10DE GEORGIAN LETTER PAR -10DF GEORGIAN LETTER ZHAR -10E0 GEORGIAN LETTER RAE -10E1 GEORGIAN LETTER SAN -10E2 GEORGIAN LETTER TAR -10E3 GEORGIAN LETTER UN -10E4 GEORGIAN LETTER PHAR -10E5 GEORGIAN LETTER KHAR -10E6 GEORGIAN LETTER GHAN -10E7 GEORGIAN LETTER QAR -10E8 GEORGIAN LETTER SHIN -10E9 GEORGIAN LETTER CHIN -10EA GEORGIAN LETTER CAN -10EB GEORGIAN LETTER JIL -10EC GEORGIAN LETTER CIL -10ED GEORGIAN LETTER CHAR -10EE GEORGIAN LETTER XAN -10EF GEORGIAN LETTER JHAN -10F0 GEORGIAN LETTER HAE -10F1 GEORGIAN LETTER HE -10F2 GEORGIAN LETTER HIE -10F3 GEORGIAN LETTER WE -10F4 GEORGIAN LETTER HAR -10F5 GEORGIAN LETTER HOE -10F6 GEORGIAN LETTER FI -10F7 GEORGIAN LETTER YN -10F8 GEORGIAN LETTER ELIFI -10F9 GEORGIAN LETTER TURNED GAN -10FA GEORGIAN LETTER AIN -10FB GEORGIAN PARAGRAPH SEPARATOR -10FC MODIFIER LETTER GEORGIAN NAR -1100 HANGUL CHOSEONG KIYEOK -1101 HANGUL CHOSEONG SSANGKIYEOK -1102 HANGUL CHOSEONG NIEUN -1103 HANGUL CHOSEONG TIKEUT -1104 HANGUL CHOSEONG SSANGTIKEUT -1105 HANGUL CHOSEONG RIEUL -1106 HANGUL CHOSEONG MIEUM -1107 HANGUL CHOSEONG PIEUP -1108 HANGUL CHOSEONG SSANGPIEUP -1109 HANGUL CHOSEONG SIOS -110A HANGUL CHOSEONG SSANGSIOS -110B HANGUL CHOSEONG IEUNG -110C HANGUL CHOSEONG CIEUC -110D HANGUL CHOSEONG SSANGCIEUC -110E HANGUL CHOSEONG CHIEUCH -110F HANGUL CHOSEONG KHIEUKH -1110 HANGUL CHOSEONG THIEUTH -1111 HANGUL CHOSEONG PHIEUPH -1112 HANGUL CHOSEONG HIEUH -1113 HANGUL CHOSEONG NIEUN-KIYEOK -1114 HANGUL CHOSEONG SSANGNIEUN -1115 HANGUL CHOSEONG NIEUN-TIKEUT -1116 HANGUL CHOSEONG NIEUN-PIEUP -1117 HANGUL CHOSEONG TIKEUT-KIYEOK -1118 HANGUL CHOSEONG RIEUL-NIEUN -1119 HANGUL CHOSEONG SSANGRIEUL -111A HANGUL CHOSEONG RIEUL-HIEUH -111B HANGUL CHOSEONG KAPYEOUNRIEUL -111C HANGUL CHOSEONG MIEUM-PIEUP -111D HANGUL CHOSEONG KAPYEOUNMIEUM -111E HANGUL CHOSEONG PIEUP-KIYEOK -111F HANGUL CHOSEONG PIEUP-NIEUN -1120 HANGUL CHOSEONG PIEUP-TIKEUT -1121 HANGUL CHOSEONG PIEUP-SIOS -1122 HANGUL CHOSEONG PIEUP-SIOS-KIYEOK -1123 
HANGUL CHOSEONG PIEUP-SIOS-TIKEUT -1124 HANGUL CHOSEONG PIEUP-SIOS-PIEUP -1125 HANGUL CHOSEONG PIEUP-SSANGSIOS -1126 HANGUL CHOSEONG PIEUP-SIOS-CIEUC -1127 HANGUL CHOSEONG PIEUP-CIEUC -1128 HANGUL CHOSEONG PIEUP-CHIEUCH -1129 HANGUL CHOSEONG PIEUP-THIEUTH -112A HANGUL CHOSEONG PIEUP-PHIEUPH -112B HANGUL CHOSEONG KAPYEOUNPIEUP -112C HANGUL CHOSEONG KAPYEOUNSSANGPIEUP -112D HANGUL CHOSEONG SIOS-KIYEOK -112E HANGUL CHOSEONG SIOS-NIEUN -112F HANGUL CHOSEONG SIOS-TIKEUT -1130 HANGUL CHOSEONG SIOS-RIEUL -1131 HANGUL CHOSEONG SIOS-MIEUM -1132 HANGUL CHOSEONG SIOS-PIEUP -1133 HANGUL CHOSEONG SIOS-PIEUP-KIYEOK -1134 HANGUL CHOSEONG SIOS-SSANGSIOS -1135 HANGUL CHOSEONG SIOS-IEUNG -1136 HANGUL CHOSEONG SIOS-CIEUC -1137 HANGUL CHOSEONG SIOS-CHIEUCH -1138 HANGUL CHOSEONG SIOS-KHIEUKH -1139 HANGUL CHOSEONG SIOS-THIEUTH -113A HANGUL CHOSEONG SIOS-PHIEUPH -113B HANGUL CHOSEONG SIOS-HIEUH -113C HANGUL CHOSEONG CHITUEUMSIOS -113D HANGUL CHOSEONG CHITUEUMSSANGSIOS -113E HANGUL CHOSEONG CEONGCHIEUMSIOS -113F HANGUL CHOSEONG CEONGCHIEUMSSANGSIOS -1140 HANGUL CHOSEONG PANSIOS -1141 HANGUL CHOSEONG IEUNG-KIYEOK -1142 HANGUL CHOSEONG IEUNG-TIKEUT -1143 HANGUL CHOSEONG IEUNG-MIEUM -1144 HANGUL CHOSEONG IEUNG-PIEUP -1145 HANGUL CHOSEONG IEUNG-SIOS -1146 HANGUL CHOSEONG IEUNG-PANSIOS -1147 HANGUL CHOSEONG SSANGIEUNG -1148 HANGUL CHOSEONG IEUNG-CIEUC -1149 HANGUL CHOSEONG IEUNG-CHIEUCH -114A HANGUL CHOSEONG IEUNG-THIEUTH -114B HANGUL CHOSEONG IEUNG-PHIEUPH -114C HANGUL CHOSEONG YESIEUNG -114D HANGUL CHOSEONG CIEUC-IEUNG -114E HANGUL CHOSEONG CHITUEUMCIEUC -114F HANGUL CHOSEONG CHITUEUMSSANGCIEUC -1150 HANGUL CHOSEONG CEONGCHIEUMCIEUC -1151 HANGUL CHOSEONG CEONGCHIEUMSSANGCIEUC -1152 HANGUL CHOSEONG CHIEUCH-KHIEUKH -1153 HANGUL CHOSEONG CHIEUCH-HIEUH -1154 HANGUL CHOSEONG CHITUEUMCHIEUCH -1155 HANGUL CHOSEONG CEONGCHIEUMCHIEUCH -1156 HANGUL CHOSEONG PHIEUPH-PIEUP -1157 HANGUL CHOSEONG KAPYEOUNPHIEUPH -1158 HANGUL CHOSEONG SSANGHIEUH -1159 HANGUL CHOSEONG YEORINHIEUH -115A HANGUL CHOSEONG 
KIYEOK-TIKEUT -115B HANGUL CHOSEONG NIEUN-SIOS -115C HANGUL CHOSEONG NIEUN-CIEUC -115D HANGUL CHOSEONG NIEUN-HIEUH -115E HANGUL CHOSEONG TIKEUT-RIEUL -115F HANGUL CHOSEONG FILLER -1160 HANGUL JUNGSEONG FILLER -1161 HANGUL JUNGSEONG A -1162 HANGUL JUNGSEONG AE -1163 HANGUL JUNGSEONG YA -1164 HANGUL JUNGSEONG YAE -1165 HANGUL JUNGSEONG EO -1166 HANGUL JUNGSEONG E -1167 HANGUL JUNGSEONG YEO -1168 HANGUL JUNGSEONG YE -1169 HANGUL JUNGSEONG O -116A HANGUL JUNGSEONG WA -116B HANGUL JUNGSEONG WAE -116C HANGUL JUNGSEONG OE -116D HANGUL JUNGSEONG YO -116E HANGUL JUNGSEONG U -116F HANGUL JUNGSEONG WEO -1170 HANGUL JUNGSEONG WE -1171 HANGUL JUNGSEONG WI -1172 HANGUL JUNGSEONG YU -1173 HANGUL JUNGSEONG EU -1174 HANGUL JUNGSEONG YI -1175 HANGUL JUNGSEONG I -1176 HANGUL JUNGSEONG A-O -1177 HANGUL JUNGSEONG A-U -1178 HANGUL JUNGSEONG YA-O -1179 HANGUL JUNGSEONG YA-YO -117A HANGUL JUNGSEONG EO-O -117B HANGUL JUNGSEONG EO-U -117C HANGUL JUNGSEONG EO-EU -117D HANGUL JUNGSEONG YEO-O -117E HANGUL JUNGSEONG YEO-U -117F HANGUL JUNGSEONG O-EO -1180 HANGUL JUNGSEONG O-E -1181 HANGUL JUNGSEONG O-YE -1182 HANGUL JUNGSEONG O-O -1183 HANGUL JUNGSEONG O-U -1184 HANGUL JUNGSEONG YO-YA -1185 HANGUL JUNGSEONG YO-YAE -1186 HANGUL JUNGSEONG YO-YEO -1187 HANGUL JUNGSEONG YO-O -1188 HANGUL JUNGSEONG YO-I -1189 HANGUL JUNGSEONG U-A -118A HANGUL JUNGSEONG U-AE -118B HANGUL JUNGSEONG U-EO-EU -118C HANGUL JUNGSEONG U-YE -118D HANGUL JUNGSEONG U-U -118E HANGUL JUNGSEONG YU-A -118F HANGUL JUNGSEONG YU-EO -1190 HANGUL JUNGSEONG YU-E -1191 HANGUL JUNGSEONG YU-YEO -1192 HANGUL JUNGSEONG YU-YE -1193 HANGUL JUNGSEONG YU-U -1194 HANGUL JUNGSEONG YU-I -1195 HANGUL JUNGSEONG EU-U -1196 HANGUL JUNGSEONG EU-EU -1197 HANGUL JUNGSEONG YI-U -1198 HANGUL JUNGSEONG I-A -1199 HANGUL JUNGSEONG I-YA -119A HANGUL JUNGSEONG I-O -119B HANGUL JUNGSEONG I-U -119C HANGUL JUNGSEONG I-EU -119D HANGUL JUNGSEONG I-ARAEA -119E HANGUL JUNGSEONG ARAEA -119F HANGUL JUNGSEONG ARAEA-EO -11A0 HANGUL JUNGSEONG ARAEA-U -11A1 HANGUL JUNGSEONG 
ARAEA-I -11A2 HANGUL JUNGSEONG SSANGARAEA -11A3 HANGUL JUNGSEONG A-EU -11A4 HANGUL JUNGSEONG YA-U -11A5 HANGUL JUNGSEONG YEO-YA -11A6 HANGUL JUNGSEONG O-YA -11A7 HANGUL JUNGSEONG O-YAE -11A8 HANGUL JONGSEONG KIYEOK -11A9 HANGUL JONGSEONG SSANGKIYEOK -11AA HANGUL JONGSEONG KIYEOK-SIOS -11AB HANGUL JONGSEONG NIEUN -11AC HANGUL JONGSEONG NIEUN-CIEUC -11AD HANGUL JONGSEONG NIEUN-HIEUH -11AE HANGUL JONGSEONG TIKEUT -11AF HANGUL JONGSEONG RIEUL -11B0 HANGUL JONGSEONG RIEUL-KIYEOK -11B1 HANGUL JONGSEONG RIEUL-MIEUM -11B2 HANGUL JONGSEONG RIEUL-PIEUP -11B3 HANGUL JONGSEONG RIEUL-SIOS -11B4 HANGUL JONGSEONG RIEUL-THIEUTH -11B5 HANGUL JONGSEONG RIEUL-PHIEUPH -11B6 HANGUL JONGSEONG RIEUL-HIEUH -11B7 HANGUL JONGSEONG MIEUM -11B8 HANGUL JONGSEONG PIEUP -11B9 HANGUL JONGSEONG PIEUP-SIOS -11BA HANGUL JONGSEONG SIOS -11BB HANGUL JONGSEONG SSANGSIOS -11BC HANGUL JONGSEONG IEUNG -11BD HANGUL JONGSEONG CIEUC -11BE HANGUL JONGSEONG CHIEUCH -11BF HANGUL JONGSEONG KHIEUKH -11C0 HANGUL JONGSEONG THIEUTH -11C1 HANGUL JONGSEONG PHIEUPH -11C2 HANGUL JONGSEONG HIEUH -11C3 HANGUL JONGSEONG KIYEOK-RIEUL -11C4 HANGUL JONGSEONG KIYEOK-SIOS-KIYEOK -11C5 HANGUL JONGSEONG NIEUN-KIYEOK -11C6 HANGUL JONGSEONG NIEUN-TIKEUT -11C7 HANGUL JONGSEONG NIEUN-SIOS -11C8 HANGUL JONGSEONG NIEUN-PANSIOS -11C9 HANGUL JONGSEONG NIEUN-THIEUTH -11CA HANGUL JONGSEONG TIKEUT-KIYEOK -11CB HANGUL JONGSEONG TIKEUT-RIEUL -11CC HANGUL JONGSEONG RIEUL-KIYEOK-SIOS -11CD HANGUL JONGSEONG RIEUL-NIEUN -11CE HANGUL JONGSEONG RIEUL-TIKEUT -11CF HANGUL JONGSEONG RIEUL-TIKEUT-HIEUH -11D0 HANGUL JONGSEONG SSANGRIEUL -11D1 HANGUL JONGSEONG RIEUL-MIEUM-KIYEOK -11D2 HANGUL JONGSEONG RIEUL-MIEUM-SIOS -11D3 HANGUL JONGSEONG RIEUL-PIEUP-SIOS -11D4 HANGUL JONGSEONG RIEUL-PIEUP-HIEUH -11D5 HANGUL JONGSEONG RIEUL-KAPYEOUNPIEUP -11D6 HANGUL JONGSEONG RIEUL-SSANGSIOS -11D7 HANGUL JONGSEONG RIEUL-PANSIOS -11D8 HANGUL JONGSEONG RIEUL-KHIEUKH -11D9 HANGUL JONGSEONG RIEUL-YEORINHIEUH -11DA HANGUL JONGSEONG MIEUM-KIYEOK -11DB HANGUL JONGSEONG 
MIEUM-RIEUL -11DC HANGUL JONGSEONG MIEUM-PIEUP -11DD HANGUL JONGSEONG MIEUM-SIOS -11DE HANGUL JONGSEONG MIEUM-SSANGSIOS -11DF HANGUL JONGSEONG MIEUM-PANSIOS -11E0 HANGUL JONGSEONG MIEUM-CHIEUCH -11E1 HANGUL JONGSEONG MIEUM-HIEUH -11E2 HANGUL JONGSEONG KAPYEOUNMIEUM -11E3 HANGUL JONGSEONG PIEUP-RIEUL -11E4 HANGUL JONGSEONG PIEUP-PHIEUPH -11E5 HANGUL JONGSEONG PIEUP-HIEUH -11E6 HANGUL JONGSEONG KAPYEOUNPIEUP -11E7 HANGUL JONGSEONG SIOS-KIYEOK -11E8 HANGUL JONGSEONG SIOS-TIKEUT -11E9 HANGUL JONGSEONG SIOS-RIEUL -11EA HANGUL JONGSEONG SIOS-PIEUP -11EB HANGUL JONGSEONG PANSIOS -11EC HANGUL JONGSEONG IEUNG-KIYEOK -11ED HANGUL JONGSEONG IEUNG-SSANGKIYEOK -11EE HANGUL JONGSEONG SSANGIEUNG -11EF HANGUL JONGSEONG IEUNG-KHIEUKH -11F0 HANGUL JONGSEONG YESIEUNG -11F1 HANGUL JONGSEONG YESIEUNG-SIOS -11F2 HANGUL JONGSEONG YESIEUNG-PANSIOS -11F3 HANGUL JONGSEONG PHIEUPH-PIEUP -11F4 HANGUL JONGSEONG KAPYEOUNPHIEUPH -11F5 HANGUL JONGSEONG HIEUH-NIEUN -11F6 HANGUL JONGSEONG HIEUH-RIEUL -11F7 HANGUL JONGSEONG HIEUH-MIEUM -11F8 HANGUL JONGSEONG HIEUH-PIEUP -11F9 HANGUL JONGSEONG YEORINHIEUH -11FA HANGUL JONGSEONG KIYEOK-NIEUN -11FB HANGUL JONGSEONG KIYEOK-PIEUP -11FC HANGUL JONGSEONG KIYEOK-CHIEUCH -11FD HANGUL JONGSEONG KIYEOK-KHIEUKH -11FE HANGUL JONGSEONG KIYEOK-HIEUH -11FF HANGUL JONGSEONG SSANGNIEUN -1200 ETHIOPIC SYLLABLE HA -1201 ETHIOPIC SYLLABLE HU -1202 ETHIOPIC SYLLABLE HI -1203 ETHIOPIC SYLLABLE HAA -1204 ETHIOPIC SYLLABLE HEE -1205 ETHIOPIC SYLLABLE HE -1206 ETHIOPIC SYLLABLE HO -1207 ETHIOPIC SYLLABLE HOA -1208 ETHIOPIC SYLLABLE LA -1209 ETHIOPIC SYLLABLE LU -120A ETHIOPIC SYLLABLE LI -120B ETHIOPIC SYLLABLE LAA -120C ETHIOPIC SYLLABLE LEE -120D ETHIOPIC SYLLABLE LE -120E ETHIOPIC SYLLABLE LO -120F ETHIOPIC SYLLABLE LWA -1210 ETHIOPIC SYLLABLE HHA -1211 ETHIOPIC SYLLABLE HHU -1212 ETHIOPIC SYLLABLE HHI -1213 ETHIOPIC SYLLABLE HHAA -1214 ETHIOPIC SYLLABLE HHEE -1215 ETHIOPIC SYLLABLE HHE -1216 ETHIOPIC SYLLABLE HHO -1217 ETHIOPIC SYLLABLE HHWA -1218 ETHIOPIC SYLLABLE MA 
-1219 ETHIOPIC SYLLABLE MU -121A ETHIOPIC SYLLABLE MI -121B ETHIOPIC SYLLABLE MAA -121C ETHIOPIC SYLLABLE MEE -121D ETHIOPIC SYLLABLE ME -121E ETHIOPIC SYLLABLE MO -121F ETHIOPIC SYLLABLE MWA -1220 ETHIOPIC SYLLABLE SZA -1221 ETHIOPIC SYLLABLE SZU -1222 ETHIOPIC SYLLABLE SZI -1223 ETHIOPIC SYLLABLE SZAA -1224 ETHIOPIC SYLLABLE SZEE -1225 ETHIOPIC SYLLABLE SZE -1226 ETHIOPIC SYLLABLE SZO -1227 ETHIOPIC SYLLABLE SZWA -1228 ETHIOPIC SYLLABLE RA -1229 ETHIOPIC SYLLABLE RU -122A ETHIOPIC SYLLABLE RI -122B ETHIOPIC SYLLABLE RAA -122C ETHIOPIC SYLLABLE REE -122D ETHIOPIC SYLLABLE RE -122E ETHIOPIC SYLLABLE RO -122F ETHIOPIC SYLLABLE RWA -1230 ETHIOPIC SYLLABLE SA -1231 ETHIOPIC SYLLABLE SU -1232 ETHIOPIC SYLLABLE SI -1233 ETHIOPIC SYLLABLE SAA -1234 ETHIOPIC SYLLABLE SEE -1235 ETHIOPIC SYLLABLE SE -1236 ETHIOPIC SYLLABLE SO -1237 ETHIOPIC SYLLABLE SWA -1238 ETHIOPIC SYLLABLE SHA -1239 ETHIOPIC SYLLABLE SHU -123A ETHIOPIC SYLLABLE SHI -123B ETHIOPIC SYLLABLE SHAA -123C ETHIOPIC SYLLABLE SHEE -123D ETHIOPIC SYLLABLE SHE -123E ETHIOPIC SYLLABLE SHO -123F ETHIOPIC SYLLABLE SHWA -1240 ETHIOPIC SYLLABLE QA -1241 ETHIOPIC SYLLABLE QU -1242 ETHIOPIC SYLLABLE QI -1243 ETHIOPIC SYLLABLE QAA -1244 ETHIOPIC SYLLABLE QEE -1245 ETHIOPIC SYLLABLE QE -1246 ETHIOPIC SYLLABLE QO -1247 ETHIOPIC SYLLABLE QOA -1248 ETHIOPIC SYLLABLE QWA -124A ETHIOPIC SYLLABLE QWI -124B ETHIOPIC SYLLABLE QWAA -124C ETHIOPIC SYLLABLE QWEE -124D ETHIOPIC SYLLABLE QWE -1250 ETHIOPIC SYLLABLE QHA -1251 ETHIOPIC SYLLABLE QHU -1252 ETHIOPIC SYLLABLE QHI -1253 ETHIOPIC SYLLABLE QHAA -1254 ETHIOPIC SYLLABLE QHEE -1255 ETHIOPIC SYLLABLE QHE -1256 ETHIOPIC SYLLABLE QHO -1258 ETHIOPIC SYLLABLE QHWA -125A ETHIOPIC SYLLABLE QHWI -125B ETHIOPIC SYLLABLE QHWAA -125C ETHIOPIC SYLLABLE QHWEE -125D ETHIOPIC SYLLABLE QHWE -1260 ETHIOPIC SYLLABLE BA -1261 ETHIOPIC SYLLABLE BU -1262 ETHIOPIC SYLLABLE BI -1263 ETHIOPIC SYLLABLE BAA -1264 ETHIOPIC SYLLABLE BEE -1265 ETHIOPIC SYLLABLE BE -1266 ETHIOPIC SYLLABLE BO -1267 ETHIOPIC 
SYLLABLE BWA -1268 ETHIOPIC SYLLABLE VA -1269 ETHIOPIC SYLLABLE VU -126A ETHIOPIC SYLLABLE VI -126B ETHIOPIC SYLLABLE VAA -126C ETHIOPIC SYLLABLE VEE -126D ETHIOPIC SYLLABLE VE -126E ETHIOPIC SYLLABLE VO -126F ETHIOPIC SYLLABLE VWA -1270 ETHIOPIC SYLLABLE TA -1271 ETHIOPIC SYLLABLE TU -1272 ETHIOPIC SYLLABLE TI -1273 ETHIOPIC SYLLABLE TAA -1274 ETHIOPIC SYLLABLE TEE -1275 ETHIOPIC SYLLABLE TE -1276 ETHIOPIC SYLLABLE TO -1277 ETHIOPIC SYLLABLE TWA -1278 ETHIOPIC SYLLABLE CA -1279 ETHIOPIC SYLLABLE CU -127A ETHIOPIC SYLLABLE CI -127B ETHIOPIC SYLLABLE CAA -127C ETHIOPIC SYLLABLE CEE -127D ETHIOPIC SYLLABLE CE -127E ETHIOPIC SYLLABLE CO -127F ETHIOPIC SYLLABLE CWA -1280 ETHIOPIC SYLLABLE XA -1281 ETHIOPIC SYLLABLE XU -1282 ETHIOPIC SYLLABLE XI -1283 ETHIOPIC SYLLABLE XAA -1284 ETHIOPIC SYLLABLE XEE -1285 ETHIOPIC SYLLABLE XE -1286 ETHIOPIC SYLLABLE XO -1287 ETHIOPIC SYLLABLE XOA -1288 ETHIOPIC SYLLABLE XWA -128A ETHIOPIC SYLLABLE XWI -128B ETHIOPIC SYLLABLE XWAA -128C ETHIOPIC SYLLABLE XWEE -128D ETHIOPIC SYLLABLE XWE -1290 ETHIOPIC SYLLABLE NA -1291 ETHIOPIC SYLLABLE NU -1292 ETHIOPIC SYLLABLE NI -1293 ETHIOPIC SYLLABLE NAA -1294 ETHIOPIC SYLLABLE NEE -1295 ETHIOPIC SYLLABLE NE -1296 ETHIOPIC SYLLABLE NO -1297 ETHIOPIC SYLLABLE NWA -1298 ETHIOPIC SYLLABLE NYA -1299 ETHIOPIC SYLLABLE NYU -129A ETHIOPIC SYLLABLE NYI -129B ETHIOPIC SYLLABLE NYAA -129C ETHIOPIC SYLLABLE NYEE -129D ETHIOPIC SYLLABLE NYE -129E ETHIOPIC SYLLABLE NYO -129F ETHIOPIC SYLLABLE NYWA -12A0 ETHIOPIC SYLLABLE GLOTTAL A -12A1 ETHIOPIC SYLLABLE GLOTTAL U -12A2 ETHIOPIC SYLLABLE GLOTTAL I -12A3 ETHIOPIC SYLLABLE GLOTTAL AA -12A4 ETHIOPIC SYLLABLE GLOTTAL EE -12A5 ETHIOPIC SYLLABLE GLOTTAL E -12A6 ETHIOPIC SYLLABLE GLOTTAL O -12A7 ETHIOPIC SYLLABLE GLOTTAL WA -12A8 ETHIOPIC SYLLABLE KA -12A9 ETHIOPIC SYLLABLE KU -12AA ETHIOPIC SYLLABLE KI -12AB ETHIOPIC SYLLABLE KAA -12AC ETHIOPIC SYLLABLE KEE -12AD ETHIOPIC SYLLABLE KE -12AE ETHIOPIC SYLLABLE KO -12AF ETHIOPIC SYLLABLE KOA -12B0 ETHIOPIC SYLLABLE KWA 
-12B2 ETHIOPIC SYLLABLE KWI -12B3 ETHIOPIC SYLLABLE KWAA -12B4 ETHIOPIC SYLLABLE KWEE -12B5 ETHIOPIC SYLLABLE KWE -12B8 ETHIOPIC SYLLABLE KXA -12B9 ETHIOPIC SYLLABLE KXU -12BA ETHIOPIC SYLLABLE KXI -12BB ETHIOPIC SYLLABLE KXAA -12BC ETHIOPIC SYLLABLE KXEE -12BD ETHIOPIC SYLLABLE KXE -12BE ETHIOPIC SYLLABLE KXO -12C0 ETHIOPIC SYLLABLE KXWA -12C2 ETHIOPIC SYLLABLE KXWI -12C3 ETHIOPIC SYLLABLE KXWAA -12C4 ETHIOPIC SYLLABLE KXWEE -12C5 ETHIOPIC SYLLABLE KXWE -12C8 ETHIOPIC SYLLABLE WA -12C9 ETHIOPIC SYLLABLE WU -12CA ETHIOPIC SYLLABLE WI -12CB ETHIOPIC SYLLABLE WAA -12CC ETHIOPIC SYLLABLE WEE -12CD ETHIOPIC SYLLABLE WE -12CE ETHIOPIC SYLLABLE WO -12CF ETHIOPIC SYLLABLE WOA -12D0 ETHIOPIC SYLLABLE PHARYNGEAL A -12D1 ETHIOPIC SYLLABLE PHARYNGEAL U -12D2 ETHIOPIC SYLLABLE PHARYNGEAL I -12D3 ETHIOPIC SYLLABLE PHARYNGEAL AA -12D4 ETHIOPIC SYLLABLE PHARYNGEAL EE -12D5 ETHIOPIC SYLLABLE PHARYNGEAL E -12D6 ETHIOPIC SYLLABLE PHARYNGEAL O -12D8 ETHIOPIC SYLLABLE ZA -12D9 ETHIOPIC SYLLABLE ZU -12DA ETHIOPIC SYLLABLE ZI -12DB ETHIOPIC SYLLABLE ZAA -12DC ETHIOPIC SYLLABLE ZEE -12DD ETHIOPIC SYLLABLE ZE -12DE ETHIOPIC SYLLABLE ZO -12DF ETHIOPIC SYLLABLE ZWA -12E0 ETHIOPIC SYLLABLE ZHA -12E1 ETHIOPIC SYLLABLE ZHU -12E2 ETHIOPIC SYLLABLE ZHI -12E3 ETHIOPIC SYLLABLE ZHAA -12E4 ETHIOPIC SYLLABLE ZHEE -12E5 ETHIOPIC SYLLABLE ZHE -12E6 ETHIOPIC SYLLABLE ZHO -12E7 ETHIOPIC SYLLABLE ZHWA -12E8 ETHIOPIC SYLLABLE YA -12E9 ETHIOPIC SYLLABLE YU -12EA ETHIOPIC SYLLABLE YI -12EB ETHIOPIC SYLLABLE YAA -12EC ETHIOPIC SYLLABLE YEE -12ED ETHIOPIC SYLLABLE YE -12EE ETHIOPIC SYLLABLE YO -12EF ETHIOPIC SYLLABLE YOA -12F0 ETHIOPIC SYLLABLE DA -12F1 ETHIOPIC SYLLABLE DU -12F2 ETHIOPIC SYLLABLE DI -12F3 ETHIOPIC SYLLABLE DAA -12F4 ETHIOPIC SYLLABLE DEE -12F5 ETHIOPIC SYLLABLE DE -12F6 ETHIOPIC SYLLABLE DO -12F7 ETHIOPIC SYLLABLE DWA -12F8 ETHIOPIC SYLLABLE DDA -12F9 ETHIOPIC SYLLABLE DDU -12FA ETHIOPIC SYLLABLE DDI -12FB ETHIOPIC SYLLABLE DDAA -12FC ETHIOPIC SYLLABLE DDEE -12FD ETHIOPIC SYLLABLE DDE -12FE 
ETHIOPIC SYLLABLE DDO -12FF ETHIOPIC SYLLABLE DDWA -1300 ETHIOPIC SYLLABLE JA -1301 ETHIOPIC SYLLABLE JU -1302 ETHIOPIC SYLLABLE JI -1303 ETHIOPIC SYLLABLE JAA -1304 ETHIOPIC SYLLABLE JEE -1305 ETHIOPIC SYLLABLE JE -1306 ETHIOPIC SYLLABLE JO -1307 ETHIOPIC SYLLABLE JWA -1308 ETHIOPIC SYLLABLE GA -1309 ETHIOPIC SYLLABLE GU -130A ETHIOPIC SYLLABLE GI -130B ETHIOPIC SYLLABLE GAA -130C ETHIOPIC SYLLABLE GEE -130D ETHIOPIC SYLLABLE GE -130E ETHIOPIC SYLLABLE GO -130F ETHIOPIC SYLLABLE GOA -1310 ETHIOPIC SYLLABLE GWA -1312 ETHIOPIC SYLLABLE GWI -1313 ETHIOPIC SYLLABLE GWAA -1314 ETHIOPIC SYLLABLE GWEE -1315 ETHIOPIC SYLLABLE GWE -1318 ETHIOPIC SYLLABLE GGA -1319 ETHIOPIC SYLLABLE GGU -131A ETHIOPIC SYLLABLE GGI -131B ETHIOPIC SYLLABLE GGAA -131C ETHIOPIC SYLLABLE GGEE -131D ETHIOPIC SYLLABLE GGE -131E ETHIOPIC SYLLABLE GGO -131F ETHIOPIC SYLLABLE GGWAA -1320 ETHIOPIC SYLLABLE THA -1321 ETHIOPIC SYLLABLE THU -1322 ETHIOPIC SYLLABLE THI -1323 ETHIOPIC SYLLABLE THAA -1324 ETHIOPIC SYLLABLE THEE -1325 ETHIOPIC SYLLABLE THE -1326 ETHIOPIC SYLLABLE THO -1327 ETHIOPIC SYLLABLE THWA -1328 ETHIOPIC SYLLABLE CHA -1329 ETHIOPIC SYLLABLE CHU -132A ETHIOPIC SYLLABLE CHI -132B ETHIOPIC SYLLABLE CHAA -132C ETHIOPIC SYLLABLE CHEE -132D ETHIOPIC SYLLABLE CHE -132E ETHIOPIC SYLLABLE CHO -132F ETHIOPIC SYLLABLE CHWA -1330 ETHIOPIC SYLLABLE PHA -1331 ETHIOPIC SYLLABLE PHU -1332 ETHIOPIC SYLLABLE PHI -1333 ETHIOPIC SYLLABLE PHAA -1334 ETHIOPIC SYLLABLE PHEE -1335 ETHIOPIC SYLLABLE PHE -1336 ETHIOPIC SYLLABLE PHO -1337 ETHIOPIC SYLLABLE PHWA -1338 ETHIOPIC SYLLABLE TSA -1339 ETHIOPIC SYLLABLE TSU -133A ETHIOPIC SYLLABLE TSI -133B ETHIOPIC SYLLABLE TSAA -133C ETHIOPIC SYLLABLE TSEE -133D ETHIOPIC SYLLABLE TSE -133E ETHIOPIC SYLLABLE TSO -133F ETHIOPIC SYLLABLE TSWA -1340 ETHIOPIC SYLLABLE TZA -1341 ETHIOPIC SYLLABLE TZU -1342 ETHIOPIC SYLLABLE TZI -1343 ETHIOPIC SYLLABLE TZAA -1344 ETHIOPIC SYLLABLE TZEE -1345 ETHIOPIC SYLLABLE TZE -1346 ETHIOPIC SYLLABLE TZO -1347 ETHIOPIC SYLLABLE TZOA -1348 
ETHIOPIC SYLLABLE FA -1349 ETHIOPIC SYLLABLE FU -134A ETHIOPIC SYLLABLE FI -134B ETHIOPIC SYLLABLE FAA -134C ETHIOPIC SYLLABLE FEE -134D ETHIOPIC SYLLABLE FE -134E ETHIOPIC SYLLABLE FO -134F ETHIOPIC SYLLABLE FWA -1350 ETHIOPIC SYLLABLE PA -1351 ETHIOPIC SYLLABLE PU -1352 ETHIOPIC SYLLABLE PI -1353 ETHIOPIC SYLLABLE PAA -1354 ETHIOPIC SYLLABLE PEE -1355 ETHIOPIC SYLLABLE PE -1356 ETHIOPIC SYLLABLE PO -1357 ETHIOPIC SYLLABLE PWA -1358 ETHIOPIC SYLLABLE RYA -1359 ETHIOPIC SYLLABLE MYA -135A ETHIOPIC SYLLABLE FYA -135F ETHIOPIC COMBINING GEMINATION MARK -1360 ETHIOPIC SECTION MARK -1361 ETHIOPIC WORDSPACE -1362 ETHIOPIC FULL STOP -1363 ETHIOPIC COMMA -1364 ETHIOPIC SEMICOLON -1365 ETHIOPIC COLON -1366 ETHIOPIC PREFACE COLON -1367 ETHIOPIC QUESTION MARK -1368 ETHIOPIC PARAGRAPH SEPARATOR -1369 ETHIOPIC DIGIT ONE -136A ETHIOPIC DIGIT TWO -136B ETHIOPIC DIGIT THREE -136C ETHIOPIC DIGIT FOUR -136D ETHIOPIC DIGIT FIVE -136E ETHIOPIC DIGIT SIX -136F ETHIOPIC DIGIT SEVEN -1370 ETHIOPIC DIGIT EIGHT -1371 ETHIOPIC DIGIT NINE -1372 ETHIOPIC NUMBER TEN -1373 ETHIOPIC NUMBER TWENTY -1374 ETHIOPIC NUMBER THIRTY -1375 ETHIOPIC NUMBER FORTY -1376 ETHIOPIC NUMBER FIFTY -1377 ETHIOPIC NUMBER SIXTY -1378 ETHIOPIC NUMBER SEVENTY -1379 ETHIOPIC NUMBER EIGHTY -137A ETHIOPIC NUMBER NINETY -137B ETHIOPIC NUMBER HUNDRED -137C ETHIOPIC NUMBER TEN THOUSAND -1380 ETHIOPIC SYLLABLE SEBATBEIT MWA -1381 ETHIOPIC SYLLABLE MWI -1382 ETHIOPIC SYLLABLE MWEE -1383 ETHIOPIC SYLLABLE MWE -1384 ETHIOPIC SYLLABLE SEBATBEIT BWA -1385 ETHIOPIC SYLLABLE BWI -1386 ETHIOPIC SYLLABLE BWEE -1387 ETHIOPIC SYLLABLE BWE -1388 ETHIOPIC SYLLABLE SEBATBEIT FWA -1389 ETHIOPIC SYLLABLE FWI -138A ETHIOPIC SYLLABLE FWEE -138B ETHIOPIC SYLLABLE FWE -138C ETHIOPIC SYLLABLE SEBATBEIT PWA -138D ETHIOPIC SYLLABLE PWI -138E ETHIOPIC SYLLABLE PWEE -138F ETHIOPIC SYLLABLE PWE -1390 ETHIOPIC TONAL MARK YIZET -1391 ETHIOPIC TONAL MARK DERET -1392 ETHIOPIC TONAL MARK RIKRIK -1393 ETHIOPIC TONAL MARK SHORT RIKRIK -1394 ETHIOPIC TONAL 
MARK DIFAT -1395 ETHIOPIC TONAL MARK KENAT -1396 ETHIOPIC TONAL MARK CHIRET -1397 ETHIOPIC TONAL MARK HIDET -1398 ETHIOPIC TONAL MARK DERET-HIDET -1399 ETHIOPIC TONAL MARK KURT -13A0 CHEROKEE LETTER A -13A1 CHEROKEE LETTER E -13A2 CHEROKEE LETTER I -13A3 CHEROKEE LETTER O -13A4 CHEROKEE LETTER U -13A5 CHEROKEE LETTER V -13A6 CHEROKEE LETTER GA -13A7 CHEROKEE LETTER KA -13A8 CHEROKEE LETTER GE -13A9 CHEROKEE LETTER GI -13AA CHEROKEE LETTER GO -13AB CHEROKEE LETTER GU -13AC CHEROKEE LETTER GV -13AD CHEROKEE LETTER HA -13AE CHEROKEE LETTER HE -13AF CHEROKEE LETTER HI -13B0 CHEROKEE LETTER HO -13B1 CHEROKEE LETTER HU -13B2 CHEROKEE LETTER HV -13B3 CHEROKEE LETTER LA -13B4 CHEROKEE LETTER LE -13B5 CHEROKEE LETTER LI -13B6 CHEROKEE LETTER LO -13B7 CHEROKEE LETTER LU -13B8 CHEROKEE LETTER LV -13B9 CHEROKEE LETTER MA -13BA CHEROKEE LETTER ME -13BB CHEROKEE LETTER MI -13BC CHEROKEE LETTER MO -13BD CHEROKEE LETTER MU -13BE CHEROKEE LETTER NA -13BF CHEROKEE LETTER HNA -13C0 CHEROKEE LETTER NAH -13C1 CHEROKEE LETTER NE -13C2 CHEROKEE LETTER NI -13C3 CHEROKEE LETTER NO -13C4 CHEROKEE LETTER NU -13C5 CHEROKEE LETTER NV -13C6 CHEROKEE LETTER QUA -13C7 CHEROKEE LETTER QUE -13C8 CHEROKEE LETTER QUI -13C9 CHEROKEE LETTER QUO -13CA CHEROKEE LETTER QUU -13CB CHEROKEE LETTER QUV -13CC CHEROKEE LETTER SA -13CD CHEROKEE LETTER S -13CE CHEROKEE LETTER SE -13CF CHEROKEE LETTER SI -13D0 CHEROKEE LETTER SO -13D1 CHEROKEE LETTER SU -13D2 CHEROKEE LETTER SV -13D3 CHEROKEE LETTER DA -13D4 CHEROKEE LETTER TA -13D5 CHEROKEE LETTER DE -13D6 CHEROKEE LETTER TE -13D7 CHEROKEE LETTER DI -13D8 CHEROKEE LETTER TI -13D9 CHEROKEE LETTER DO -13DA CHEROKEE LETTER DU -13DB CHEROKEE LETTER DV -13DC CHEROKEE LETTER DLA -13DD CHEROKEE LETTER TLA -13DE CHEROKEE LETTER TLE -13DF CHEROKEE LETTER TLI -13E0 CHEROKEE LETTER TLO -13E1 CHEROKEE LETTER TLU -13E2 CHEROKEE LETTER TLV -13E3 CHEROKEE LETTER TSA -13E4 CHEROKEE LETTER TSE -13E5 CHEROKEE LETTER TSI -13E6 CHEROKEE LETTER TSO -13E7 CHEROKEE LETTER TSU -13E8 
CHEROKEE LETTER TSV -13E9 CHEROKEE LETTER WA -13EA CHEROKEE LETTER WE -13EB CHEROKEE LETTER WI -13EC CHEROKEE LETTER WO -13ED CHEROKEE LETTER WU -13EE CHEROKEE LETTER WV -13EF CHEROKEE LETTER YA -13F0 CHEROKEE LETTER YE -13F1 CHEROKEE LETTER YI -13F2 CHEROKEE LETTER YO -13F3 CHEROKEE LETTER YU -13F4 CHEROKEE LETTER YV -1400 CANADIAN SYLLABICS HYPHEN -1401 CANADIAN SYLLABICS E -1402 CANADIAN SYLLABICS AAI -1403 CANADIAN SYLLABICS I -1404 CANADIAN SYLLABICS II -1405 CANADIAN SYLLABICS O -1406 CANADIAN SYLLABICS OO -1407 CANADIAN SYLLABICS Y-CREE OO -1408 CANADIAN SYLLABICS CARRIER EE -1409 CANADIAN SYLLABICS CARRIER I -140A CANADIAN SYLLABICS A -140B CANADIAN SYLLABICS AA -140C CANADIAN SYLLABICS WE -140D CANADIAN SYLLABICS WEST-CREE WE -140E CANADIAN SYLLABICS WI -140F CANADIAN SYLLABICS WEST-CREE WI -1410 CANADIAN SYLLABICS WII -1411 CANADIAN SYLLABICS WEST-CREE WII -1412 CANADIAN SYLLABICS WO -1413 CANADIAN SYLLABICS WEST-CREE WO -1414 CANADIAN SYLLABICS WOO -1415 CANADIAN SYLLABICS WEST-CREE WOO -1416 CANADIAN SYLLABICS NASKAPI WOO -1417 CANADIAN SYLLABICS WA -1418 CANADIAN SYLLABICS WEST-CREE WA -1419 CANADIAN SYLLABICS WAA -141A CANADIAN SYLLABICS WEST-CREE WAA -141B CANADIAN SYLLABICS NASKAPI WAA -141C CANADIAN SYLLABICS AI -141D CANADIAN SYLLABICS Y-CREE W -141E CANADIAN SYLLABICS GLOTTAL STOP -141F CANADIAN SYLLABICS FINAL ACUTE -1420 CANADIAN SYLLABICS FINAL GRAVE -1421 CANADIAN SYLLABICS FINAL BOTTOM HALF RING -1422 CANADIAN SYLLABICS FINAL TOP HALF RING -1423 CANADIAN SYLLABICS FINAL RIGHT HALF RING -1424 CANADIAN SYLLABICS FINAL RING -1425 CANADIAN SYLLABICS FINAL DOUBLE ACUTE -1426 CANADIAN SYLLABICS FINAL DOUBLE SHORT VERTICAL STROKES -1427 CANADIAN SYLLABICS FINAL MIDDLE DOT -1428 CANADIAN SYLLABICS FINAL SHORT HORIZONTAL STROKE -1429 CANADIAN SYLLABICS FINAL PLUS -142A CANADIAN SYLLABICS FINAL DOWN TACK -142B CANADIAN SYLLABICS EN -142C CANADIAN SYLLABICS IN -142D CANADIAN SYLLABICS ON -142E CANADIAN SYLLABICS AN -142F CANADIAN SYLLABICS PE -1430 
CANADIAN SYLLABICS PAAI -1431 CANADIAN SYLLABICS PI -1432 CANADIAN SYLLABICS PII -1433 CANADIAN SYLLABICS PO -1434 CANADIAN SYLLABICS POO -1435 CANADIAN SYLLABICS Y-CREE POO -1436 CANADIAN SYLLABICS CARRIER HEE -1437 CANADIAN SYLLABICS CARRIER HI -1438 CANADIAN SYLLABICS PA -1439 CANADIAN SYLLABICS PAA -143A CANADIAN SYLLABICS PWE -143B CANADIAN SYLLABICS WEST-CREE PWE -143C CANADIAN SYLLABICS PWI -143D CANADIAN SYLLABICS WEST-CREE PWI -143E CANADIAN SYLLABICS PWII -143F CANADIAN SYLLABICS WEST-CREE PWII -1440 CANADIAN SYLLABICS PWO -1441 CANADIAN SYLLABICS WEST-CREE PWO -1442 CANADIAN SYLLABICS PWOO -1443 CANADIAN SYLLABICS WEST-CREE PWOO -1444 CANADIAN SYLLABICS PWA -1445 CANADIAN SYLLABICS WEST-CREE PWA -1446 CANADIAN SYLLABICS PWAA -1447 CANADIAN SYLLABICS WEST-CREE PWAA -1448 CANADIAN SYLLABICS Y-CREE PWAA -1449 CANADIAN SYLLABICS P -144A CANADIAN SYLLABICS WEST-CREE P -144B CANADIAN SYLLABICS CARRIER H -144C CANADIAN SYLLABICS TE -144D CANADIAN SYLLABICS TAAI -144E CANADIAN SYLLABICS TI -144F CANADIAN SYLLABICS TII -1450 CANADIAN SYLLABICS TO -1451 CANADIAN SYLLABICS TOO -1452 CANADIAN SYLLABICS Y-CREE TOO -1453 CANADIAN SYLLABICS CARRIER DEE -1454 CANADIAN SYLLABICS CARRIER DI -1455 CANADIAN SYLLABICS TA -1456 CANADIAN SYLLABICS TAA -1457 CANADIAN SYLLABICS TWE -1458 CANADIAN SYLLABICS WEST-CREE TWE -1459 CANADIAN SYLLABICS TWI -145A CANADIAN SYLLABICS WEST-CREE TWI -145B CANADIAN SYLLABICS TWII -145C CANADIAN SYLLABICS WEST-CREE TWII -145D CANADIAN SYLLABICS TWO -145E CANADIAN SYLLABICS WEST-CREE TWO -145F CANADIAN SYLLABICS TWOO -1460 CANADIAN SYLLABICS WEST-CREE TWOO -1461 CANADIAN SYLLABICS TWA -1462 CANADIAN SYLLABICS WEST-CREE TWA -1463 CANADIAN SYLLABICS TWAA -1464 CANADIAN SYLLABICS WEST-CREE TWAA -1465 CANADIAN SYLLABICS NASKAPI TWAA -1466 CANADIAN SYLLABICS T -1467 CANADIAN SYLLABICS TTE -1468 CANADIAN SYLLABICS TTI -1469 CANADIAN SYLLABICS TTO -146A CANADIAN SYLLABICS TTA -146B CANADIAN SYLLABICS KE -146C CANADIAN SYLLABICS KAAI -146D CANADIAN 
SYLLABICS KI -146E CANADIAN SYLLABICS KII -146F CANADIAN SYLLABICS KO -1470 CANADIAN SYLLABICS KOO -1471 CANADIAN SYLLABICS Y-CREE KOO -1472 CANADIAN SYLLABICS KA -1473 CANADIAN SYLLABICS KAA -1474 CANADIAN SYLLABICS KWE -1475 CANADIAN SYLLABICS WEST-CREE KWE -1476 CANADIAN SYLLABICS KWI -1477 CANADIAN SYLLABICS WEST-CREE KWI -1478 CANADIAN SYLLABICS KWII -1479 CANADIAN SYLLABICS WEST-CREE KWII -147A CANADIAN SYLLABICS KWO -147B CANADIAN SYLLABICS WEST-CREE KWO -147C CANADIAN SYLLABICS KWOO -147D CANADIAN SYLLABICS WEST-CREE KWOO -147E CANADIAN SYLLABICS KWA -147F CANADIAN SYLLABICS WEST-CREE KWA -1480 CANADIAN SYLLABICS KWAA -1481 CANADIAN SYLLABICS WEST-CREE KWAA -1482 CANADIAN SYLLABICS NASKAPI KWAA -1483 CANADIAN SYLLABICS K -1484 CANADIAN SYLLABICS KW -1485 CANADIAN SYLLABICS SOUTH-SLAVEY KEH -1486 CANADIAN SYLLABICS SOUTH-SLAVEY KIH -1487 CANADIAN SYLLABICS SOUTH-SLAVEY KOH -1488 CANADIAN SYLLABICS SOUTH-SLAVEY KAH -1489 CANADIAN SYLLABICS CE -148A CANADIAN SYLLABICS CAAI -148B CANADIAN SYLLABICS CI -148C CANADIAN SYLLABICS CII -148D CANADIAN SYLLABICS CO -148E CANADIAN SYLLABICS COO -148F CANADIAN SYLLABICS Y-CREE COO -1490 CANADIAN SYLLABICS CA -1491 CANADIAN SYLLABICS CAA -1492 CANADIAN SYLLABICS CWE -1493 CANADIAN SYLLABICS WEST-CREE CWE -1494 CANADIAN SYLLABICS CWI -1495 CANADIAN SYLLABICS WEST-CREE CWI -1496 CANADIAN SYLLABICS CWII -1497 CANADIAN SYLLABICS WEST-CREE CWII -1498 CANADIAN SYLLABICS CWO -1499 CANADIAN SYLLABICS WEST-CREE CWO -149A CANADIAN SYLLABICS CWOO -149B CANADIAN SYLLABICS WEST-CREE CWOO -149C CANADIAN SYLLABICS CWA -149D CANADIAN SYLLABICS WEST-CREE CWA -149E CANADIAN SYLLABICS CWAA -149F CANADIAN SYLLABICS WEST-CREE CWAA -14A0 CANADIAN SYLLABICS NASKAPI CWAA -14A1 CANADIAN SYLLABICS C -14A2 CANADIAN SYLLABICS SAYISI TH -14A3 CANADIAN SYLLABICS ME -14A4 CANADIAN SYLLABICS MAAI -14A5 CANADIAN SYLLABICS MI -14A6 CANADIAN SYLLABICS MII -14A7 CANADIAN SYLLABICS MO -14A8 CANADIAN SYLLABICS MOO -14A9 CANADIAN SYLLABICS Y-CREE MOO -14AA 
CANADIAN SYLLABICS MA -14AB CANADIAN SYLLABICS MAA -14AC CANADIAN SYLLABICS MWE -14AD CANADIAN SYLLABICS WEST-CREE MWE -14AE CANADIAN SYLLABICS MWI -14AF CANADIAN SYLLABICS WEST-CREE MWI -14B0 CANADIAN SYLLABICS MWII -14B1 CANADIAN SYLLABICS WEST-CREE MWII -14B2 CANADIAN SYLLABICS MWO -14B3 CANADIAN SYLLABICS WEST-CREE MWO -14B4 CANADIAN SYLLABICS MWOO -14B5 CANADIAN SYLLABICS WEST-CREE MWOO -14B6 CANADIAN SYLLABICS MWA -14B7 CANADIAN SYLLABICS WEST-CREE MWA -14B8 CANADIAN SYLLABICS MWAA -14B9 CANADIAN SYLLABICS WEST-CREE MWAA -14BA CANADIAN SYLLABICS NASKAPI MWAA -14BB CANADIAN SYLLABICS M -14BC CANADIAN SYLLABICS WEST-CREE M -14BD CANADIAN SYLLABICS MH -14BE CANADIAN SYLLABICS ATHAPASCAN M -14BF CANADIAN SYLLABICS SAYISI M -14C0 CANADIAN SYLLABICS NE -14C1 CANADIAN SYLLABICS NAAI -14C2 CANADIAN SYLLABICS NI -14C3 CANADIAN SYLLABICS NII -14C4 CANADIAN SYLLABICS NO -14C5 CANADIAN SYLLABICS NOO -14C6 CANADIAN SYLLABICS Y-CREE NOO -14C7 CANADIAN SYLLABICS NA -14C8 CANADIAN SYLLABICS NAA -14C9 CANADIAN SYLLABICS NWE -14CA CANADIAN SYLLABICS WEST-CREE NWE -14CB CANADIAN SYLLABICS NWA -14CC CANADIAN SYLLABICS WEST-CREE NWA -14CD CANADIAN SYLLABICS NWAA -14CE CANADIAN SYLLABICS WEST-CREE NWAA -14CF CANADIAN SYLLABICS NASKAPI NWAA -14D0 CANADIAN SYLLABICS N -14D1 CANADIAN SYLLABICS CARRIER NG -14D2 CANADIAN SYLLABICS NH -14D3 CANADIAN SYLLABICS LE -14D4 CANADIAN SYLLABICS LAAI -14D5 CANADIAN SYLLABICS LI -14D6 CANADIAN SYLLABICS LII -14D7 CANADIAN SYLLABICS LO -14D8 CANADIAN SYLLABICS LOO -14D9 CANADIAN SYLLABICS Y-CREE LOO -14DA CANADIAN SYLLABICS LA -14DB CANADIAN SYLLABICS LAA -14DC CANADIAN SYLLABICS LWE -14DD CANADIAN SYLLABICS WEST-CREE LWE -14DE CANADIAN SYLLABICS LWI -14DF CANADIAN SYLLABICS WEST-CREE LWI -14E0 CANADIAN SYLLABICS LWII -14E1 CANADIAN SYLLABICS WEST-CREE LWII -14E2 CANADIAN SYLLABICS LWO -14E3 CANADIAN SYLLABICS WEST-CREE LWO -14E4 CANADIAN SYLLABICS LWOO -14E5 CANADIAN SYLLABICS WEST-CREE LWOO -14E6 CANADIAN SYLLABICS LWA -14E7 CANADIAN SYLLABICS 
WEST-CREE LWA -14E8 CANADIAN SYLLABICS LWAA -14E9 CANADIAN SYLLABICS WEST-CREE LWAA -14EA CANADIAN SYLLABICS L -14EB CANADIAN SYLLABICS WEST-CREE L -14EC CANADIAN SYLLABICS MEDIAL L -14ED CANADIAN SYLLABICS SE -14EE CANADIAN SYLLABICS SAAI -14EF CANADIAN SYLLABICS SI -14F0 CANADIAN SYLLABICS SII -14F1 CANADIAN SYLLABICS SO -14F2 CANADIAN SYLLABICS SOO -14F3 CANADIAN SYLLABICS Y-CREE SOO -14F4 CANADIAN SYLLABICS SA -14F5 CANADIAN SYLLABICS SAA -14F6 CANADIAN SYLLABICS SWE -14F7 CANADIAN SYLLABICS WEST-CREE SWE -14F8 CANADIAN SYLLABICS SWI -14F9 CANADIAN SYLLABICS WEST-CREE SWI -14FA CANADIAN SYLLABICS SWII -14FB CANADIAN SYLLABICS WEST-CREE SWII -14FC CANADIAN SYLLABICS SWO -14FD CANADIAN SYLLABICS WEST-CREE SWO -14FE CANADIAN SYLLABICS SWOO -14FF CANADIAN SYLLABICS WEST-CREE SWOO -1500 CANADIAN SYLLABICS SWA -1501 CANADIAN SYLLABICS WEST-CREE SWA -1502 CANADIAN SYLLABICS SWAA -1503 CANADIAN SYLLABICS WEST-CREE SWAA -1504 CANADIAN SYLLABICS NASKAPI SWAA -1505 CANADIAN SYLLABICS S -1506 CANADIAN SYLLABICS ATHAPASCAN S -1507 CANADIAN SYLLABICS SW -1508 CANADIAN SYLLABICS BLACKFOOT S -1509 CANADIAN SYLLABICS MOOSE-CREE SK -150A CANADIAN SYLLABICS NASKAPI SKW -150B CANADIAN SYLLABICS NASKAPI S-W -150C CANADIAN SYLLABICS NASKAPI SPWA -150D CANADIAN SYLLABICS NASKAPI STWA -150E CANADIAN SYLLABICS NASKAPI SKWA -150F CANADIAN SYLLABICS NASKAPI SCWA -1510 CANADIAN SYLLABICS SHE -1511 CANADIAN SYLLABICS SHI -1512 CANADIAN SYLLABICS SHII -1513 CANADIAN SYLLABICS SHO -1514 CANADIAN SYLLABICS SHOO -1515 CANADIAN SYLLABICS SHA -1516 CANADIAN SYLLABICS SHAA -1517 CANADIAN SYLLABICS SHWE -1518 CANADIAN SYLLABICS WEST-CREE SHWE -1519 CANADIAN SYLLABICS SHWI -151A CANADIAN SYLLABICS WEST-CREE SHWI -151B CANADIAN SYLLABICS SHWII -151C CANADIAN SYLLABICS WEST-CREE SHWII -151D CANADIAN SYLLABICS SHWO -151E CANADIAN SYLLABICS WEST-CREE SHWO -151F CANADIAN SYLLABICS SHWOO -1520 CANADIAN SYLLABICS WEST-CREE SHWOO -1521 CANADIAN SYLLABICS SHWA -1522 CANADIAN SYLLABICS WEST-CREE SHWA -1523 
CANADIAN SYLLABICS SHWAA -1524 CANADIAN SYLLABICS WEST-CREE SHWAA -1525 CANADIAN SYLLABICS SH -1526 CANADIAN SYLLABICS YE -1527 CANADIAN SYLLABICS YAAI -1528 CANADIAN SYLLABICS YI -1529 CANADIAN SYLLABICS YII -152A CANADIAN SYLLABICS YO -152B CANADIAN SYLLABICS YOO -152C CANADIAN SYLLABICS Y-CREE YOO -152D CANADIAN SYLLABICS YA -152E CANADIAN SYLLABICS YAA -152F CANADIAN SYLLABICS YWE -1530 CANADIAN SYLLABICS WEST-CREE YWE -1531 CANADIAN SYLLABICS YWI -1532 CANADIAN SYLLABICS WEST-CREE YWI -1533 CANADIAN SYLLABICS YWII -1534 CANADIAN SYLLABICS WEST-CREE YWII -1535 CANADIAN SYLLABICS YWO -1536 CANADIAN SYLLABICS WEST-CREE YWO -1537 CANADIAN SYLLABICS YWOO -1538 CANADIAN SYLLABICS WEST-CREE YWOO -1539 CANADIAN SYLLABICS YWA -153A CANADIAN SYLLABICS WEST-CREE YWA -153B CANADIAN SYLLABICS YWAA -153C CANADIAN SYLLABICS WEST-CREE YWAA -153D CANADIAN SYLLABICS NASKAPI YWAA -153E CANADIAN SYLLABICS Y -153F CANADIAN SYLLABICS BIBLE-CREE Y -1540 CANADIAN SYLLABICS WEST-CREE Y -1541 CANADIAN SYLLABICS SAYISI YI -1542 CANADIAN SYLLABICS RE -1543 CANADIAN SYLLABICS R-CREE RE -1544 CANADIAN SYLLABICS WEST-CREE LE -1545 CANADIAN SYLLABICS RAAI -1546 CANADIAN SYLLABICS RI -1547 CANADIAN SYLLABICS RII -1548 CANADIAN SYLLABICS RO -1549 CANADIAN SYLLABICS ROO -154A CANADIAN SYLLABICS WEST-CREE LO -154B CANADIAN SYLLABICS RA -154C CANADIAN SYLLABICS RAA -154D CANADIAN SYLLABICS WEST-CREE LA -154E CANADIAN SYLLABICS RWAA -154F CANADIAN SYLLABICS WEST-CREE RWAA -1550 CANADIAN SYLLABICS R -1551 CANADIAN SYLLABICS WEST-CREE R -1552 CANADIAN SYLLABICS MEDIAL R -1553 CANADIAN SYLLABICS FE -1554 CANADIAN SYLLABICS FAAI -1555 CANADIAN SYLLABICS FI -1556 CANADIAN SYLLABICS FII -1557 CANADIAN SYLLABICS FO -1558 CANADIAN SYLLABICS FOO -1559 CANADIAN SYLLABICS FA -155A CANADIAN SYLLABICS FAA -155B CANADIAN SYLLABICS FWAA -155C CANADIAN SYLLABICS WEST-CREE FWAA -155D CANADIAN SYLLABICS F -155E CANADIAN SYLLABICS THE -155F CANADIAN SYLLABICS N-CREE THE -1560 CANADIAN SYLLABICS THI -1561 CANADIAN 
SYLLABICS N-CREE THI -1562 CANADIAN SYLLABICS THII -1563 CANADIAN SYLLABICS N-CREE THII -1564 CANADIAN SYLLABICS THO -1565 CANADIAN SYLLABICS THOO -1566 CANADIAN SYLLABICS THA -1567 CANADIAN SYLLABICS THAA -1568 CANADIAN SYLLABICS THWAA -1569 CANADIAN SYLLABICS WEST-CREE THWAA -156A CANADIAN SYLLABICS TH -156B CANADIAN SYLLABICS TTHE -156C CANADIAN SYLLABICS TTHI -156D CANADIAN SYLLABICS TTHO -156E CANADIAN SYLLABICS TTHA -156F CANADIAN SYLLABICS TTH -1570 CANADIAN SYLLABICS TYE -1571 CANADIAN SYLLABICS TYI -1572 CANADIAN SYLLABICS TYO -1573 CANADIAN SYLLABICS TYA -1574 CANADIAN SYLLABICS NUNAVIK HE -1575 CANADIAN SYLLABICS NUNAVIK HI -1576 CANADIAN SYLLABICS NUNAVIK HII -1577 CANADIAN SYLLABICS NUNAVIK HO -1578 CANADIAN SYLLABICS NUNAVIK HOO -1579 CANADIAN SYLLABICS NUNAVIK HA -157A CANADIAN SYLLABICS NUNAVIK HAA -157B CANADIAN SYLLABICS NUNAVIK H -157C CANADIAN SYLLABICS NUNAVUT H -157D CANADIAN SYLLABICS HK -157E CANADIAN SYLLABICS QAAI -157F CANADIAN SYLLABICS QI -1580 CANADIAN SYLLABICS QII -1581 CANADIAN SYLLABICS QO -1582 CANADIAN SYLLABICS QOO -1583 CANADIAN SYLLABICS QA -1584 CANADIAN SYLLABICS QAA -1585 CANADIAN SYLLABICS Q -1586 CANADIAN SYLLABICS TLHE -1587 CANADIAN SYLLABICS TLHI -1588 CANADIAN SYLLABICS TLHO -1589 CANADIAN SYLLABICS TLHA -158A CANADIAN SYLLABICS WEST-CREE RE -158B CANADIAN SYLLABICS WEST-CREE RI -158C CANADIAN SYLLABICS WEST-CREE RO -158D CANADIAN SYLLABICS WEST-CREE RA -158E CANADIAN SYLLABICS NGAAI -158F CANADIAN SYLLABICS NGI -1590 CANADIAN SYLLABICS NGII -1591 CANADIAN SYLLABICS NGO -1592 CANADIAN SYLLABICS NGOO -1593 CANADIAN SYLLABICS NGA -1594 CANADIAN SYLLABICS NGAA -1595 CANADIAN SYLLABICS NG -1596 CANADIAN SYLLABICS NNG -1597 CANADIAN SYLLABICS SAYISI SHE -1598 CANADIAN SYLLABICS SAYISI SHI -1599 CANADIAN SYLLABICS SAYISI SHO -159A CANADIAN SYLLABICS SAYISI SHA -159B CANADIAN SYLLABICS WOODS-CREE THE -159C CANADIAN SYLLABICS WOODS-CREE THI -159D CANADIAN SYLLABICS WOODS-CREE THO -159E CANADIAN SYLLABICS WOODS-CREE THA -159F 
CANADIAN SYLLABICS WOODS-CREE TH -15A0 CANADIAN SYLLABICS LHI -15A1 CANADIAN SYLLABICS LHII -15A2 CANADIAN SYLLABICS LHO -15A3 CANADIAN SYLLABICS LHOO -15A4 CANADIAN SYLLABICS LHA -15A5 CANADIAN SYLLABICS LHAA -15A6 CANADIAN SYLLABICS LH -15A7 CANADIAN SYLLABICS TH-CREE THE -15A8 CANADIAN SYLLABICS TH-CREE THI -15A9 CANADIAN SYLLABICS TH-CREE THII -15AA CANADIAN SYLLABICS TH-CREE THO -15AB CANADIAN SYLLABICS TH-CREE THOO -15AC CANADIAN SYLLABICS TH-CREE THA -15AD CANADIAN SYLLABICS TH-CREE THAA -15AE CANADIAN SYLLABICS TH-CREE TH -15AF CANADIAN SYLLABICS AIVILIK B -15B0 CANADIAN SYLLABICS BLACKFOOT E -15B1 CANADIAN SYLLABICS BLACKFOOT I -15B2 CANADIAN SYLLABICS BLACKFOOT O -15B3 CANADIAN SYLLABICS BLACKFOOT A -15B4 CANADIAN SYLLABICS BLACKFOOT WE -15B5 CANADIAN SYLLABICS BLACKFOOT WI -15B6 CANADIAN SYLLABICS BLACKFOOT WO -15B7 CANADIAN SYLLABICS BLACKFOOT WA -15B8 CANADIAN SYLLABICS BLACKFOOT NE -15B9 CANADIAN SYLLABICS BLACKFOOT NI -15BA CANADIAN SYLLABICS BLACKFOOT NO -15BB CANADIAN SYLLABICS BLACKFOOT NA -15BC CANADIAN SYLLABICS BLACKFOOT KE -15BD CANADIAN SYLLABICS BLACKFOOT KI -15BE CANADIAN SYLLABICS BLACKFOOT KO -15BF CANADIAN SYLLABICS BLACKFOOT KA -15C0 CANADIAN SYLLABICS SAYISI HE -15C1 CANADIAN SYLLABICS SAYISI HI -15C2 CANADIAN SYLLABICS SAYISI HO -15C3 CANADIAN SYLLABICS SAYISI HA -15C4 CANADIAN SYLLABICS CARRIER GHU -15C5 CANADIAN SYLLABICS CARRIER GHO -15C6 CANADIAN SYLLABICS CARRIER GHE -15C7 CANADIAN SYLLABICS CARRIER GHEE -15C8 CANADIAN SYLLABICS CARRIER GHI -15C9 CANADIAN SYLLABICS CARRIER GHA -15CA CANADIAN SYLLABICS CARRIER RU -15CB CANADIAN SYLLABICS CARRIER RO -15CC CANADIAN SYLLABICS CARRIER RE -15CD CANADIAN SYLLABICS CARRIER REE -15CE CANADIAN SYLLABICS CARRIER RI -15CF CANADIAN SYLLABICS CARRIER RA -15D0 CANADIAN SYLLABICS CARRIER WU -15D1 CANADIAN SYLLABICS CARRIER WO -15D2 CANADIAN SYLLABICS CARRIER WE -15D3 CANADIAN SYLLABICS CARRIER WEE -15D4 CANADIAN SYLLABICS CARRIER WI -15D5 CANADIAN SYLLABICS CARRIER WA -15D6 CANADIAN SYLLABICS 
CARRIER HWU -15D7 CANADIAN SYLLABICS CARRIER HWO -15D8 CANADIAN SYLLABICS CARRIER HWE -15D9 CANADIAN SYLLABICS CARRIER HWEE -15DA CANADIAN SYLLABICS CARRIER HWI -15DB CANADIAN SYLLABICS CARRIER HWA -15DC CANADIAN SYLLABICS CARRIER THU -15DD CANADIAN SYLLABICS CARRIER THO -15DE CANADIAN SYLLABICS CARRIER THE -15DF CANADIAN SYLLABICS CARRIER THEE -15E0 CANADIAN SYLLABICS CARRIER THI -15E1 CANADIAN SYLLABICS CARRIER THA -15E2 CANADIAN SYLLABICS CARRIER TTU -15E3 CANADIAN SYLLABICS CARRIER TTO -15E4 CANADIAN SYLLABICS CARRIER TTE -15E5 CANADIAN SYLLABICS CARRIER TTEE -15E6 CANADIAN SYLLABICS CARRIER TTI -15E7 CANADIAN SYLLABICS CARRIER TTA -15E8 CANADIAN SYLLABICS CARRIER PU -15E9 CANADIAN SYLLABICS CARRIER PO -15EA CANADIAN SYLLABICS CARRIER PE -15EB CANADIAN SYLLABICS CARRIER PEE -15EC CANADIAN SYLLABICS CARRIER PI -15ED CANADIAN SYLLABICS CARRIER PA -15EE CANADIAN SYLLABICS CARRIER P -15EF CANADIAN SYLLABICS CARRIER GU -15F0 CANADIAN SYLLABICS CARRIER GO -15F1 CANADIAN SYLLABICS CARRIER GE -15F2 CANADIAN SYLLABICS CARRIER GEE -15F3 CANADIAN SYLLABICS CARRIER GI -15F4 CANADIAN SYLLABICS CARRIER GA -15F5 CANADIAN SYLLABICS CARRIER KHU -15F6 CANADIAN SYLLABICS CARRIER KHO -15F7 CANADIAN SYLLABICS CARRIER KHE -15F8 CANADIAN SYLLABICS CARRIER KHEE -15F9 CANADIAN SYLLABICS CARRIER KHI -15FA CANADIAN SYLLABICS CARRIER KHA -15FB CANADIAN SYLLABICS CARRIER KKU -15FC CANADIAN SYLLABICS CARRIER KKO -15FD CANADIAN SYLLABICS CARRIER KKE -15FE CANADIAN SYLLABICS CARRIER KKEE -15FF CANADIAN SYLLABICS CARRIER KKI -1600 CANADIAN SYLLABICS CARRIER KKA -1601 CANADIAN SYLLABICS CARRIER KK -1602 CANADIAN SYLLABICS CARRIER NU -1603 CANADIAN SYLLABICS CARRIER NO -1604 CANADIAN SYLLABICS CARRIER NE -1605 CANADIAN SYLLABICS CARRIER NEE -1606 CANADIAN SYLLABICS CARRIER NI -1607 CANADIAN SYLLABICS CARRIER NA -1608 CANADIAN SYLLABICS CARRIER MU -1609 CANADIAN SYLLABICS CARRIER MO -160A CANADIAN SYLLABICS CARRIER ME -160B CANADIAN SYLLABICS CARRIER MEE -160C CANADIAN SYLLABICS CARRIER MI -160D 
CANADIAN SYLLABICS CARRIER MA -160E CANADIAN SYLLABICS CARRIER YU -160F CANADIAN SYLLABICS CARRIER YO -1610 CANADIAN SYLLABICS CARRIER YE -1611 CANADIAN SYLLABICS CARRIER YEE -1612 CANADIAN SYLLABICS CARRIER YI -1613 CANADIAN SYLLABICS CARRIER YA -1614 CANADIAN SYLLABICS CARRIER JU -1615 CANADIAN SYLLABICS SAYISI JU -1616 CANADIAN SYLLABICS CARRIER JO -1617 CANADIAN SYLLABICS CARRIER JE -1618 CANADIAN SYLLABICS CARRIER JEE -1619 CANADIAN SYLLABICS CARRIER JI -161A CANADIAN SYLLABICS SAYISI JI -161B CANADIAN SYLLABICS CARRIER JA -161C CANADIAN SYLLABICS CARRIER JJU -161D CANADIAN SYLLABICS CARRIER JJO -161E CANADIAN SYLLABICS CARRIER JJE -161F CANADIAN SYLLABICS CARRIER JJEE -1620 CANADIAN SYLLABICS CARRIER JJI -1621 CANADIAN SYLLABICS CARRIER JJA -1622 CANADIAN SYLLABICS CARRIER LU -1623 CANADIAN SYLLABICS CARRIER LO -1624 CANADIAN SYLLABICS CARRIER LE -1625 CANADIAN SYLLABICS CARRIER LEE -1626 CANADIAN SYLLABICS CARRIER LI -1627 CANADIAN SYLLABICS CARRIER LA -1628 CANADIAN SYLLABICS CARRIER DLU -1629 CANADIAN SYLLABICS CARRIER DLO -162A CANADIAN SYLLABICS CARRIER DLE -162B CANADIAN SYLLABICS CARRIER DLEE -162C CANADIAN SYLLABICS CARRIER DLI -162D CANADIAN SYLLABICS CARRIER DLA -162E CANADIAN SYLLABICS CARRIER LHU -162F CANADIAN SYLLABICS CARRIER LHO -1630 CANADIAN SYLLABICS CARRIER LHE -1631 CANADIAN SYLLABICS CARRIER LHEE -1632 CANADIAN SYLLABICS CARRIER LHI -1633 CANADIAN SYLLABICS CARRIER LHA -1634 CANADIAN SYLLABICS CARRIER TLHU -1635 CANADIAN SYLLABICS CARRIER TLHO -1636 CANADIAN SYLLABICS CARRIER TLHE -1637 CANADIAN SYLLABICS CARRIER TLHEE -1638 CANADIAN SYLLABICS CARRIER TLHI -1639 CANADIAN SYLLABICS CARRIER TLHA -163A CANADIAN SYLLABICS CARRIER TLU -163B CANADIAN SYLLABICS CARRIER TLO -163C CANADIAN SYLLABICS CARRIER TLE -163D CANADIAN SYLLABICS CARRIER TLEE -163E CANADIAN SYLLABICS CARRIER TLI -163F CANADIAN SYLLABICS CARRIER TLA -1640 CANADIAN SYLLABICS CARRIER ZU -1641 CANADIAN SYLLABICS CARRIER ZO -1642 CANADIAN SYLLABICS CARRIER ZE -1643 CANADIAN 
SYLLABICS CARRIER ZEE -1644 CANADIAN SYLLABICS CARRIER ZI -1645 CANADIAN SYLLABICS CARRIER ZA -1646 CANADIAN SYLLABICS CARRIER Z -1647 CANADIAN SYLLABICS CARRIER INITIAL Z -1648 CANADIAN SYLLABICS CARRIER DZU -1649 CANADIAN SYLLABICS CARRIER DZO -164A CANADIAN SYLLABICS CARRIER DZE -164B CANADIAN SYLLABICS CARRIER DZEE -164C CANADIAN SYLLABICS CARRIER DZI -164D CANADIAN SYLLABICS CARRIER DZA -164E CANADIAN SYLLABICS CARRIER SU -164F CANADIAN SYLLABICS CARRIER SO -1650 CANADIAN SYLLABICS CARRIER SE -1651 CANADIAN SYLLABICS CARRIER SEE -1652 CANADIAN SYLLABICS CARRIER SI -1653 CANADIAN SYLLABICS CARRIER SA -1654 CANADIAN SYLLABICS CARRIER SHU -1655 CANADIAN SYLLABICS CARRIER SHO -1656 CANADIAN SYLLABICS CARRIER SHE -1657 CANADIAN SYLLABICS CARRIER SHEE -1658 CANADIAN SYLLABICS CARRIER SHI -1659 CANADIAN SYLLABICS CARRIER SHA -165A CANADIAN SYLLABICS CARRIER SH -165B CANADIAN SYLLABICS CARRIER TSU -165C CANADIAN SYLLABICS CARRIER TSO -165D CANADIAN SYLLABICS CARRIER TSE -165E CANADIAN SYLLABICS CARRIER TSEE -165F CANADIAN SYLLABICS CARRIER TSI -1660 CANADIAN SYLLABICS CARRIER TSA -1661 CANADIAN SYLLABICS CARRIER CHU -1662 CANADIAN SYLLABICS CARRIER CHO -1663 CANADIAN SYLLABICS CARRIER CHE -1664 CANADIAN SYLLABICS CARRIER CHEE -1665 CANADIAN SYLLABICS CARRIER CHI -1666 CANADIAN SYLLABICS CARRIER CHA -1667 CANADIAN SYLLABICS CARRIER TTSU -1668 CANADIAN SYLLABICS CARRIER TTSO -1669 CANADIAN SYLLABICS CARRIER TTSE -166A CANADIAN SYLLABICS CARRIER TTSEE -166B CANADIAN SYLLABICS CARRIER TTSI -166C CANADIAN SYLLABICS CARRIER TTSA -166D CANADIAN SYLLABICS CHI SIGN -166E CANADIAN SYLLABICS FULL STOP -166F CANADIAN SYLLABICS QAI -1670 CANADIAN SYLLABICS NGAI -1671 CANADIAN SYLLABICS NNGI -1672 CANADIAN SYLLABICS NNGII -1673 CANADIAN SYLLABICS NNGO -1674 CANADIAN SYLLABICS NNGOO -1675 CANADIAN SYLLABICS NNGA -1676 CANADIAN SYLLABICS NNGAA -1677 CANADIAN SYLLABICS WOODS-CREE THWEE -1678 CANADIAN SYLLABICS WOODS-CREE THWI -1679 CANADIAN SYLLABICS WOODS-CREE THWII -167A CANADIAN 
SYLLABICS WOODS-CREE THWO -167B CANADIAN SYLLABICS WOODS-CREE THWOO -167C CANADIAN SYLLABICS WOODS-CREE THWA -167D CANADIAN SYLLABICS WOODS-CREE THWAA -167E CANADIAN SYLLABICS WOODS-CREE FINAL TH -167F CANADIAN SYLLABICS BLACKFOOT W -1680 OGHAM SPACE MARK -1681 OGHAM LETTER BEITH -1682 OGHAM LETTER LUIS -1683 OGHAM LETTER FEARN -1684 OGHAM LETTER SAIL -1685 OGHAM LETTER NION -1686 OGHAM LETTER UATH -1687 OGHAM LETTER DAIR -1688 OGHAM LETTER TINNE -1689 OGHAM LETTER COLL -168A OGHAM LETTER CEIRT -168B OGHAM LETTER MUIN -168C OGHAM LETTER GORT -168D OGHAM LETTER NGEADAL -168E OGHAM LETTER STRAIF -168F OGHAM LETTER RUIS -1690 OGHAM LETTER AILM -1691 OGHAM LETTER ONN -1692 OGHAM LETTER UR -1693 OGHAM LETTER EADHADH -1694 OGHAM LETTER IODHADH -1695 OGHAM LETTER EABHADH -1696 OGHAM LETTER OR -1697 OGHAM LETTER UILLEANN -1698 OGHAM LETTER IFIN -1699 OGHAM LETTER EAMHANCHOLL -169A OGHAM LETTER PEITH -169B OGHAM FEATHER MARK -169C OGHAM REVERSED FEATHER MARK -16A0 RUNIC LETTER FEHU FEOH FE F -16A1 RUNIC LETTER V -16A2 RUNIC LETTER URUZ UR U -16A3 RUNIC LETTER YR -16A4 RUNIC LETTER Y -16A5 RUNIC LETTER W -16A6 RUNIC LETTER THURISAZ THURS THORN -16A7 RUNIC LETTER ETH -16A8 RUNIC LETTER ANSUZ A -16A9 RUNIC LETTER OS O -16AA RUNIC LETTER AC A -16AB RUNIC LETTER AESC -16AC RUNIC LETTER LONG-BRANCH-OSS O -16AD RUNIC LETTER SHORT-TWIG-OSS O -16AE RUNIC LETTER O -16AF RUNIC LETTER OE -16B0 RUNIC LETTER ON -16B1 RUNIC LETTER RAIDO RAD REID R -16B2 RUNIC LETTER KAUNA -16B3 RUNIC LETTER CEN -16B4 RUNIC LETTER KAUN K -16B5 RUNIC LETTER G -16B6 RUNIC LETTER ENG -16B7 RUNIC LETTER GEBO GYFU G -16B8 RUNIC LETTER GAR -16B9 RUNIC LETTER WUNJO WYNN W -16BA RUNIC LETTER HAGLAZ H -16BB RUNIC LETTER HAEGL H -16BC RUNIC LETTER LONG-BRANCH-HAGALL H -16BD RUNIC LETTER SHORT-TWIG-HAGALL H -16BE RUNIC LETTER NAUDIZ NYD NAUD N -16BF RUNIC LETTER SHORT-TWIG-NAUD N -16C0 RUNIC LETTER DOTTED-N -16C1 RUNIC LETTER ISAZ IS ISS I -16C2 RUNIC LETTER E -16C3 RUNIC LETTER JERAN J -16C4 RUNIC LETTER GER -16C5 
RUNIC LETTER LONG-BRANCH-AR AE -16C6 RUNIC LETTER SHORT-TWIG-AR A -16C7 RUNIC LETTER IWAZ EOH -16C8 RUNIC LETTER PERTHO PEORTH P -16C9 RUNIC LETTER ALGIZ EOLHX -16CA RUNIC LETTER SOWILO S -16CB RUNIC LETTER SIGEL LONG-BRANCH-SOL S -16CC RUNIC LETTER SHORT-TWIG-SOL S -16CD RUNIC LETTER C -16CE RUNIC LETTER Z -16CF RUNIC LETTER TIWAZ TIR TYR T -16D0 RUNIC LETTER SHORT-TWIG-TYR T -16D1 RUNIC LETTER D -16D2 RUNIC LETTER BERKANAN BEORC BJARKAN B -16D3 RUNIC LETTER SHORT-TWIG-BJARKAN B -16D4 RUNIC LETTER DOTTED-P -16D5 RUNIC LETTER OPEN-P -16D6 RUNIC LETTER EHWAZ EH E -16D7 RUNIC LETTER MANNAZ MAN M -16D8 RUNIC LETTER LONG-BRANCH-MADR M -16D9 RUNIC LETTER SHORT-TWIG-MADR M -16DA RUNIC LETTER LAUKAZ LAGU LOGR L -16DB RUNIC LETTER DOTTED-L -16DC RUNIC LETTER INGWAZ -16DD RUNIC LETTER ING -16DE RUNIC LETTER DAGAZ DAEG D -16DF RUNIC LETTER OTHALAN ETHEL O -16E0 RUNIC LETTER EAR -16E1 RUNIC LETTER IOR -16E2 RUNIC LETTER CWEORTH -16E3 RUNIC LETTER CALC -16E4 RUNIC LETTER CEALC -16E5 RUNIC LETTER STAN -16E6 RUNIC LETTER LONG-BRANCH-YR -16E7 RUNIC LETTER SHORT-TWIG-YR -16E8 RUNIC LETTER ICELANDIC-YR -16E9 RUNIC LETTER Q -16EA RUNIC LETTER X -16EB RUNIC SINGLE PUNCTUATION -16EC RUNIC MULTIPLE PUNCTUATION -16ED RUNIC CROSS PUNCTUATION -16EE RUNIC ARLAUG SYMBOL -16EF RUNIC TVIMADUR SYMBOL -16F0 RUNIC BELGTHOR SYMBOL -1700 TAGALOG LETTER A -1701 TAGALOG LETTER I -1702 TAGALOG LETTER U -1703 TAGALOG LETTER KA -1704 TAGALOG LETTER GA -1705 TAGALOG LETTER NGA -1706 TAGALOG LETTER TA -1707 TAGALOG LETTER DA -1708 TAGALOG LETTER NA -1709 TAGALOG LETTER PA -170A TAGALOG LETTER BA -170B TAGALOG LETTER MA -170C TAGALOG LETTER YA -170E TAGALOG LETTER LA -170F TAGALOG LETTER WA -1710 TAGALOG LETTER SA -1711 TAGALOG LETTER HA -1712 TAGALOG VOWEL SIGN I -1713 TAGALOG VOWEL SIGN U -1714 TAGALOG SIGN VIRAMA -1720 HANUNOO LETTER A -1721 HANUNOO LETTER I -1722 HANUNOO LETTER U -1723 HANUNOO LETTER KA -1724 HANUNOO LETTER GA -1725 HANUNOO LETTER NGA -1726 HANUNOO LETTER TA -1727 HANUNOO LETTER DA 
-1728 HANUNOO LETTER NA -1729 HANUNOO LETTER PA -172A HANUNOO LETTER BA -172B HANUNOO LETTER MA -172C HANUNOO LETTER YA -172D HANUNOO LETTER RA -172E HANUNOO LETTER LA -172F HANUNOO LETTER WA -1730 HANUNOO LETTER SA -1731 HANUNOO LETTER HA -1732 HANUNOO VOWEL SIGN I -1733 HANUNOO VOWEL SIGN U -1734 HANUNOO SIGN PAMUDPOD -1735 PHILIPPINE SINGLE PUNCTUATION -1736 PHILIPPINE DOUBLE PUNCTUATION -1740 BUHID LETTER A -1741 BUHID LETTER I -1742 BUHID LETTER U -1743 BUHID LETTER KA -1744 BUHID LETTER GA -1745 BUHID LETTER NGA -1746 BUHID LETTER TA -1747 BUHID LETTER DA -1748 BUHID LETTER NA -1749 BUHID LETTER PA -174A BUHID LETTER BA -174B BUHID LETTER MA -174C BUHID LETTER YA -174D BUHID LETTER RA -174E BUHID LETTER LA -174F BUHID LETTER WA -1750 BUHID LETTER SA -1751 BUHID LETTER HA -1752 BUHID VOWEL SIGN I -1753 BUHID VOWEL SIGN U -1760 TAGBANWA LETTER A -1761 TAGBANWA LETTER I -1762 TAGBANWA LETTER U -1763 TAGBANWA LETTER KA -1764 TAGBANWA LETTER GA -1765 TAGBANWA LETTER NGA -1766 TAGBANWA LETTER TA -1767 TAGBANWA LETTER DA -1768 TAGBANWA LETTER NA -1769 TAGBANWA LETTER PA -176A TAGBANWA LETTER BA -176B TAGBANWA LETTER MA -176C TAGBANWA LETTER YA -176E TAGBANWA LETTER LA -176F TAGBANWA LETTER WA -1770 TAGBANWA LETTER SA -1772 TAGBANWA VOWEL SIGN I -1773 TAGBANWA VOWEL SIGN U -1780 KHMER LETTER KA -1781 KHMER LETTER KHA -1782 KHMER LETTER KO -1783 KHMER LETTER KHO -1784 KHMER LETTER NGO -1785 KHMER LETTER CA -1786 KHMER LETTER CHA -1787 KHMER LETTER CO -1788 KHMER LETTER CHO -1789 KHMER LETTER NYO -178A KHMER LETTER DA -178B KHMER LETTER TTHA -178C KHMER LETTER DO -178D KHMER LETTER TTHO -178E KHMER LETTER NNO -178F KHMER LETTER TA -1790 KHMER LETTER THA -1791 KHMER LETTER TO -1792 KHMER LETTER THO -1793 KHMER LETTER NO -1794 KHMER LETTER BA -1795 KHMER LETTER PHA -1796 KHMER LETTER PO -1797 KHMER LETTER PHO -1798 KHMER LETTER MO -1799 KHMER LETTER YO -179A KHMER LETTER RO -179B KHMER LETTER LO -179C KHMER LETTER VO -179D KHMER LETTER SHA -179E KHMER LETTER SSO -179F 
KHMER LETTER SA -17A0 KHMER LETTER HA -17A1 KHMER LETTER LA -17A2 KHMER LETTER QA -17A3 KHMER INDEPENDENT VOWEL QAQ -17A4 KHMER INDEPENDENT VOWEL QAA -17A5 KHMER INDEPENDENT VOWEL QI -17A6 KHMER INDEPENDENT VOWEL QII -17A7 KHMER INDEPENDENT VOWEL QU -17A8 KHMER INDEPENDENT VOWEL QUK -17A9 KHMER INDEPENDENT VOWEL QUU -17AA KHMER INDEPENDENT VOWEL QUUV -17AB KHMER INDEPENDENT VOWEL RY -17AC KHMER INDEPENDENT VOWEL RYY -17AD KHMER INDEPENDENT VOWEL LY -17AE KHMER INDEPENDENT VOWEL LYY -17AF KHMER INDEPENDENT VOWEL QE -17B0 KHMER INDEPENDENT VOWEL QAI -17B1 KHMER INDEPENDENT VOWEL QOO TYPE ONE -17B2 KHMER INDEPENDENT VOWEL QOO TYPE TWO -17B3 KHMER INDEPENDENT VOWEL QAU -17B4 KHMER VOWEL INHERENT AQ -17B5 KHMER VOWEL INHERENT AA -17B6 KHMER VOWEL SIGN AA -17B7 KHMER VOWEL SIGN I -17B8 KHMER VOWEL SIGN II -17B9 KHMER VOWEL SIGN Y -17BA KHMER VOWEL SIGN YY -17BB KHMER VOWEL SIGN U -17BC KHMER VOWEL SIGN UU -17BD KHMER VOWEL SIGN UA -17BE KHMER VOWEL SIGN OE -17BF KHMER VOWEL SIGN YA -17C0 KHMER VOWEL SIGN IE -17C1 KHMER VOWEL SIGN E -17C2 KHMER VOWEL SIGN AE -17C3 KHMER VOWEL SIGN AI -17C4 KHMER VOWEL SIGN OO -17C5 KHMER VOWEL SIGN AU -17C6 KHMER SIGN NIKAHIT -17C7 KHMER SIGN REAHMUK -17C8 KHMER SIGN YUUKALEAPINTU -17C9 KHMER SIGN MUUSIKATOAN -17CA KHMER SIGN TRIISAP -17CB KHMER SIGN BANTOC -17CC KHMER SIGN ROBAT -17CD KHMER SIGN TOANDAKHIAT -17CE KHMER SIGN KAKABAT -17CF KHMER SIGN AHSDA -17D0 KHMER SIGN SAMYOK SANNYA -17D1 KHMER SIGN VIRIAM -17D2 KHMER SIGN COENG -17D3 KHMER SIGN BATHAMASAT -17D4 KHMER SIGN KHAN -17D5 KHMER SIGN BARIYOOSAN -17D6 KHMER SIGN CAMNUC PII KUUH -17D7 KHMER SIGN LEK TOO -17D8 KHMER SIGN BEYYAL -17D9 KHMER SIGN PHNAEK MUAN -17DA KHMER SIGN KOOMUUT -17DB KHMER CURRENCY SYMBOL RIEL -17DC KHMER SIGN AVAKRAHASANYA -17DD KHMER SIGN ATTHACAN -17E0 KHMER DIGIT ZERO -17E1 KHMER DIGIT ONE -17E2 KHMER DIGIT TWO -17E3 KHMER DIGIT THREE -17E4 KHMER DIGIT FOUR -17E5 KHMER DIGIT FIVE -17E6 KHMER DIGIT SIX -17E7 KHMER DIGIT SEVEN -17E8 KHMER DIGIT EIGHT -17E9 
KHMER DIGIT NINE -17F0 KHMER SYMBOL LEK ATTAK SON -17F1 KHMER SYMBOL LEK ATTAK MUOY -17F2 KHMER SYMBOL LEK ATTAK PII -17F3 KHMER SYMBOL LEK ATTAK BEI -17F4 KHMER SYMBOL LEK ATTAK BUON -17F5 KHMER SYMBOL LEK ATTAK PRAM -17F6 KHMER SYMBOL LEK ATTAK PRAM-MUOY -17F7 KHMER SYMBOL LEK ATTAK PRAM-PII -17F8 KHMER SYMBOL LEK ATTAK PRAM-BEI -17F9 KHMER SYMBOL LEK ATTAK PRAM-BUON -1800 MONGOLIAN BIRGA -1801 MONGOLIAN ELLIPSIS -1802 MONGOLIAN COMMA -1803 MONGOLIAN FULL STOP -1804 MONGOLIAN COLON -1805 MONGOLIAN FOUR DOTS -1806 MONGOLIAN TODO SOFT HYPHEN -1807 MONGOLIAN SIBE SYLLABLE BOUNDARY MARKER -1808 MONGOLIAN MANCHU COMMA -1809 MONGOLIAN MANCHU FULL STOP -180A MONGOLIAN NIRUGU -180B MONGOLIAN FREE VARIATION SELECTOR ONE -180C MONGOLIAN FREE VARIATION SELECTOR TWO -180D MONGOLIAN FREE VARIATION SELECTOR THREE -180E MONGOLIAN VOWEL SEPARATOR -1810 MONGOLIAN DIGIT ZERO -1811 MONGOLIAN DIGIT ONE -1812 MONGOLIAN DIGIT TWO -1813 MONGOLIAN DIGIT THREE -1814 MONGOLIAN DIGIT FOUR -1815 MONGOLIAN DIGIT FIVE -1816 MONGOLIAN DIGIT SIX -1817 MONGOLIAN DIGIT SEVEN -1818 MONGOLIAN DIGIT EIGHT -1819 MONGOLIAN DIGIT NINE -1820 MONGOLIAN LETTER A -1821 MONGOLIAN LETTER E -1822 MONGOLIAN LETTER I -1823 MONGOLIAN LETTER O -1824 MONGOLIAN LETTER U -1825 MONGOLIAN LETTER OE -1826 MONGOLIAN LETTER UE -1827 MONGOLIAN LETTER EE -1828 MONGOLIAN LETTER NA -1829 MONGOLIAN LETTER ANG -182A MONGOLIAN LETTER BA -182B MONGOLIAN LETTER PA -182C MONGOLIAN LETTER QA -182D MONGOLIAN LETTER GA -182E MONGOLIAN LETTER MA -182F MONGOLIAN LETTER LA -1830 MONGOLIAN LETTER SA -1831 MONGOLIAN LETTER SHA -1832 MONGOLIAN LETTER TA -1833 MONGOLIAN LETTER DA -1834 MONGOLIAN LETTER CHA -1835 MONGOLIAN LETTER JA -1836 MONGOLIAN LETTER YA -1837 MONGOLIAN LETTER RA -1838 MONGOLIAN LETTER WA -1839 MONGOLIAN LETTER FA -183A MONGOLIAN LETTER KA -183B MONGOLIAN LETTER KHA -183C MONGOLIAN LETTER TSA -183D MONGOLIAN LETTER ZA -183E MONGOLIAN LETTER HAA -183F MONGOLIAN LETTER ZRA -1840 MONGOLIAN LETTER LHA -1841 MONGOLIAN LETTER 
ZHI -1842 MONGOLIAN LETTER CHI -1843 MONGOLIAN LETTER TODO LONG VOWEL SIGN -1844 MONGOLIAN LETTER TODO E -1845 MONGOLIAN LETTER TODO I -1846 MONGOLIAN LETTER TODO O -1847 MONGOLIAN LETTER TODO U -1848 MONGOLIAN LETTER TODO OE -1849 MONGOLIAN LETTER TODO UE -184A MONGOLIAN LETTER TODO ANG -184B MONGOLIAN LETTER TODO BA -184C MONGOLIAN LETTER TODO PA -184D MONGOLIAN LETTER TODO QA -184E MONGOLIAN LETTER TODO GA -184F MONGOLIAN LETTER TODO MA -1850 MONGOLIAN LETTER TODO TA -1851 MONGOLIAN LETTER TODO DA -1852 MONGOLIAN LETTER TODO CHA -1853 MONGOLIAN LETTER TODO JA -1854 MONGOLIAN LETTER TODO TSA -1855 MONGOLIAN LETTER TODO YA -1856 MONGOLIAN LETTER TODO WA -1857 MONGOLIAN LETTER TODO KA -1858 MONGOLIAN LETTER TODO GAA -1859 MONGOLIAN LETTER TODO HAA -185A MONGOLIAN LETTER TODO JIA -185B MONGOLIAN LETTER TODO NIA -185C MONGOLIAN LETTER TODO DZA -185D MONGOLIAN LETTER SIBE E -185E MONGOLIAN LETTER SIBE I -185F MONGOLIAN LETTER SIBE IY -1860 MONGOLIAN LETTER SIBE UE -1861 MONGOLIAN LETTER SIBE U -1862 MONGOLIAN LETTER SIBE ANG -1863 MONGOLIAN LETTER SIBE KA -1864 MONGOLIAN LETTER SIBE GA -1865 MONGOLIAN LETTER SIBE HA -1866 MONGOLIAN LETTER SIBE PA -1867 MONGOLIAN LETTER SIBE SHA -1868 MONGOLIAN LETTER SIBE TA -1869 MONGOLIAN LETTER SIBE DA -186A MONGOLIAN LETTER SIBE JA -186B MONGOLIAN LETTER SIBE FA -186C MONGOLIAN LETTER SIBE GAA -186D MONGOLIAN LETTER SIBE HAA -186E MONGOLIAN LETTER SIBE TSA -186F MONGOLIAN LETTER SIBE ZA -1870 MONGOLIAN LETTER SIBE RAA -1871 MONGOLIAN LETTER SIBE CHA -1872 MONGOLIAN LETTER SIBE ZHA -1873 MONGOLIAN LETTER MANCHU I -1874 MONGOLIAN LETTER MANCHU KA -1875 MONGOLIAN LETTER MANCHU RA -1876 MONGOLIAN LETTER MANCHU FA -1877 MONGOLIAN LETTER MANCHU ZHA -1880 MONGOLIAN LETTER ALI GALI ANUSVARA ONE -1881 MONGOLIAN LETTER ALI GALI VISARGA ONE -1882 MONGOLIAN LETTER ALI GALI DAMARU -1883 MONGOLIAN LETTER ALI GALI UBADAMA -1884 MONGOLIAN LETTER ALI GALI INVERTED UBADAMA -1885 MONGOLIAN LETTER ALI GALI BALUDA -1886 MONGOLIAN LETTER ALI GALI THREE 
BALUDA -1887 MONGOLIAN LETTER ALI GALI A -1888 MONGOLIAN LETTER ALI GALI I -1889 MONGOLIAN LETTER ALI GALI KA -188A MONGOLIAN LETTER ALI GALI NGA -188B MONGOLIAN LETTER ALI GALI CA -188C MONGOLIAN LETTER ALI GALI TTA -188D MONGOLIAN LETTER ALI GALI TTHA -188E MONGOLIAN LETTER ALI GALI DDA -188F MONGOLIAN LETTER ALI GALI NNA -1890 MONGOLIAN LETTER ALI GALI TA -1891 MONGOLIAN LETTER ALI GALI DA -1892 MONGOLIAN LETTER ALI GALI PA -1893 MONGOLIAN LETTER ALI GALI PHA -1894 MONGOLIAN LETTER ALI GALI SSA -1895 MONGOLIAN LETTER ALI GALI ZHA -1896 MONGOLIAN LETTER ALI GALI ZA -1897 MONGOLIAN LETTER ALI GALI AH -1898 MONGOLIAN LETTER TODO ALI GALI TA -1899 MONGOLIAN LETTER TODO ALI GALI ZHA -189A MONGOLIAN LETTER MANCHU ALI GALI GHA -189B MONGOLIAN LETTER MANCHU ALI GALI NGA -189C MONGOLIAN LETTER MANCHU ALI GALI CA -189D MONGOLIAN LETTER MANCHU ALI GALI JHA -189E MONGOLIAN LETTER MANCHU ALI GALI TTA -189F MONGOLIAN LETTER MANCHU ALI GALI DDHA -18A0 MONGOLIAN LETTER MANCHU ALI GALI TA -18A1 MONGOLIAN LETTER MANCHU ALI GALI DHA -18A2 MONGOLIAN LETTER MANCHU ALI GALI SSA -18A3 MONGOLIAN LETTER MANCHU ALI GALI CYA -18A4 MONGOLIAN LETTER MANCHU ALI GALI ZHA -18A5 MONGOLIAN LETTER MANCHU ALI GALI ZA -18A6 MONGOLIAN LETTER ALI GALI HALF U -18A7 MONGOLIAN LETTER ALI GALI HALF YA -18A8 MONGOLIAN LETTER MANCHU ALI GALI BHA -18A9 MONGOLIAN LETTER ALI GALI DAGALGA -18AA MONGOLIAN LETTER MANCHU ALI GALI LHA -18B0 CANADIAN SYLLABICS OY -18B1 CANADIAN SYLLABICS AY -18B2 CANADIAN SYLLABICS AAY -18B3 CANADIAN SYLLABICS WAY -18B4 CANADIAN SYLLABICS POY -18B5 CANADIAN SYLLABICS PAY -18B6 CANADIAN SYLLABICS PWOY -18B7 CANADIAN SYLLABICS TAY -18B8 CANADIAN SYLLABICS KAY -18B9 CANADIAN SYLLABICS KWAY -18BA CANADIAN SYLLABICS MAY -18BB CANADIAN SYLLABICS NOY -18BC CANADIAN SYLLABICS NAY -18BD CANADIAN SYLLABICS LAY -18BE CANADIAN SYLLABICS SOY -18BF CANADIAN SYLLABICS SAY -18C0 CANADIAN SYLLABICS SHOY -18C1 CANADIAN SYLLABICS SHAY -18C2 CANADIAN SYLLABICS SHWOY -18C3 CANADIAN SYLLABICS YOY -18C4 
CANADIAN SYLLABICS YAY -18C5 CANADIAN SYLLABICS RAY -18C6 CANADIAN SYLLABICS NWI -18C7 CANADIAN SYLLABICS OJIBWAY NWI -18C8 CANADIAN SYLLABICS NWII -18C9 CANADIAN SYLLABICS OJIBWAY NWII -18CA CANADIAN SYLLABICS NWO -18CB CANADIAN SYLLABICS OJIBWAY NWO -18CC CANADIAN SYLLABICS NWOO -18CD CANADIAN SYLLABICS OJIBWAY NWOO -18CE CANADIAN SYLLABICS RWEE -18CF CANADIAN SYLLABICS RWI -18D0 CANADIAN SYLLABICS RWII -18D1 CANADIAN SYLLABICS RWO -18D2 CANADIAN SYLLABICS RWOO -18D3 CANADIAN SYLLABICS RWA -18D4 CANADIAN SYLLABICS OJIBWAY P -18D5 CANADIAN SYLLABICS OJIBWAY T -18D6 CANADIAN SYLLABICS OJIBWAY K -18D7 CANADIAN SYLLABICS OJIBWAY C -18D8 CANADIAN SYLLABICS OJIBWAY M -18D9 CANADIAN SYLLABICS OJIBWAY N -18DA CANADIAN SYLLABICS OJIBWAY S -18DB CANADIAN SYLLABICS OJIBWAY SH -18DC CANADIAN SYLLABICS EASTERN W -18DD CANADIAN SYLLABICS WESTERN W -18DE CANADIAN SYLLABICS FINAL SMALL RING -18DF CANADIAN SYLLABICS FINAL RAISED DOT -18E0 CANADIAN SYLLABICS R-CREE RWE -18E1 CANADIAN SYLLABICS WEST-CREE LOO -18E2 CANADIAN SYLLABICS WEST-CREE LAA -18E3 CANADIAN SYLLABICS THWE -18E4 CANADIAN SYLLABICS THWA -18E5 CANADIAN SYLLABICS TTHWE -18E6 CANADIAN SYLLABICS TTHOO -18E7 CANADIAN SYLLABICS TTHAA -18E8 CANADIAN SYLLABICS TLHWE -18E9 CANADIAN SYLLABICS TLHOO -18EA CANADIAN SYLLABICS SAYISI SHWE -18EB CANADIAN SYLLABICS SAYISI SHOO -18EC CANADIAN SYLLABICS SAYISI HOO -18ED CANADIAN SYLLABICS CARRIER GWU -18EE CANADIAN SYLLABICS CARRIER DENE GEE -18EF CANADIAN SYLLABICS CARRIER GAA -18F0 CANADIAN SYLLABICS CARRIER GWA -18F1 CANADIAN SYLLABICS SAYISI JUU -18F2 CANADIAN SYLLABICS CARRIER JWA -18F3 CANADIAN SYLLABICS BEAVER DENE L -18F4 CANADIAN SYLLABICS BEAVER DENE R -18F5 CANADIAN SYLLABICS CARRIER DENTAL S -1900 LIMBU VOWEL-CARRIER LETTER -1901 LIMBU LETTER KA -1902 LIMBU LETTER KHA -1903 LIMBU LETTER GA -1904 LIMBU LETTER GHA -1905 LIMBU LETTER NGA -1906 LIMBU LETTER CA -1907 LIMBU LETTER CHA -1908 LIMBU LETTER JA -1909 LIMBU LETTER JHA -190A LIMBU LETTER YAN -190B LIMBU LETTER TA 
-190C LIMBU LETTER THA -190D LIMBU LETTER DA -190E LIMBU LETTER DHA -190F LIMBU LETTER NA -1910 LIMBU LETTER PA -1911 LIMBU LETTER PHA -1912 LIMBU LETTER BA -1913 LIMBU LETTER BHA -1914 LIMBU LETTER MA -1915 LIMBU LETTER YA -1916 LIMBU LETTER RA -1917 LIMBU LETTER LA -1918 LIMBU LETTER WA -1919 LIMBU LETTER SHA -191A LIMBU LETTER SSA -191B LIMBU LETTER SA -191C LIMBU LETTER HA -1920 LIMBU VOWEL SIGN A -1921 LIMBU VOWEL SIGN I -1922 LIMBU VOWEL SIGN U -1923 LIMBU VOWEL SIGN EE -1924 LIMBU VOWEL SIGN AI -1925 LIMBU VOWEL SIGN OO -1926 LIMBU VOWEL SIGN AU -1927 LIMBU VOWEL SIGN E -1928 LIMBU VOWEL SIGN O -1929 LIMBU SUBJOINED LETTER YA -192A LIMBU SUBJOINED LETTER RA -192B LIMBU SUBJOINED LETTER WA -1930 LIMBU SMALL LETTER KA -1931 LIMBU SMALL LETTER NGA -1932 LIMBU SMALL LETTER ANUSVARA -1933 LIMBU SMALL LETTER TA -1934 LIMBU SMALL LETTER NA -1935 LIMBU SMALL LETTER PA -1936 LIMBU SMALL LETTER MA -1937 LIMBU SMALL LETTER RA -1938 LIMBU SMALL LETTER LA -1939 LIMBU SIGN MUKPHRENG -193A LIMBU SIGN KEMPHRENG -193B LIMBU SIGN SA-I -1940 LIMBU SIGN LOO -1944 LIMBU EXCLAMATION MARK -1945 LIMBU QUESTION MARK -1946 LIMBU DIGIT ZERO -1947 LIMBU DIGIT ONE -1948 LIMBU DIGIT TWO -1949 LIMBU DIGIT THREE -194A LIMBU DIGIT FOUR -194B LIMBU DIGIT FIVE -194C LIMBU DIGIT SIX -194D LIMBU DIGIT SEVEN -194E LIMBU DIGIT EIGHT -194F LIMBU DIGIT NINE -1950 TAI LE LETTER KA -1951 TAI LE LETTER XA -1952 TAI LE LETTER NGA -1953 TAI LE LETTER TSA -1954 TAI LE LETTER SA -1955 TAI LE LETTER YA -1956 TAI LE LETTER TA -1957 TAI LE LETTER THA -1958 TAI LE LETTER LA -1959 TAI LE LETTER PA -195A TAI LE LETTER PHA -195B TAI LE LETTER MA -195C TAI LE LETTER FA -195D TAI LE LETTER VA -195E TAI LE LETTER HA -195F TAI LE LETTER QA -1960 TAI LE LETTER KHA -1961 TAI LE LETTER TSHA -1962 TAI LE LETTER NA -1963 TAI LE LETTER A -1964 TAI LE LETTER I -1965 TAI LE LETTER EE -1966 TAI LE LETTER EH -1967 TAI LE LETTER U -1968 TAI LE LETTER OO -1969 TAI LE LETTER O -196A TAI LE LETTER UE -196B TAI LE LETTER E -196C 
TAI LE LETTER AUE -196D TAI LE LETTER AI -1970 TAI LE LETTER TONE-2 -1971 TAI LE LETTER TONE-3 -1972 TAI LE LETTER TONE-4 -1973 TAI LE LETTER TONE-5 -1974 TAI LE LETTER TONE-6 -1980 NEW TAI LUE LETTER HIGH QA -1981 NEW TAI LUE LETTER LOW QA -1982 NEW TAI LUE LETTER HIGH KA -1983 NEW TAI LUE LETTER HIGH XA -1984 NEW TAI LUE LETTER HIGH NGA -1985 NEW TAI LUE LETTER LOW KA -1986 NEW TAI LUE LETTER LOW XA -1987 NEW TAI LUE LETTER LOW NGA -1988 NEW TAI LUE LETTER HIGH TSA -1989 NEW TAI LUE LETTER HIGH SA -198A NEW TAI LUE LETTER HIGH YA -198B NEW TAI LUE LETTER LOW TSA -198C NEW TAI LUE LETTER LOW SA -198D NEW TAI LUE LETTER LOW YA -198E NEW TAI LUE LETTER HIGH TA -198F NEW TAI LUE LETTER HIGH THA -1990 NEW TAI LUE LETTER HIGH NA -1991 NEW TAI LUE LETTER LOW TA -1992 NEW TAI LUE LETTER LOW THA -1993 NEW TAI LUE LETTER LOW NA -1994 NEW TAI LUE LETTER HIGH PA -1995 NEW TAI LUE LETTER HIGH PHA -1996 NEW TAI LUE LETTER HIGH MA -1997 NEW TAI LUE LETTER LOW PA -1998 NEW TAI LUE LETTER LOW PHA -1999 NEW TAI LUE LETTER LOW MA -199A NEW TAI LUE LETTER HIGH FA -199B NEW TAI LUE LETTER HIGH VA -199C NEW TAI LUE LETTER HIGH LA -199D NEW TAI LUE LETTER LOW FA -199E NEW TAI LUE LETTER LOW VA -199F NEW TAI LUE LETTER LOW LA -19A0 NEW TAI LUE LETTER HIGH HA -19A1 NEW TAI LUE LETTER HIGH DA -19A2 NEW TAI LUE LETTER HIGH BA -19A3 NEW TAI LUE LETTER LOW HA -19A4 NEW TAI LUE LETTER LOW DA -19A5 NEW TAI LUE LETTER LOW BA -19A6 NEW TAI LUE LETTER HIGH KVA -19A7 NEW TAI LUE LETTER HIGH XVA -19A8 NEW TAI LUE LETTER LOW KVA -19A9 NEW TAI LUE LETTER LOW XVA -19AA NEW TAI LUE LETTER HIGH SUA -19AB NEW TAI LUE LETTER LOW SUA -19B0 NEW TAI LUE VOWEL SIGN VOWEL SHORTENER -19B1 NEW TAI LUE VOWEL SIGN AA -19B2 NEW TAI LUE VOWEL SIGN II -19B3 NEW TAI LUE VOWEL SIGN U -19B4 NEW TAI LUE VOWEL SIGN UU -19B5 NEW TAI LUE VOWEL SIGN E -19B6 NEW TAI LUE VOWEL SIGN AE -19B7 NEW TAI LUE VOWEL SIGN O -19B8 NEW TAI LUE VOWEL SIGN OA -19B9 NEW TAI LUE VOWEL SIGN UE -19BA NEW TAI LUE VOWEL SIGN AY -19BB NEW TAI LUE 
VOWEL SIGN AAY -19BC NEW TAI LUE VOWEL SIGN UY -19BD NEW TAI LUE VOWEL SIGN OY -19BE NEW TAI LUE VOWEL SIGN OAY -19BF NEW TAI LUE VOWEL SIGN UEY -19C0 NEW TAI LUE VOWEL SIGN IY -19C1 NEW TAI LUE LETTER FINAL V -19C2 NEW TAI LUE LETTER FINAL NG -19C3 NEW TAI LUE LETTER FINAL N -19C4 NEW TAI LUE LETTER FINAL M -19C5 NEW TAI LUE LETTER FINAL K -19C6 NEW TAI LUE LETTER FINAL D -19C7 NEW TAI LUE LETTER FINAL B -19C8 NEW TAI LUE TONE MARK-1 -19C9 NEW TAI LUE TONE MARK-2 -19D0 NEW TAI LUE DIGIT ZERO -19D1 NEW TAI LUE DIGIT ONE -19D2 NEW TAI LUE DIGIT TWO -19D3 NEW TAI LUE DIGIT THREE -19D4 NEW TAI LUE DIGIT FOUR -19D5 NEW TAI LUE DIGIT FIVE -19D6 NEW TAI LUE DIGIT SIX -19D7 NEW TAI LUE DIGIT SEVEN -19D8 NEW TAI LUE DIGIT EIGHT -19D9 NEW TAI LUE DIGIT NINE -19DA NEW TAI LUE THAM DIGIT ONE -19DE NEW TAI LUE SIGN LAE -19DF NEW TAI LUE SIGN LAEV -19E0 KHMER SYMBOL PATHAMASAT -19E1 KHMER SYMBOL MUOY KOET -19E2 KHMER SYMBOL PII KOET -19E3 KHMER SYMBOL BEI KOET -19E4 KHMER SYMBOL BUON KOET -19E5 KHMER SYMBOL PRAM KOET -19E6 KHMER SYMBOL PRAM-MUOY KOET -19E7 KHMER SYMBOL PRAM-PII KOET -19E8 KHMER SYMBOL PRAM-BEI KOET -19E9 KHMER SYMBOL PRAM-BUON KOET -19EA KHMER SYMBOL DAP KOET -19EB KHMER SYMBOL DAP-MUOY KOET -19EC KHMER SYMBOL DAP-PII KOET -19ED KHMER SYMBOL DAP-BEI KOET -19EE KHMER SYMBOL DAP-BUON KOET -19EF KHMER SYMBOL DAP-PRAM KOET -19F0 KHMER SYMBOL TUTEYASAT -19F1 KHMER SYMBOL MUOY ROC -19F2 KHMER SYMBOL PII ROC -19F3 KHMER SYMBOL BEI ROC -19F4 KHMER SYMBOL BUON ROC -19F5 KHMER SYMBOL PRAM ROC -19F6 KHMER SYMBOL PRAM-MUOY ROC -19F7 KHMER SYMBOL PRAM-PII ROC -19F8 KHMER SYMBOL PRAM-BEI ROC -19F9 KHMER SYMBOL PRAM-BUON ROC -19FA KHMER SYMBOL DAP ROC -19FB KHMER SYMBOL DAP-MUOY ROC -19FC KHMER SYMBOL DAP-PII ROC -19FD KHMER SYMBOL DAP-BEI ROC -19FE KHMER SYMBOL DAP-BUON ROC -19FF KHMER SYMBOL DAP-PRAM ROC -1A00 BUGINESE LETTER KA -1A01 BUGINESE LETTER GA -1A02 BUGINESE LETTER NGA -1A03 BUGINESE LETTER NGKA -1A04 BUGINESE LETTER PA -1A05 BUGINESE LETTER BA -1A06 BUGINESE 
LETTER MA -1A07 BUGINESE LETTER MPA -1A08 BUGINESE LETTER TA -1A09 BUGINESE LETTER DA -1A0A BUGINESE LETTER NA -1A0B BUGINESE LETTER NRA -1A0C BUGINESE LETTER CA -1A0D BUGINESE LETTER JA -1A0E BUGINESE LETTER NYA -1A0F BUGINESE LETTER NYCA -1A10 BUGINESE LETTER YA -1A11 BUGINESE LETTER RA -1A12 BUGINESE LETTER LA -1A13 BUGINESE LETTER VA -1A14 BUGINESE LETTER SA -1A15 BUGINESE LETTER A -1A16 BUGINESE LETTER HA -1A17 BUGINESE VOWEL SIGN I -1A18 BUGINESE VOWEL SIGN U -1A19 BUGINESE VOWEL SIGN E -1A1A BUGINESE VOWEL SIGN O -1A1B BUGINESE VOWEL SIGN AE -1A1E BUGINESE PALLAWA -1A1F BUGINESE END OF SECTION -1A20 TAI THAM LETTER HIGH KA -1A21 TAI THAM LETTER HIGH KHA -1A22 TAI THAM LETTER HIGH KXA -1A23 TAI THAM LETTER LOW KA -1A24 TAI THAM LETTER LOW KXA -1A25 TAI THAM LETTER LOW KHA -1A26 TAI THAM LETTER NGA -1A27 TAI THAM LETTER HIGH CA -1A28 TAI THAM LETTER HIGH CHA -1A29 TAI THAM LETTER LOW CA -1A2A TAI THAM LETTER LOW SA -1A2B TAI THAM LETTER LOW CHA -1A2C TAI THAM LETTER NYA -1A2D TAI THAM LETTER RATA -1A2E TAI THAM LETTER HIGH RATHA -1A2F TAI THAM LETTER DA -1A30 TAI THAM LETTER LOW RATHA -1A31 TAI THAM LETTER RANA -1A32 TAI THAM LETTER HIGH TA -1A33 TAI THAM LETTER HIGH THA -1A34 TAI THAM LETTER LOW TA -1A35 TAI THAM LETTER LOW THA -1A36 TAI THAM LETTER NA -1A37 TAI THAM LETTER BA -1A38 TAI THAM LETTER HIGH PA -1A39 TAI THAM LETTER HIGH PHA -1A3A TAI THAM LETTER HIGH FA -1A3B TAI THAM LETTER LOW PA -1A3C TAI THAM LETTER LOW FA -1A3D TAI THAM LETTER LOW PHA -1A3E TAI THAM LETTER MA -1A3F TAI THAM LETTER LOW YA -1A40 TAI THAM LETTER HIGH YA -1A41 TAI THAM LETTER RA -1A42 TAI THAM LETTER RUE -1A43 TAI THAM LETTER LA -1A44 TAI THAM LETTER LUE -1A45 TAI THAM LETTER WA -1A46 TAI THAM LETTER HIGH SHA -1A47 TAI THAM LETTER HIGH SSA -1A48 TAI THAM LETTER HIGH SA -1A49 TAI THAM LETTER HIGH HA -1A4A TAI THAM LETTER LLA -1A4B TAI THAM LETTER A -1A4C TAI THAM LETTER LOW HA -1A4D TAI THAM LETTER I -1A4E TAI THAM LETTER II -1A4F TAI THAM LETTER U -1A50 TAI THAM LETTER UU -1A51 
TAI THAM LETTER EE -1A52 TAI THAM LETTER OO -1A53 TAI THAM LETTER LAE -1A54 TAI THAM LETTER GREAT SA -1A55 TAI THAM CONSONANT SIGN MEDIAL RA -1A56 TAI THAM CONSONANT SIGN MEDIAL LA -1A57 TAI THAM CONSONANT SIGN LA TANG LAI -1A58 TAI THAM SIGN MAI KANG LAI -1A59 TAI THAM CONSONANT SIGN FINAL NGA -1A5A TAI THAM CONSONANT SIGN LOW PA -1A5B TAI THAM CONSONANT SIGN HIGH RATHA OR LOW PA -1A5C TAI THAM CONSONANT SIGN MA -1A5D TAI THAM CONSONANT SIGN BA -1A5E TAI THAM CONSONANT SIGN SA -1A60 TAI THAM SIGN SAKOT -1A61 TAI THAM VOWEL SIGN A -1A62 TAI THAM VOWEL SIGN MAI SAT -1A63 TAI THAM VOWEL SIGN AA -1A64 TAI THAM VOWEL SIGN TALL AA -1A65 TAI THAM VOWEL SIGN I -1A66 TAI THAM VOWEL SIGN II -1A67 TAI THAM VOWEL SIGN UE -1A68 TAI THAM VOWEL SIGN UUE -1A69 TAI THAM VOWEL SIGN U -1A6A TAI THAM VOWEL SIGN UU -1A6B TAI THAM VOWEL SIGN O -1A6C TAI THAM VOWEL SIGN OA BELOW -1A6D TAI THAM VOWEL SIGN OY -1A6E TAI THAM VOWEL SIGN E -1A6F TAI THAM VOWEL SIGN AE -1A70 TAI THAM VOWEL SIGN OO -1A71 TAI THAM VOWEL SIGN AI -1A72 TAI THAM VOWEL SIGN THAM AI -1A73 TAI THAM VOWEL SIGN OA ABOVE -1A74 TAI THAM SIGN MAI KANG -1A75 TAI THAM SIGN TONE-1 -1A76 TAI THAM SIGN TONE-2 -1A77 TAI THAM SIGN KHUEN TONE-3 -1A78 TAI THAM SIGN KHUEN TONE-4 -1A79 TAI THAM SIGN KHUEN TONE-5 -1A7A TAI THAM SIGN RA HAAM -1A7B TAI THAM SIGN MAI SAM -1A7C TAI THAM SIGN KHUEN-LUE KARAN -1A7F TAI THAM COMBINING CRYPTOGRAMMIC DOT -1A80 TAI THAM HORA DIGIT ZERO -1A81 TAI THAM HORA DIGIT ONE -1A82 TAI THAM HORA DIGIT TWO -1A83 TAI THAM HORA DIGIT THREE -1A84 TAI THAM HORA DIGIT FOUR -1A85 TAI THAM HORA DIGIT FIVE -1A86 TAI THAM HORA DIGIT SIX -1A87 TAI THAM HORA DIGIT SEVEN -1A88 TAI THAM HORA DIGIT EIGHT -1A89 TAI THAM HORA DIGIT NINE -1A90 TAI THAM THAM DIGIT ZERO -1A91 TAI THAM THAM DIGIT ONE -1A92 TAI THAM THAM DIGIT TWO -1A93 TAI THAM THAM DIGIT THREE -1A94 TAI THAM THAM DIGIT FOUR -1A95 TAI THAM THAM DIGIT FIVE -1A96 TAI THAM THAM DIGIT SIX -1A97 TAI THAM THAM DIGIT SEVEN -1A98 TAI THAM THAM DIGIT EIGHT -1A99 TAI 
THAM THAM DIGIT NINE -1AA0 TAI THAM SIGN WIANG -1AA1 TAI THAM SIGN WIANGWAAK -1AA2 TAI THAM SIGN SAWAN -1AA3 TAI THAM SIGN KEOW -1AA4 TAI THAM SIGN HOY -1AA5 TAI THAM SIGN DOKMAI -1AA6 TAI THAM SIGN REVERSED ROTATED RANA -1AA7 TAI THAM SIGN MAI YAMOK -1AA8 TAI THAM SIGN KAAN -1AA9 TAI THAM SIGN KAANKUU -1AAA TAI THAM SIGN SATKAAN -1AAB TAI THAM SIGN SATKAANKUU -1AAC TAI THAM SIGN HANG -1AAD TAI THAM SIGN CAANG -1B00 BALINESE SIGN ULU RICEM -1B01 BALINESE SIGN ULU CANDRA -1B02 BALINESE SIGN CECEK -1B03 BALINESE SIGN SURANG -1B04 BALINESE SIGN BISAH -1B05 BALINESE LETTER AKARA -1B06 BALINESE LETTER AKARA TEDUNG -1B07 BALINESE LETTER IKARA -1B08 BALINESE LETTER IKARA TEDUNG -1B09 BALINESE LETTER UKARA -1B0A BALINESE LETTER UKARA TEDUNG -1B0B BALINESE LETTER RA REPA -1B0C BALINESE LETTER RA REPA TEDUNG -1B0D BALINESE LETTER LA LENGA -1B0E BALINESE LETTER LA LENGA TEDUNG -1B0F BALINESE LETTER EKARA -1B10 BALINESE LETTER AIKARA -1B11 BALINESE LETTER OKARA -1B12 BALINESE LETTER OKARA TEDUNG -1B13 BALINESE LETTER KA -1B14 BALINESE LETTER KA MAHAPRANA -1B15 BALINESE LETTER GA -1B16 BALINESE LETTER GA GORA -1B17 BALINESE LETTER NGA -1B18 BALINESE LETTER CA -1B19 BALINESE LETTER CA LACA -1B1A BALINESE LETTER JA -1B1B BALINESE LETTER JA JERA -1B1C BALINESE LETTER NYA -1B1D BALINESE LETTER TA LATIK -1B1E BALINESE LETTER TA MURDA MAHAPRANA -1B1F BALINESE LETTER DA MURDA ALPAPRANA -1B20 BALINESE LETTER DA MURDA MAHAPRANA -1B21 BALINESE LETTER NA RAMBAT -1B22 BALINESE LETTER TA -1B23 BALINESE LETTER TA TAWA -1B24 BALINESE LETTER DA -1B25 BALINESE LETTER DA MADU -1B26 BALINESE LETTER NA -1B27 BALINESE LETTER PA -1B28 BALINESE LETTER PA KAPAL -1B29 BALINESE LETTER BA -1B2A BALINESE LETTER BA KEMBANG -1B2B BALINESE LETTER MA -1B2C BALINESE LETTER YA -1B2D BALINESE LETTER RA -1B2E BALINESE LETTER LA -1B2F BALINESE LETTER WA -1B30 BALINESE LETTER SA SAGA -1B31 BALINESE LETTER SA SAPA -1B32 BALINESE LETTER SA -1B33 BALINESE LETTER HA -1B34 BALINESE SIGN REREKAN -1B35 BALINESE VOWEL SIGN 
TEDUNG -1B36 BALINESE VOWEL SIGN ULU -1B37 BALINESE VOWEL SIGN ULU SARI -1B38 BALINESE VOWEL SIGN SUKU -1B39 BALINESE VOWEL SIGN SUKU ILUT -1B3A BALINESE VOWEL SIGN RA REPA -1B3B BALINESE VOWEL SIGN RA REPA TEDUNG -1B3C BALINESE VOWEL SIGN LA LENGA -1B3D BALINESE VOWEL SIGN LA LENGA TEDUNG -1B3E BALINESE VOWEL SIGN TALING -1B3F BALINESE VOWEL SIGN TALING REPA -1B40 BALINESE VOWEL SIGN TALING TEDUNG -1B41 BALINESE VOWEL SIGN TALING REPA TEDUNG -1B42 BALINESE VOWEL SIGN PEPET -1B43 BALINESE VOWEL SIGN PEPET TEDUNG -1B44 BALINESE ADEG ADEG -1B45 BALINESE LETTER KAF SASAK -1B46 BALINESE LETTER KHOT SASAK -1B47 BALINESE LETTER TZIR SASAK -1B48 BALINESE LETTER EF SASAK -1B49 BALINESE LETTER VE SASAK -1B4A BALINESE LETTER ZAL SASAK -1B4B BALINESE LETTER ASYURA SASAK -1B50 BALINESE DIGIT ZERO -1B51 BALINESE DIGIT ONE -1B52 BALINESE DIGIT TWO -1B53 BALINESE DIGIT THREE -1B54 BALINESE DIGIT FOUR -1B55 BALINESE DIGIT FIVE -1B56 BALINESE DIGIT SIX -1B57 BALINESE DIGIT SEVEN -1B58 BALINESE DIGIT EIGHT -1B59 BALINESE DIGIT NINE -1B5A BALINESE PANTI -1B5B BALINESE PAMADA -1B5C BALINESE WINDU -1B5D BALINESE CARIK PAMUNGKAH -1B5E BALINESE CARIK SIKI -1B5F BALINESE CARIK PAREREN -1B60 BALINESE PAMENENG -1B61 BALINESE MUSICAL SYMBOL DONG -1B62 BALINESE MUSICAL SYMBOL DENG -1B63 BALINESE MUSICAL SYMBOL DUNG -1B64 BALINESE MUSICAL SYMBOL DANG -1B65 BALINESE MUSICAL SYMBOL DANG SURANG -1B66 BALINESE MUSICAL SYMBOL DING -1B67 BALINESE MUSICAL SYMBOL DAENG -1B68 BALINESE MUSICAL SYMBOL DEUNG -1B69 BALINESE MUSICAL SYMBOL DAING -1B6A BALINESE MUSICAL SYMBOL DANG GEDE -1B6B BALINESE MUSICAL SYMBOL COMBINING TEGEH -1B6C BALINESE MUSICAL SYMBOL COMBINING ENDEP -1B6D BALINESE MUSICAL SYMBOL COMBINING KEMPUL -1B6E BALINESE MUSICAL SYMBOL COMBINING KEMPLI -1B6F BALINESE MUSICAL SYMBOL COMBINING JEGOGAN -1B70 BALINESE MUSICAL SYMBOL COMBINING KEMPUL WITH JEGOGAN -1B71 BALINESE MUSICAL SYMBOL COMBINING KEMPLI WITH JEGOGAN -1B72 BALINESE MUSICAL SYMBOL COMBINING BENDE -1B73 BALINESE MUSICAL SYMBOL 
COMBINING GONG -1B74 BALINESE MUSICAL SYMBOL RIGHT-HAND OPEN DUG -1B75 BALINESE MUSICAL SYMBOL RIGHT-HAND OPEN DAG -1B76 BALINESE MUSICAL SYMBOL RIGHT-HAND CLOSED TUK -1B77 BALINESE MUSICAL SYMBOL RIGHT-HAND CLOSED TAK -1B78 BALINESE MUSICAL SYMBOL LEFT-HAND OPEN PANG -1B79 BALINESE MUSICAL SYMBOL LEFT-HAND OPEN PUNG -1B7A BALINESE MUSICAL SYMBOL LEFT-HAND CLOSED PLAK -1B7B BALINESE MUSICAL SYMBOL LEFT-HAND CLOSED PLUK -1B7C BALINESE MUSICAL SYMBOL LEFT-HAND OPEN PING -1B80 SUNDANESE SIGN PANYECEK -1B81 SUNDANESE SIGN PANGLAYAR -1B82 SUNDANESE SIGN PANGWISAD -1B83 SUNDANESE LETTER A -1B84 SUNDANESE LETTER I -1B85 SUNDANESE LETTER U -1B86 SUNDANESE LETTER AE -1B87 SUNDANESE LETTER O -1B88 SUNDANESE LETTER E -1B89 SUNDANESE LETTER EU -1B8A SUNDANESE LETTER KA -1B8B SUNDANESE LETTER QA -1B8C SUNDANESE LETTER GA -1B8D SUNDANESE LETTER NGA -1B8E SUNDANESE LETTER CA -1B8F SUNDANESE LETTER JA -1B90 SUNDANESE LETTER ZA -1B91 SUNDANESE LETTER NYA -1B92 SUNDANESE LETTER TA -1B93 SUNDANESE LETTER DA -1B94 SUNDANESE LETTER NA -1B95 SUNDANESE LETTER PA -1B96 SUNDANESE LETTER FA -1B97 SUNDANESE LETTER VA -1B98 SUNDANESE LETTER BA -1B99 SUNDANESE LETTER MA -1B9A SUNDANESE LETTER YA -1B9B SUNDANESE LETTER RA -1B9C SUNDANESE LETTER LA -1B9D SUNDANESE LETTER WA -1B9E SUNDANESE LETTER SA -1B9F SUNDANESE LETTER XA -1BA0 SUNDANESE LETTER HA -1BA1 SUNDANESE CONSONANT SIGN PAMINGKAL -1BA2 SUNDANESE CONSONANT SIGN PANYAKRA -1BA3 SUNDANESE CONSONANT SIGN PANYIKU -1BA4 SUNDANESE VOWEL SIGN PANGHULU -1BA5 SUNDANESE VOWEL SIGN PANYUKU -1BA6 SUNDANESE VOWEL SIGN PANAELAENG -1BA7 SUNDANESE VOWEL SIGN PANOLONG -1BA8 SUNDANESE VOWEL SIGN PAMEPET -1BA9 SUNDANESE VOWEL SIGN PANEULEUNG -1BAA SUNDANESE SIGN PAMAAEH -1BAE SUNDANESE LETTER KHA -1BAF SUNDANESE LETTER SYA -1BB0 SUNDANESE DIGIT ZERO -1BB1 SUNDANESE DIGIT ONE -1BB2 SUNDANESE DIGIT TWO -1BB3 SUNDANESE DIGIT THREE -1BB4 SUNDANESE DIGIT FOUR -1BB5 SUNDANESE DIGIT FIVE -1BB6 SUNDANESE DIGIT SIX -1BB7 SUNDANESE DIGIT SEVEN -1BB8 SUNDANESE DIGIT 
EIGHT -1BB9 SUNDANESE DIGIT NINE -1C00 LEPCHA LETTER KA -1C01 LEPCHA LETTER KLA -1C02 LEPCHA LETTER KHA -1C03 LEPCHA LETTER GA -1C04 LEPCHA LETTER GLA -1C05 LEPCHA LETTER NGA -1C06 LEPCHA LETTER CA -1C07 LEPCHA LETTER CHA -1C08 LEPCHA LETTER JA -1C09 LEPCHA LETTER NYA -1C0A LEPCHA LETTER TA -1C0B LEPCHA LETTER THA -1C0C LEPCHA LETTER DA -1C0D LEPCHA LETTER NA -1C0E LEPCHA LETTER PA -1C0F LEPCHA LETTER PLA -1C10 LEPCHA LETTER PHA -1C11 LEPCHA LETTER FA -1C12 LEPCHA LETTER FLA -1C13 LEPCHA LETTER BA -1C14 LEPCHA LETTER BLA -1C15 LEPCHA LETTER MA -1C16 LEPCHA LETTER MLA -1C17 LEPCHA LETTER TSA -1C18 LEPCHA LETTER TSHA -1C19 LEPCHA LETTER DZA -1C1A LEPCHA LETTER YA -1C1B LEPCHA LETTER RA -1C1C LEPCHA LETTER LA -1C1D LEPCHA LETTER HA -1C1E LEPCHA LETTER HLA -1C1F LEPCHA LETTER VA -1C20 LEPCHA LETTER SA -1C21 LEPCHA LETTER SHA -1C22 LEPCHA LETTER WA -1C23 LEPCHA LETTER A -1C24 LEPCHA SUBJOINED LETTER YA -1C25 LEPCHA SUBJOINED LETTER RA -1C26 LEPCHA VOWEL SIGN AA -1C27 LEPCHA VOWEL SIGN I -1C28 LEPCHA VOWEL SIGN O -1C29 LEPCHA VOWEL SIGN OO -1C2A LEPCHA VOWEL SIGN U -1C2B LEPCHA VOWEL SIGN UU -1C2C LEPCHA VOWEL SIGN E -1C2D LEPCHA CONSONANT SIGN K -1C2E LEPCHA CONSONANT SIGN M -1C2F LEPCHA CONSONANT SIGN L -1C30 LEPCHA CONSONANT SIGN N -1C31 LEPCHA CONSONANT SIGN P -1C32 LEPCHA CONSONANT SIGN R -1C33 LEPCHA CONSONANT SIGN T -1C34 LEPCHA CONSONANT SIGN NYIN-DO -1C35 LEPCHA CONSONANT SIGN KANG -1C36 LEPCHA SIGN RAN -1C37 LEPCHA SIGN NUKTA -1C3B LEPCHA PUNCTUATION TA-ROL -1C3C LEPCHA PUNCTUATION NYET THYOOM TA-ROL -1C3D LEPCHA PUNCTUATION CER-WA -1C3E LEPCHA PUNCTUATION TSHOOK CER-WA -1C3F LEPCHA PUNCTUATION TSHOOK -1C40 LEPCHA DIGIT ZERO -1C41 LEPCHA DIGIT ONE -1C42 LEPCHA DIGIT TWO -1C43 LEPCHA DIGIT THREE -1C44 LEPCHA DIGIT FOUR -1C45 LEPCHA DIGIT FIVE -1C46 LEPCHA DIGIT SIX -1C47 LEPCHA DIGIT SEVEN -1C48 LEPCHA DIGIT EIGHT -1C49 LEPCHA DIGIT NINE -1C4D LEPCHA LETTER TTA -1C4E LEPCHA LETTER TTHA -1C4F LEPCHA LETTER DDA -1C50 OL CHIKI DIGIT ZERO -1C51 OL CHIKI DIGIT ONE 
-1C52 OL CHIKI DIGIT TWO -1C53 OL CHIKI DIGIT THREE -1C54 OL CHIKI DIGIT FOUR -1C55 OL CHIKI DIGIT FIVE -1C56 OL CHIKI DIGIT SIX -1C57 OL CHIKI DIGIT SEVEN -1C58 OL CHIKI DIGIT EIGHT -1C59 OL CHIKI DIGIT NINE -1C5A OL CHIKI LETTER LA -1C5B OL CHIKI LETTER AT -1C5C OL CHIKI LETTER AG -1C5D OL CHIKI LETTER ANG -1C5E OL CHIKI LETTER AL -1C5F OL CHIKI LETTER LAA -1C60 OL CHIKI LETTER AAK -1C61 OL CHIKI LETTER AAJ -1C62 OL CHIKI LETTER AAM -1C63 OL CHIKI LETTER AAW -1C64 OL CHIKI LETTER LI -1C65 OL CHIKI LETTER IS -1C66 OL CHIKI LETTER IH -1C67 OL CHIKI LETTER INY -1C68 OL CHIKI LETTER IR -1C69 OL CHIKI LETTER LU -1C6A OL CHIKI LETTER UC -1C6B OL CHIKI LETTER UD -1C6C OL CHIKI LETTER UNN -1C6D OL CHIKI LETTER UY -1C6E OL CHIKI LETTER LE -1C6F OL CHIKI LETTER EP -1C70 OL CHIKI LETTER EDD -1C71 OL CHIKI LETTER EN -1C72 OL CHIKI LETTER ERR -1C73 OL CHIKI LETTER LO -1C74 OL CHIKI LETTER OTT -1C75 OL CHIKI LETTER OB -1C76 OL CHIKI LETTER OV -1C77 OL CHIKI LETTER OH -1C78 OL CHIKI MU TTUDDAG -1C79 OL CHIKI GAAHLAA TTUDDAAG -1C7A OL CHIKI MU-GAAHLAA TTUDDAAG -1C7B OL CHIKI RELAA -1C7C OL CHIKI PHAARKAA -1C7D OL CHIKI AHAD -1C7E OL CHIKI PUNCTUATION MUCAAD -1C7F OL CHIKI PUNCTUATION DOUBLE MUCAAD -1CD0 VEDIC TONE KARSHANA -1CD1 VEDIC TONE SHARA -1CD2 VEDIC TONE PRENKHA -1CD3 VEDIC SIGN NIHSHVASA -1CD4 VEDIC SIGN YAJURVEDIC MIDLINE SVARITA -1CD5 VEDIC TONE YAJURVEDIC AGGRAVATED INDEPENDENT SVARITA -1CD6 VEDIC TONE YAJURVEDIC INDEPENDENT SVARITA -1CD7 VEDIC TONE YAJURVEDIC KATHAKA INDEPENDENT SVARITA -1CD8 VEDIC TONE CANDRA BELOW -1CD9 VEDIC TONE YAJURVEDIC KATHAKA INDEPENDENT SVARITA SCHROEDER -1CDA VEDIC TONE DOUBLE SVARITA -1CDB VEDIC TONE TRIPLE SVARITA -1CDC VEDIC TONE KATHAKA ANUDATTA -1CDD VEDIC TONE DOT BELOW -1CDE VEDIC TONE TWO DOTS BELOW -1CDF VEDIC TONE THREE DOTS BELOW -1CE0 VEDIC TONE RIGVEDIC KASHMIRI INDEPENDENT SVARITA -1CE1 VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA -1CE2 VEDIC SIGN VISARGA SVARITA -1CE3 VEDIC SIGN VISARGA UDATTA -1CE4 VEDIC SIGN REVERSED 
VISARGA UDATTA -1CE5 VEDIC SIGN VISARGA ANUDATTA -1CE6 VEDIC SIGN REVERSED VISARGA ANUDATTA -1CE7 VEDIC SIGN VISARGA UDATTA WITH TAIL -1CE8 VEDIC SIGN VISARGA ANUDATTA WITH TAIL -1CE9 VEDIC SIGN ANUSVARA ANTARGOMUKHA -1CEA VEDIC SIGN ANUSVARA BAHIRGOMUKHA -1CEB VEDIC SIGN ANUSVARA VAMAGOMUKHA -1CEC VEDIC SIGN ANUSVARA VAMAGOMUKHA WITH TAIL -1CED VEDIC SIGN TIRYAK -1CEE VEDIC SIGN HEXIFORM LONG ANUSVARA -1CEF VEDIC SIGN LONG ANUSVARA -1CF0 VEDIC SIGN RTHANG LONG ANUSVARA -1CF1 VEDIC SIGN ANUSVARA UBHAYATO MUKHA -1CF2 VEDIC SIGN ARDHAVISARGA -1D00 LATIN LETTER SMALL CAPITAL A -1D01 LATIN LETTER SMALL CAPITAL AE -1D02 LATIN SMALL LETTER TURNED AE -1D03 LATIN LETTER SMALL CAPITAL BARRED B -1D04 LATIN LETTER SMALL CAPITAL C -1D05 LATIN LETTER SMALL CAPITAL D -1D06 LATIN LETTER SMALL CAPITAL ETH -1D07 LATIN LETTER SMALL CAPITAL E -1D08 LATIN SMALL LETTER TURNED OPEN E -1D09 LATIN SMALL LETTER TURNED I -1D0A LATIN LETTER SMALL CAPITAL J -1D0B LATIN LETTER SMALL CAPITAL K -1D0C LATIN LETTER SMALL CAPITAL L WITH STROKE -1D0D LATIN LETTER SMALL CAPITAL M -1D0E LATIN LETTER SMALL CAPITAL REVERSED N -1D0F LATIN LETTER SMALL CAPITAL O -1D10 LATIN LETTER SMALL CAPITAL OPEN O -1D11 LATIN SMALL LETTER SIDEWAYS O -1D12 LATIN SMALL LETTER SIDEWAYS OPEN O -1D13 LATIN SMALL LETTER SIDEWAYS O WITH STROKE -1D14 LATIN SMALL LETTER TURNED OE -1D15 LATIN LETTER SMALL CAPITAL OU -1D16 LATIN SMALL LETTER TOP HALF O -1D17 LATIN SMALL LETTER BOTTOM HALF O -1D18 LATIN LETTER SMALL CAPITAL P -1D19 LATIN LETTER SMALL CAPITAL REVERSED R -1D1A LATIN LETTER SMALL CAPITAL TURNED R -1D1B LATIN LETTER SMALL CAPITAL T -1D1C LATIN LETTER SMALL CAPITAL U -1D1D LATIN SMALL LETTER SIDEWAYS U -1D1E LATIN SMALL LETTER SIDEWAYS DIAERESIZED U -1D1F LATIN SMALL LETTER SIDEWAYS TURNED M -1D20 LATIN LETTER SMALL CAPITAL V -1D21 LATIN LETTER SMALL CAPITAL W -1D22 LATIN LETTER SMALL CAPITAL Z -1D23 LATIN LETTER SMALL CAPITAL EZH -1D24 LATIN LETTER VOICED LARYNGEAL SPIRANT -1D25 LATIN LETTER AIN -1D26 GREEK LETTER 
SMALL CAPITAL GAMMA -1D27 GREEK LETTER SMALL CAPITAL LAMDA -1D28 GREEK LETTER SMALL CAPITAL PI -1D29 GREEK LETTER SMALL CAPITAL RHO -1D2A GREEK LETTER SMALL CAPITAL PSI -1D2B CYRILLIC LETTER SMALL CAPITAL EL -1D2C MODIFIER LETTER CAPITAL A -1D2D MODIFIER LETTER CAPITAL AE -1D2E MODIFIER LETTER CAPITAL B -1D2F MODIFIER LETTER CAPITAL BARRED B -1D30 MODIFIER LETTER CAPITAL D -1D31 MODIFIER LETTER CAPITAL E -1D32 MODIFIER LETTER CAPITAL REVERSED E -1D33 MODIFIER LETTER CAPITAL G -1D34 MODIFIER LETTER CAPITAL H -1D35 MODIFIER LETTER CAPITAL I -1D36 MODIFIER LETTER CAPITAL J -1D37 MODIFIER LETTER CAPITAL K -1D38 MODIFIER LETTER CAPITAL L -1D39 MODIFIER LETTER CAPITAL M -1D3A MODIFIER LETTER CAPITAL N -1D3B MODIFIER LETTER CAPITAL REVERSED N -1D3C MODIFIER LETTER CAPITAL O -1D3D MODIFIER LETTER CAPITAL OU -1D3E MODIFIER LETTER CAPITAL P -1D3F MODIFIER LETTER CAPITAL R -1D40 MODIFIER LETTER CAPITAL T -1D41 MODIFIER LETTER CAPITAL U -1D42 MODIFIER LETTER CAPITAL W -1D43 MODIFIER LETTER SMALL A -1D44 MODIFIER LETTER SMALL TURNED A -1D45 MODIFIER LETTER SMALL ALPHA -1D46 MODIFIER LETTER SMALL TURNED AE -1D47 MODIFIER LETTER SMALL B -1D48 MODIFIER LETTER SMALL D -1D49 MODIFIER LETTER SMALL E -1D4A MODIFIER LETTER SMALL SCHWA -1D4B MODIFIER LETTER SMALL OPEN E -1D4C MODIFIER LETTER SMALL TURNED OPEN E -1D4D MODIFIER LETTER SMALL G -1D4E MODIFIER LETTER SMALL TURNED I -1D4F MODIFIER LETTER SMALL K -1D50 MODIFIER LETTER SMALL M -1D51 MODIFIER LETTER SMALL ENG -1D52 MODIFIER LETTER SMALL O -1D53 MODIFIER LETTER SMALL OPEN O -1D54 MODIFIER LETTER SMALL TOP HALF O -1D55 MODIFIER LETTER SMALL BOTTOM HALF O -1D56 MODIFIER LETTER SMALL P -1D57 MODIFIER LETTER SMALL T -1D58 MODIFIER LETTER SMALL U -1D59 MODIFIER LETTER SMALL SIDEWAYS U -1D5A MODIFIER LETTER SMALL TURNED M -1D5B MODIFIER LETTER SMALL V -1D5C MODIFIER LETTER SMALL AIN -1D5D MODIFIER LETTER SMALL BETA -1D5E MODIFIER LETTER SMALL GREEK GAMMA -1D5F MODIFIER LETTER SMALL DELTA -1D60 MODIFIER LETTER SMALL GREEK PHI -1D61 
MODIFIER LETTER SMALL CHI -1D62 LATIN SUBSCRIPT SMALL LETTER I -1D63 LATIN SUBSCRIPT SMALL LETTER R -1D64 LATIN SUBSCRIPT SMALL LETTER U -1D65 LATIN SUBSCRIPT SMALL LETTER V -1D66 GREEK SUBSCRIPT SMALL LETTER BETA -1D67 GREEK SUBSCRIPT SMALL LETTER GAMMA -1D68 GREEK SUBSCRIPT SMALL LETTER RHO -1D69 GREEK SUBSCRIPT SMALL LETTER PHI -1D6A GREEK SUBSCRIPT SMALL LETTER CHI -1D6B LATIN SMALL LETTER UE -1D6C LATIN SMALL LETTER B WITH MIDDLE TILDE -1D6D LATIN SMALL LETTER D WITH MIDDLE TILDE -1D6E LATIN SMALL LETTER F WITH MIDDLE TILDE -1D6F LATIN SMALL LETTER M WITH MIDDLE TILDE -1D70 LATIN SMALL LETTER N WITH MIDDLE TILDE -1D71 LATIN SMALL LETTER P WITH MIDDLE TILDE -1D72 LATIN SMALL LETTER R WITH MIDDLE TILDE -1D73 LATIN SMALL LETTER R WITH FISHHOOK AND MIDDLE TILDE -1D74 LATIN SMALL LETTER S WITH MIDDLE TILDE -1D75 LATIN SMALL LETTER T WITH MIDDLE TILDE -1D76 LATIN SMALL LETTER Z WITH MIDDLE TILDE -1D77 LATIN SMALL LETTER TURNED G -1D78 MODIFIER LETTER CYRILLIC EN -1D79 LATIN SMALL LETTER INSULAR G -1D7A LATIN SMALL LETTER TH WITH STRIKETHROUGH -1D7B LATIN SMALL CAPITAL LETTER I WITH STROKE -1D7C LATIN SMALL LETTER IOTA WITH STROKE -1D7D LATIN SMALL LETTER P WITH STROKE -1D7E LATIN SMALL CAPITAL LETTER U WITH STROKE -1D7F LATIN SMALL LETTER UPSILON WITH STROKE -1D80 LATIN SMALL LETTER B WITH PALATAL HOOK -1D81 LATIN SMALL LETTER D WITH PALATAL HOOK -1D82 LATIN SMALL LETTER F WITH PALATAL HOOK -1D83 LATIN SMALL LETTER G WITH PALATAL HOOK -1D84 LATIN SMALL LETTER K WITH PALATAL HOOK -1D85 LATIN SMALL LETTER L WITH PALATAL HOOK -1D86 LATIN SMALL LETTER M WITH PALATAL HOOK -1D87 LATIN SMALL LETTER N WITH PALATAL HOOK -1D88 LATIN SMALL LETTER P WITH PALATAL HOOK -1D89 LATIN SMALL LETTER R WITH PALATAL HOOK -1D8A LATIN SMALL LETTER S WITH PALATAL HOOK -1D8B LATIN SMALL LETTER ESH WITH PALATAL HOOK -1D8C LATIN SMALL LETTER V WITH PALATAL HOOK -1D8D LATIN SMALL LETTER X WITH PALATAL HOOK -1D8E LATIN SMALL LETTER Z WITH PALATAL HOOK -1D8F LATIN SMALL LETTER A WITH RETROFLEX 
HOOK -1D90 LATIN SMALL LETTER ALPHA WITH RETROFLEX HOOK -1D91 LATIN SMALL LETTER D WITH HOOK AND TAIL -1D92 LATIN SMALL LETTER E WITH RETROFLEX HOOK -1D93 LATIN SMALL LETTER OPEN E WITH RETROFLEX HOOK -1D94 LATIN SMALL LETTER REVERSED OPEN E WITH RETROFLEX HOOK -1D95 LATIN SMALL LETTER SCHWA WITH RETROFLEX HOOK -1D96 LATIN SMALL LETTER I WITH RETROFLEX HOOK -1D97 LATIN SMALL LETTER OPEN O WITH RETROFLEX HOOK -1D98 LATIN SMALL LETTER ESH WITH RETROFLEX HOOK -1D99 LATIN SMALL LETTER U WITH RETROFLEX HOOK -1D9A LATIN SMALL LETTER EZH WITH RETROFLEX HOOK -1D9B MODIFIER LETTER SMALL TURNED ALPHA -1D9C MODIFIER LETTER SMALL C -1D9D MODIFIER LETTER SMALL C WITH CURL -1D9E MODIFIER LETTER SMALL ETH -1D9F MODIFIER LETTER SMALL REVERSED OPEN E -1DA0 MODIFIER LETTER SMALL F -1DA1 MODIFIER LETTER SMALL DOTLESS J WITH STROKE -1DA2 MODIFIER LETTER SMALL SCRIPT G -1DA3 MODIFIER LETTER SMALL TURNED H -1DA4 MODIFIER LETTER SMALL I WITH STROKE -1DA5 MODIFIER LETTER SMALL IOTA -1DA6 MODIFIER LETTER SMALL CAPITAL I -1DA7 MODIFIER LETTER SMALL CAPITAL I WITH STROKE -1DA8 MODIFIER LETTER SMALL J WITH CROSSED-TAIL -1DA9 MODIFIER LETTER SMALL L WITH RETROFLEX HOOK -1DAA MODIFIER LETTER SMALL L WITH PALATAL HOOK -1DAB MODIFIER LETTER SMALL CAPITAL L -1DAC MODIFIER LETTER SMALL M WITH HOOK -1DAD MODIFIER LETTER SMALL TURNED M WITH LONG LEG -1DAE MODIFIER LETTER SMALL N WITH LEFT HOOK -1DAF MODIFIER LETTER SMALL N WITH RETROFLEX HOOK -1DB0 MODIFIER LETTER SMALL CAPITAL N -1DB1 MODIFIER LETTER SMALL BARRED O -1DB2 MODIFIER LETTER SMALL PHI -1DB3 MODIFIER LETTER SMALL S WITH HOOK -1DB4 MODIFIER LETTER SMALL ESH -1DB5 MODIFIER LETTER SMALL T WITH PALATAL HOOK -1DB6 MODIFIER LETTER SMALL U BAR -1DB7 MODIFIER LETTER SMALL UPSILON -1DB8 MODIFIER LETTER SMALL CAPITAL U -1DB9 MODIFIER LETTER SMALL V WITH HOOK -1DBA MODIFIER LETTER SMALL TURNED V -1DBB MODIFIER LETTER SMALL Z -1DBC MODIFIER LETTER SMALL Z WITH RETROFLEX HOOK -1DBD MODIFIER LETTER SMALL Z WITH CURL -1DBE MODIFIER LETTER SMALL EZH 
-1DBF MODIFIER LETTER SMALL THETA -1DC0 COMBINING DOTTED GRAVE ACCENT -1DC1 COMBINING DOTTED ACUTE ACCENT -1DC2 COMBINING SNAKE BELOW -1DC3 COMBINING SUSPENSION MARK -1DC4 COMBINING MACRON-ACUTE -1DC5 COMBINING GRAVE-MACRON -1DC6 COMBINING MACRON-GRAVE -1DC7 COMBINING ACUTE-MACRON -1DC8 COMBINING GRAVE-ACUTE-GRAVE -1DC9 COMBINING ACUTE-GRAVE-ACUTE -1DCA COMBINING LATIN SMALL LETTER R BELOW -1DCB COMBINING BREVE-MACRON -1DCC COMBINING MACRON-BREVE -1DCD COMBINING DOUBLE CIRCUMFLEX ABOVE -1DCE COMBINING OGONEK ABOVE -1DCF COMBINING ZIGZAG BELOW -1DD0 COMBINING IS BELOW -1DD1 COMBINING UR ABOVE -1DD2 COMBINING US ABOVE -1DD3 COMBINING LATIN SMALL LETTER FLATTENED OPEN A ABOVE -1DD4 COMBINING LATIN SMALL LETTER AE -1DD5 COMBINING LATIN SMALL LETTER AO -1DD6 COMBINING LATIN SMALL LETTER AV -1DD7 COMBINING LATIN SMALL LETTER C CEDILLA -1DD8 COMBINING LATIN SMALL LETTER INSULAR D -1DD9 COMBINING LATIN SMALL LETTER ETH -1DDA COMBINING LATIN SMALL LETTER G -1DDB COMBINING LATIN LETTER SMALL CAPITAL G -1DDC COMBINING LATIN SMALL LETTER K -1DDD COMBINING LATIN SMALL LETTER L -1DDE COMBINING LATIN LETTER SMALL CAPITAL L -1DDF COMBINING LATIN LETTER SMALL CAPITAL M -1DE0 COMBINING LATIN SMALL LETTER N -1DE1 COMBINING LATIN LETTER SMALL CAPITAL N -1DE2 COMBINING LATIN LETTER SMALL CAPITAL R -1DE3 COMBINING LATIN SMALL LETTER R ROTUNDA -1DE4 COMBINING LATIN SMALL LETTER S -1DE5 COMBINING LATIN SMALL LETTER LONG S -1DE6 COMBINING LATIN SMALL LETTER Z -1DFD COMBINING ALMOST EQUAL TO BELOW -1DFE COMBINING LEFT ARROWHEAD ABOVE -1DFF COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW -1E00 LATIN CAPITAL LETTER A WITH RING BELOW -1E01 LATIN SMALL LETTER A WITH RING BELOW -1E02 LATIN CAPITAL LETTER B WITH DOT ABOVE -1E03 LATIN SMALL LETTER B WITH DOT ABOVE -1E04 LATIN CAPITAL LETTER B WITH DOT BELOW -1E05 LATIN SMALL LETTER B WITH DOT BELOW -1E06 LATIN CAPITAL LETTER B WITH LINE BELOW -1E07 LATIN SMALL LETTER B WITH LINE BELOW -1E08 LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE -1E09 
LATIN SMALL LETTER C WITH CEDILLA AND ACUTE -1E0A LATIN CAPITAL LETTER D WITH DOT ABOVE -1E0B LATIN SMALL LETTER D WITH DOT ABOVE -1E0C LATIN CAPITAL LETTER D WITH DOT BELOW -1E0D LATIN SMALL LETTER D WITH DOT BELOW -1E0E LATIN CAPITAL LETTER D WITH LINE BELOW -1E0F LATIN SMALL LETTER D WITH LINE BELOW -1E10 LATIN CAPITAL LETTER D WITH CEDILLA -1E11 LATIN SMALL LETTER D WITH CEDILLA -1E12 LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW -1E13 LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW -1E14 LATIN CAPITAL LETTER E WITH MACRON AND GRAVE -1E15 LATIN SMALL LETTER E WITH MACRON AND GRAVE -1E16 LATIN CAPITAL LETTER E WITH MACRON AND ACUTE -1E17 LATIN SMALL LETTER E WITH MACRON AND ACUTE -1E18 LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW -1E19 LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW -1E1A LATIN CAPITAL LETTER E WITH TILDE BELOW -1E1B LATIN SMALL LETTER E WITH TILDE BELOW -1E1C LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE -1E1D LATIN SMALL LETTER E WITH CEDILLA AND BREVE -1E1E LATIN CAPITAL LETTER F WITH DOT ABOVE -1E1F LATIN SMALL LETTER F WITH DOT ABOVE -1E20 LATIN CAPITAL LETTER G WITH MACRON -1E21 LATIN SMALL LETTER G WITH MACRON -1E22 LATIN CAPITAL LETTER H WITH DOT ABOVE -1E23 LATIN SMALL LETTER H WITH DOT ABOVE -1E24 LATIN CAPITAL LETTER H WITH DOT BELOW -1E25 LATIN SMALL LETTER H WITH DOT BELOW -1E26 LATIN CAPITAL LETTER H WITH DIAERESIS -1E27 LATIN SMALL LETTER H WITH DIAERESIS -1E28 LATIN CAPITAL LETTER H WITH CEDILLA -1E29 LATIN SMALL LETTER H WITH CEDILLA -1E2A LATIN CAPITAL LETTER H WITH BREVE BELOW -1E2B LATIN SMALL LETTER H WITH BREVE BELOW -1E2C LATIN CAPITAL LETTER I WITH TILDE BELOW -1E2D LATIN SMALL LETTER I WITH TILDE BELOW -1E2E LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE -1E2F LATIN SMALL LETTER I WITH DIAERESIS AND ACUTE -1E30 LATIN CAPITAL LETTER K WITH ACUTE -1E31 LATIN SMALL LETTER K WITH ACUTE -1E32 LATIN CAPITAL LETTER K WITH DOT BELOW -1E33 LATIN SMALL LETTER K WITH DOT BELOW -1E34 LATIN CAPITAL LETTER K WITH LINE BELOW -1E35 LATIN SMALL 
LETTER K WITH LINE BELOW -1E36 LATIN CAPITAL LETTER L WITH DOT BELOW -1E37 LATIN SMALL LETTER L WITH DOT BELOW -1E38 LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON -1E39 LATIN SMALL LETTER L WITH DOT BELOW AND MACRON -1E3A LATIN CAPITAL LETTER L WITH LINE BELOW -1E3B LATIN SMALL LETTER L WITH LINE BELOW -1E3C LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW -1E3D LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW -1E3E LATIN CAPITAL LETTER M WITH ACUTE -1E3F LATIN SMALL LETTER M WITH ACUTE -1E40 LATIN CAPITAL LETTER M WITH DOT ABOVE -1E41 LATIN SMALL LETTER M WITH DOT ABOVE -1E42 LATIN CAPITAL LETTER M WITH DOT BELOW -1E43 LATIN SMALL LETTER M WITH DOT BELOW -1E44 LATIN CAPITAL LETTER N WITH DOT ABOVE -1E45 LATIN SMALL LETTER N WITH DOT ABOVE -1E46 LATIN CAPITAL LETTER N WITH DOT BELOW -1E47 LATIN SMALL LETTER N WITH DOT BELOW -1E48 LATIN CAPITAL LETTER N WITH LINE BELOW -1E49 LATIN SMALL LETTER N WITH LINE BELOW -1E4A LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW -1E4B LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW -1E4C LATIN CAPITAL LETTER O WITH TILDE AND ACUTE -1E4D LATIN SMALL LETTER O WITH TILDE AND ACUTE -1E4E LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS -1E4F LATIN SMALL LETTER O WITH TILDE AND DIAERESIS -1E50 LATIN CAPITAL LETTER O WITH MACRON AND GRAVE -1E51 LATIN SMALL LETTER O WITH MACRON AND GRAVE -1E52 LATIN CAPITAL LETTER O WITH MACRON AND ACUTE -1E53 LATIN SMALL LETTER O WITH MACRON AND ACUTE -1E54 LATIN CAPITAL LETTER P WITH ACUTE -1E55 LATIN SMALL LETTER P WITH ACUTE -1E56 LATIN CAPITAL LETTER P WITH DOT ABOVE -1E57 LATIN SMALL LETTER P WITH DOT ABOVE -1E58 LATIN CAPITAL LETTER R WITH DOT ABOVE -1E59 LATIN SMALL LETTER R WITH DOT ABOVE -1E5A LATIN CAPITAL LETTER R WITH DOT BELOW -1E5B LATIN SMALL LETTER R WITH DOT BELOW -1E5C LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON -1E5D LATIN SMALL LETTER R WITH DOT BELOW AND MACRON -1E5E LATIN CAPITAL LETTER R WITH LINE BELOW -1E5F LATIN SMALL LETTER R WITH LINE BELOW -1E60 LATIN CAPITAL LETTER S WITH DOT ABOVE 
-1E61 LATIN SMALL LETTER S WITH DOT ABOVE -1E62 LATIN CAPITAL LETTER S WITH DOT BELOW -1E63 LATIN SMALL LETTER S WITH DOT BELOW -1E64 LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE -1E65 LATIN SMALL LETTER S WITH ACUTE AND DOT ABOVE -1E66 LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE -1E67 LATIN SMALL LETTER S WITH CARON AND DOT ABOVE -1E68 LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE -1E69 LATIN SMALL LETTER S WITH DOT BELOW AND DOT ABOVE -1E6A LATIN CAPITAL LETTER T WITH DOT ABOVE -1E6B LATIN SMALL LETTER T WITH DOT ABOVE -1E6C LATIN CAPITAL LETTER T WITH DOT BELOW -1E6D LATIN SMALL LETTER T WITH DOT BELOW -1E6E LATIN CAPITAL LETTER T WITH LINE BELOW -1E6F LATIN SMALL LETTER T WITH LINE BELOW -1E70 LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW -1E71 LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW -1E72 LATIN CAPITAL LETTER U WITH DIAERESIS BELOW -1E73 LATIN SMALL LETTER U WITH DIAERESIS BELOW -1E74 LATIN CAPITAL LETTER U WITH TILDE BELOW -1E75 LATIN SMALL LETTER U WITH TILDE BELOW -1E76 LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW -1E77 LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW -1E78 LATIN CAPITAL LETTER U WITH TILDE AND ACUTE -1E79 LATIN SMALL LETTER U WITH TILDE AND ACUTE -1E7A LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS -1E7B LATIN SMALL LETTER U WITH MACRON AND DIAERESIS -1E7C LATIN CAPITAL LETTER V WITH TILDE -1E7D LATIN SMALL LETTER V WITH TILDE -1E7E LATIN CAPITAL LETTER V WITH DOT BELOW -1E7F LATIN SMALL LETTER V WITH DOT BELOW -1E80 LATIN CAPITAL LETTER W WITH GRAVE -1E81 LATIN SMALL LETTER W WITH GRAVE -1E82 LATIN CAPITAL LETTER W WITH ACUTE -1E83 LATIN SMALL LETTER W WITH ACUTE -1E84 LATIN CAPITAL LETTER W WITH DIAERESIS -1E85 LATIN SMALL LETTER W WITH DIAERESIS -1E86 LATIN CAPITAL LETTER W WITH DOT ABOVE -1E87 LATIN SMALL LETTER W WITH DOT ABOVE -1E88 LATIN CAPITAL LETTER W WITH DOT BELOW -1E89 LATIN SMALL LETTER W WITH DOT BELOW -1E8A LATIN CAPITAL LETTER X WITH DOT ABOVE -1E8B LATIN SMALL LETTER X WITH DOT ABOVE -1E8C LATIN CAPITAL LETTER 
X WITH DIAERESIS -1E8D LATIN SMALL LETTER X WITH DIAERESIS -1E8E LATIN CAPITAL LETTER Y WITH DOT ABOVE -1E8F LATIN SMALL LETTER Y WITH DOT ABOVE -1E90 LATIN CAPITAL LETTER Z WITH CIRCUMFLEX -1E91 LATIN SMALL LETTER Z WITH CIRCUMFLEX -1E92 LATIN CAPITAL LETTER Z WITH DOT BELOW -1E93 LATIN SMALL LETTER Z WITH DOT BELOW -1E94 LATIN CAPITAL LETTER Z WITH LINE BELOW -1E95 LATIN SMALL LETTER Z WITH LINE BELOW -1E96 LATIN SMALL LETTER H WITH LINE BELOW -1E97 LATIN SMALL LETTER T WITH DIAERESIS -1E98 LATIN SMALL LETTER W WITH RING ABOVE -1E99 LATIN SMALL LETTER Y WITH RING ABOVE -1E9A LATIN SMALL LETTER A WITH RIGHT HALF RING -1E9B LATIN SMALL LETTER LONG S WITH DOT ABOVE -1E9C LATIN SMALL LETTER LONG S WITH DIAGONAL STROKE -1E9D LATIN SMALL LETTER LONG S WITH HIGH STROKE -1E9E LATIN CAPITAL LETTER SHARP S -1E9F LATIN SMALL LETTER DELTA -1EA0 LATIN CAPITAL LETTER A WITH DOT BELOW -1EA1 LATIN SMALL LETTER A WITH DOT BELOW -1EA2 LATIN CAPITAL LETTER A WITH HOOK ABOVE -1EA3 LATIN SMALL LETTER A WITH HOOK ABOVE -1EA4 LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE -1EA5 LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE -1EA6 LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE -1EA7 LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE -1EA8 LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE -1EA9 LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE -1EAA LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE -1EAB LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE -1EAC LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW -1EAD LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW -1EAE LATIN CAPITAL LETTER A WITH BREVE AND ACUTE -1EAF LATIN SMALL LETTER A WITH BREVE AND ACUTE -1EB0 LATIN CAPITAL LETTER A WITH BREVE AND GRAVE -1EB1 LATIN SMALL LETTER A WITH BREVE AND GRAVE -1EB2 LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE -1EB3 LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE -1EB4 LATIN CAPITAL LETTER A WITH BREVE AND TILDE -1EB5 LATIN SMALL LETTER A WITH BREVE AND TILDE -1EB6 LATIN CAPITAL 
LETTER A WITH BREVE AND DOT BELOW -1EB7 LATIN SMALL LETTER A WITH BREVE AND DOT BELOW -1EB8 LATIN CAPITAL LETTER E WITH DOT BELOW -1EB9 LATIN SMALL LETTER E WITH DOT BELOW -1EBA LATIN CAPITAL LETTER E WITH HOOK ABOVE -1EBB LATIN SMALL LETTER E WITH HOOK ABOVE -1EBC LATIN CAPITAL LETTER E WITH TILDE -1EBD LATIN SMALL LETTER E WITH TILDE -1EBE LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE -1EBF LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE -1EC0 LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE -1EC1 LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE -1EC2 LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE -1EC3 LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE -1EC4 LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE -1EC5 LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE -1EC6 LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW -1EC7 LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW -1EC8 LATIN CAPITAL LETTER I WITH HOOK ABOVE -1EC9 LATIN SMALL LETTER I WITH HOOK ABOVE -1ECA LATIN CAPITAL LETTER I WITH DOT BELOW -1ECB LATIN SMALL LETTER I WITH DOT BELOW -1ECC LATIN CAPITAL LETTER O WITH DOT BELOW -1ECD LATIN SMALL LETTER O WITH DOT BELOW -1ECE LATIN CAPITAL LETTER O WITH HOOK ABOVE -1ECF LATIN SMALL LETTER O WITH HOOK ABOVE -1ED0 LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE -1ED1 LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE -1ED2 LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE -1ED3 LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE -1ED4 LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE -1ED5 LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE -1ED6 LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE -1ED7 LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE -1ED8 LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW -1ED9 LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW -1EDA LATIN CAPITAL LETTER O WITH HORN AND ACUTE -1EDB LATIN SMALL LETTER O WITH HORN AND ACUTE -1EDC LATIN CAPITAL LETTER O WITH HORN AND GRAVE -1EDD LATIN SMALL LETTER O WITH HORN AND GRAVE -1EDE 
LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE -1EDF LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE -1EE0 LATIN CAPITAL LETTER O WITH HORN AND TILDE -1EE1 LATIN SMALL LETTER O WITH HORN AND TILDE -1EE2 LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW -1EE3 LATIN SMALL LETTER O WITH HORN AND DOT BELOW -1EE4 LATIN CAPITAL LETTER U WITH DOT BELOW -1EE5 LATIN SMALL LETTER U WITH DOT BELOW -1EE6 LATIN CAPITAL LETTER U WITH HOOK ABOVE -1EE7 LATIN SMALL LETTER U WITH HOOK ABOVE -1EE8 LATIN CAPITAL LETTER U WITH HORN AND ACUTE -1EE9 LATIN SMALL LETTER U WITH HORN AND ACUTE -1EEA LATIN CAPITAL LETTER U WITH HORN AND GRAVE -1EEB LATIN SMALL LETTER U WITH HORN AND GRAVE -1EEC LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE -1EED LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE -1EEE LATIN CAPITAL LETTER U WITH HORN AND TILDE -1EEF LATIN SMALL LETTER U WITH HORN AND TILDE -1EF0 LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW -1EF1 LATIN SMALL LETTER U WITH HORN AND DOT BELOW -1EF2 LATIN CAPITAL LETTER Y WITH GRAVE -1EF3 LATIN SMALL LETTER Y WITH GRAVE -1EF4 LATIN CAPITAL LETTER Y WITH DOT BELOW -1EF5 LATIN SMALL LETTER Y WITH DOT BELOW -1EF6 LATIN CAPITAL LETTER Y WITH HOOK ABOVE -1EF7 LATIN SMALL LETTER Y WITH HOOK ABOVE -1EF8 LATIN CAPITAL LETTER Y WITH TILDE -1EF9 LATIN SMALL LETTER Y WITH TILDE -1EFA LATIN CAPITAL LETTER MIDDLE-WELSH LL -1EFB LATIN SMALL LETTER MIDDLE-WELSH LL -1EFC LATIN CAPITAL LETTER MIDDLE-WELSH V -1EFD LATIN SMALL LETTER MIDDLE-WELSH V -1EFE LATIN CAPITAL LETTER Y WITH LOOP -1EFF LATIN SMALL LETTER Y WITH LOOP -1F00 GREEK SMALL LETTER ALPHA WITH PSILI -1F01 GREEK SMALL LETTER ALPHA WITH DASIA -1F02 GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA -1F03 GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA -1F04 GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA -1F05 GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA -1F06 GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI -1F07 GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI -1F08 GREEK CAPITAL LETTER ALPHA WITH PSILI -1F09 
GREEK CAPITAL LETTER ALPHA WITH DASIA -1F0A GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA -1F0B GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA -1F0C GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA -1F0D GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA -1F0E GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI -1F0F GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI -1F10 GREEK SMALL LETTER EPSILON WITH PSILI -1F11 GREEK SMALL LETTER EPSILON WITH DASIA -1F12 GREEK SMALL LETTER EPSILON WITH PSILI AND VARIA -1F13 GREEK SMALL LETTER EPSILON WITH DASIA AND VARIA -1F14 GREEK SMALL LETTER EPSILON WITH PSILI AND OXIA -1F15 GREEK SMALL LETTER EPSILON WITH DASIA AND OXIA -1F18 GREEK CAPITAL LETTER EPSILON WITH PSILI -1F19 GREEK CAPITAL LETTER EPSILON WITH DASIA -1F1A GREEK CAPITAL LETTER EPSILON WITH PSILI AND VARIA -1F1B GREEK CAPITAL LETTER EPSILON WITH DASIA AND VARIA -1F1C GREEK CAPITAL LETTER EPSILON WITH PSILI AND OXIA -1F1D GREEK CAPITAL LETTER EPSILON WITH DASIA AND OXIA -1F20 GREEK SMALL LETTER ETA WITH PSILI -1F21 GREEK SMALL LETTER ETA WITH DASIA -1F22 GREEK SMALL LETTER ETA WITH PSILI AND VARIA -1F23 GREEK SMALL LETTER ETA WITH DASIA AND VARIA -1F24 GREEK SMALL LETTER ETA WITH PSILI AND OXIA -1F25 GREEK SMALL LETTER ETA WITH DASIA AND OXIA -1F26 GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI -1F27 GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI -1F28 GREEK CAPITAL LETTER ETA WITH PSILI -1F29 GREEK CAPITAL LETTER ETA WITH DASIA -1F2A GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA -1F2B GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA -1F2C GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA -1F2D GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA -1F2E GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI -1F2F GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI -1F30 GREEK SMALL LETTER IOTA WITH PSILI -1F31 GREEK SMALL LETTER IOTA WITH DASIA -1F32 GREEK SMALL LETTER IOTA WITH PSILI AND VARIA -1F33 GREEK SMALL LETTER IOTA WITH DASIA AND VARIA -1F34 GREEK SMALL LETTER IOTA 
WITH PSILI AND OXIA -1F35 GREEK SMALL LETTER IOTA WITH DASIA AND OXIA -1F36 GREEK SMALL LETTER IOTA WITH PSILI AND PERISPOMENI -1F37 GREEK SMALL LETTER IOTA WITH DASIA AND PERISPOMENI -1F38 GREEK CAPITAL LETTER IOTA WITH PSILI -1F39 GREEK CAPITAL LETTER IOTA WITH DASIA -1F3A GREEK CAPITAL LETTER IOTA WITH PSILI AND VARIA -1F3B GREEK CAPITAL LETTER IOTA WITH DASIA AND VARIA -1F3C GREEK CAPITAL LETTER IOTA WITH PSILI AND OXIA -1F3D GREEK CAPITAL LETTER IOTA WITH DASIA AND OXIA -1F3E GREEK CAPITAL LETTER IOTA WITH PSILI AND PERISPOMENI -1F3F GREEK CAPITAL LETTER IOTA WITH DASIA AND PERISPOMENI -1F40 GREEK SMALL LETTER OMICRON WITH PSILI -1F41 GREEK SMALL LETTER OMICRON WITH DASIA -1F42 GREEK SMALL LETTER OMICRON WITH PSILI AND VARIA -1F43 GREEK SMALL LETTER OMICRON WITH DASIA AND VARIA -1F44 GREEK SMALL LETTER OMICRON WITH PSILI AND OXIA -1F45 GREEK SMALL LETTER OMICRON WITH DASIA AND OXIA -1F48 GREEK CAPITAL LETTER OMICRON WITH PSILI -1F49 GREEK CAPITAL LETTER OMICRON WITH DASIA -1F4A GREEK CAPITAL LETTER OMICRON WITH PSILI AND VARIA -1F4B GREEK CAPITAL LETTER OMICRON WITH DASIA AND VARIA -1F4C GREEK CAPITAL LETTER OMICRON WITH PSILI AND OXIA -1F4D GREEK CAPITAL LETTER OMICRON WITH DASIA AND OXIA -1F50 GREEK SMALL LETTER UPSILON WITH PSILI -1F51 GREEK SMALL LETTER UPSILON WITH DASIA -1F52 GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA -1F53 GREEK SMALL LETTER UPSILON WITH DASIA AND VARIA -1F54 GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA -1F55 GREEK SMALL LETTER UPSILON WITH DASIA AND OXIA -1F56 GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI -1F57 GREEK SMALL LETTER UPSILON WITH DASIA AND PERISPOMENI -1F59 GREEK CAPITAL LETTER UPSILON WITH DASIA -1F5B GREEK CAPITAL LETTER UPSILON WITH DASIA AND VARIA -1F5D GREEK CAPITAL LETTER UPSILON WITH DASIA AND OXIA -1F5F GREEK CAPITAL LETTER UPSILON WITH DASIA AND PERISPOMENI -1F60 GREEK SMALL LETTER OMEGA WITH PSILI -1F61 GREEK SMALL LETTER OMEGA WITH DASIA -1F62 GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA -1F63 
GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA -1F64 GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA -1F65 GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA -1F66 GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI -1F67 GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI -1F68 GREEK CAPITAL LETTER OMEGA WITH PSILI -1F69 GREEK CAPITAL LETTER OMEGA WITH DASIA -1F6A GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA -1F6B GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA -1F6C GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA -1F6D GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA -1F6E GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI -1F6F GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI -1F70 GREEK SMALL LETTER ALPHA WITH VARIA -1F71 GREEK SMALL LETTER ALPHA WITH OXIA -1F72 GREEK SMALL LETTER EPSILON WITH VARIA -1F73 GREEK SMALL LETTER EPSILON WITH OXIA -1F74 GREEK SMALL LETTER ETA WITH VARIA -1F75 GREEK SMALL LETTER ETA WITH OXIA -1F76 GREEK SMALL LETTER IOTA WITH VARIA -1F77 GREEK SMALL LETTER IOTA WITH OXIA -1F78 GREEK SMALL LETTER OMICRON WITH VARIA -1F79 GREEK SMALL LETTER OMICRON WITH OXIA -1F7A GREEK SMALL LETTER UPSILON WITH VARIA -1F7B GREEK SMALL LETTER UPSILON WITH OXIA -1F7C GREEK SMALL LETTER OMEGA WITH VARIA -1F7D GREEK SMALL LETTER OMEGA WITH OXIA -1F80 GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI -1F81 GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI -1F82 GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI -1F83 GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI -1F84 GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI -1F85 GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI -1F86 GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -1F87 GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -1F88 GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI -1F89 GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI -1F8A GREEK CAPITAL LETTER ALPHA WITH PSILI AND 
VARIA AND PROSGEGRAMMENI -1F8B GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI -1F8C GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI -1F8D GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI -1F8E GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -1F8F GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -1F90 GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI -1F91 GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI -1F92 GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI -1F93 GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI -1F94 GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI -1F95 GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI -1F96 GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -1F97 GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -1F98 GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI -1F99 GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI -1F9A GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI -1F9B GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI -1F9C GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI -1F9D GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI -1F9E GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -1F9F GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -1FA0 GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI -1FA1 GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI -1FA2 GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI -1FA3 GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI -1FA4 GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI -1FA5 GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI -1FA6 GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -1FA7 GREEK SMALL LETTER OMEGA 
WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -1FA8 GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI -1FA9 GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI -1FAA GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI -1FAB GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI -1FAC GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI -1FAD GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI -1FAE GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -1FAF GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -1FB0 GREEK SMALL LETTER ALPHA WITH VRACHY -1FB1 GREEK SMALL LETTER ALPHA WITH MACRON -1FB2 GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI -1FB3 GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI -1FB4 GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI -1FB6 GREEK SMALL LETTER ALPHA WITH PERISPOMENI -1FB7 GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI -1FB8 GREEK CAPITAL LETTER ALPHA WITH VRACHY -1FB9 GREEK CAPITAL LETTER ALPHA WITH MACRON -1FBA GREEK CAPITAL LETTER ALPHA WITH VARIA -1FBB GREEK CAPITAL LETTER ALPHA WITH OXIA -1FBC GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI -1FBD GREEK KORONIS -1FBE GREEK PROSGEGRAMMENI -1FBF GREEK PSILI -1FC0 GREEK PERISPOMENI -1FC1 GREEK DIALYTIKA AND PERISPOMENI -1FC2 GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI -1FC3 GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI -1FC4 GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI -1FC6 GREEK SMALL LETTER ETA WITH PERISPOMENI -1FC7 GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI -1FC8 GREEK CAPITAL LETTER EPSILON WITH VARIA -1FC9 GREEK CAPITAL LETTER EPSILON WITH OXIA -1FCA GREEK CAPITAL LETTER ETA WITH VARIA -1FCB GREEK CAPITAL LETTER ETA WITH OXIA -1FCC GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI -1FCD GREEK PSILI AND VARIA -1FCE GREEK PSILI AND OXIA -1FCF GREEK PSILI AND PERISPOMENI -1FD0 GREEK SMALL LETTER IOTA WITH VRACHY -1FD1 GREEK SMALL LETTER 
IOTA WITH MACRON -1FD2 GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA -1FD3 GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA -1FD6 GREEK SMALL LETTER IOTA WITH PERISPOMENI -1FD7 GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI -1FD8 GREEK CAPITAL LETTER IOTA WITH VRACHY -1FD9 GREEK CAPITAL LETTER IOTA WITH MACRON -1FDA GREEK CAPITAL LETTER IOTA WITH VARIA -1FDB GREEK CAPITAL LETTER IOTA WITH OXIA -1FDD GREEK DASIA AND VARIA -1FDE GREEK DASIA AND OXIA -1FDF GREEK DASIA AND PERISPOMENI -1FE0 GREEK SMALL LETTER UPSILON WITH VRACHY -1FE1 GREEK SMALL LETTER UPSILON WITH MACRON -1FE2 GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA -1FE3 GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA -1FE4 GREEK SMALL LETTER RHO WITH PSILI -1FE5 GREEK SMALL LETTER RHO WITH DASIA -1FE6 GREEK SMALL LETTER UPSILON WITH PERISPOMENI -1FE7 GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI -1FE8 GREEK CAPITAL LETTER UPSILON WITH VRACHY -1FE9 GREEK CAPITAL LETTER UPSILON WITH MACRON -1FEA GREEK CAPITAL LETTER UPSILON WITH VARIA -1FEB GREEK CAPITAL LETTER UPSILON WITH OXIA -1FEC GREEK CAPITAL LETTER RHO WITH DASIA -1FED GREEK DIALYTIKA AND VARIA -1FEE GREEK DIALYTIKA AND OXIA -1FEF GREEK VARIA -1FF2 GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI -1FF3 GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI -1FF4 GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI -1FF6 GREEK SMALL LETTER OMEGA WITH PERISPOMENI -1FF7 GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI -1FF8 GREEK CAPITAL LETTER OMICRON WITH VARIA -1FF9 GREEK CAPITAL LETTER OMICRON WITH OXIA -1FFA GREEK CAPITAL LETTER OMEGA WITH VARIA -1FFB GREEK CAPITAL LETTER OMEGA WITH OXIA -1FFC GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI -1FFD GREEK OXIA -1FFE GREEK DASIA -2000 EN QUAD -2001 EM QUAD -2002 EN SPACE -2003 EM SPACE -2004 THREE-PER-EM SPACE -2005 FOUR-PER-EM SPACE -2006 SIX-PER-EM SPACE -2007 FIGURE SPACE -2008 PUNCTUATION SPACE -2009 THIN SPACE -200A HAIR SPACE -200B ZERO WIDTH SPACE -200C ZERO WIDTH 
NON-JOINER -200D ZERO WIDTH JOINER -200E LEFT-TO-RIGHT MARK -200F RIGHT-TO-LEFT MARK -2010 HYPHEN -2011 NON-BREAKING HYPHEN -2012 FIGURE DASH -2013 EN DASH -2014 EM DASH -2015 HORIZONTAL BAR -2016 DOUBLE VERTICAL LINE -2017 DOUBLE LOW LINE -2018 LEFT SINGLE QUOTATION MARK -2019 RIGHT SINGLE QUOTATION MARK -201A SINGLE LOW-9 QUOTATION MARK -201B SINGLE HIGH-REVERSED-9 QUOTATION MARK -201C LEFT DOUBLE QUOTATION MARK -201D RIGHT DOUBLE QUOTATION MARK -201E DOUBLE LOW-9 QUOTATION MARK -201F DOUBLE HIGH-REVERSED-9 QUOTATION MARK -2020 DAGGER -2021 DOUBLE DAGGER -2022 BULLET -2023 TRIANGULAR BULLET -2024 ONE DOT LEADER -2025 TWO DOT LEADER -2026 HORIZONTAL ELLIPSIS -2027 HYPHENATION POINT -2028 LINE SEPARATOR -2029 PARAGRAPH SEPARATOR -202A LEFT-TO-RIGHT EMBEDDING -202B RIGHT-TO-LEFT EMBEDDING -202C POP DIRECTIONAL FORMATTING -202D LEFT-TO-RIGHT OVERRIDE -202E RIGHT-TO-LEFT OVERRIDE -202F NARROW NO-BREAK SPACE -2030 PER MILLE SIGN -2031 PER TEN THOUSAND SIGN -2032 PRIME -2033 DOUBLE PRIME -2034 TRIPLE PRIME -2035 REVERSED PRIME -2036 REVERSED DOUBLE PRIME -2037 REVERSED TRIPLE PRIME -2038 CARET -2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK -203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK -203B REFERENCE MARK -203C DOUBLE EXCLAMATION MARK -203D INTERROBANG -203E OVERLINE -203F UNDERTIE -2040 CHARACTER TIE -2041 CARET INSERTION POINT -2042 ASTERISM -2043 HYPHEN BULLET -2044 FRACTION SLASH -2045 LEFT SQUARE BRACKET WITH QUILL -2046 RIGHT SQUARE BRACKET WITH QUILL -2047 DOUBLE QUESTION MARK -2048 QUESTION EXCLAMATION MARK -2049 EXCLAMATION QUESTION MARK -204A TIRONIAN SIGN ET -204B REVERSED PILCROW SIGN -204C BLACK LEFTWARDS BULLET -204D BLACK RIGHTWARDS BULLET -204E LOW ASTERISK -204F REVERSED SEMICOLON -2050 CLOSE UP -2051 TWO ASTERISKS ALIGNED VERTICALLY -2052 COMMERCIAL MINUS SIGN -2053 SWUNG DASH -2054 INVERTED UNDERTIE -2055 FLOWER PUNCTUATION MARK -2056 THREE DOT PUNCTUATION -2057 QUADRUPLE PRIME -2058 FOUR DOT PUNCTUATION -2059 FIVE DOT PUNCTUATION -205A TWO DOT 
PUNCTUATION -205B FOUR DOT MARK -205C DOTTED CROSS -205D TRICOLON -205E VERTICAL FOUR DOTS -205F MEDIUM MATHEMATICAL SPACE -2060 WORD JOINER -2061 FUNCTION APPLICATION -2062 INVISIBLE TIMES -2063 INVISIBLE SEPARATOR -2064 INVISIBLE PLUS -206A INHIBIT SYMMETRIC SWAPPING -206B ACTIVATE SYMMETRIC SWAPPING -206C INHIBIT ARABIC FORM SHAPING -206D ACTIVATE ARABIC FORM SHAPING -206E NATIONAL DIGIT SHAPES -206F NOMINAL DIGIT SHAPES -2070 SUPERSCRIPT ZERO -2071 SUPERSCRIPT LATIN SMALL LETTER I -2074 SUPERSCRIPT FOUR -2075 SUPERSCRIPT FIVE -2076 SUPERSCRIPT SIX -2077 SUPERSCRIPT SEVEN -2078 SUPERSCRIPT EIGHT -2079 SUPERSCRIPT NINE -207A SUPERSCRIPT PLUS SIGN -207B SUPERSCRIPT MINUS -207C SUPERSCRIPT EQUALS SIGN -207D SUPERSCRIPT LEFT PARENTHESIS -207E SUPERSCRIPT RIGHT PARENTHESIS -207F SUPERSCRIPT LATIN SMALL LETTER N -2080 SUBSCRIPT ZERO -2081 SUBSCRIPT ONE -2082 SUBSCRIPT TWO -2083 SUBSCRIPT THREE -2084 SUBSCRIPT FOUR -2085 SUBSCRIPT FIVE -2086 SUBSCRIPT SIX -2087 SUBSCRIPT SEVEN -2088 SUBSCRIPT EIGHT -2089 SUBSCRIPT NINE -208A SUBSCRIPT PLUS SIGN -208B SUBSCRIPT MINUS -208C SUBSCRIPT EQUALS SIGN -208D SUBSCRIPT LEFT PARENTHESIS -208E SUBSCRIPT RIGHT PARENTHESIS -2090 LATIN SUBSCRIPT SMALL LETTER A -2091 LATIN SUBSCRIPT SMALL LETTER E -2092 LATIN SUBSCRIPT SMALL LETTER O -2093 LATIN SUBSCRIPT SMALL LETTER X -2094 LATIN SUBSCRIPT SMALL LETTER SCHWA -20A0 EURO-CURRENCY SIGN -20A1 COLON SIGN -20A2 CRUZEIRO SIGN -20A3 FRENCH FRANC SIGN -20A4 LIRA SIGN -20A5 MILL SIGN -20A6 NAIRA SIGN -20A7 PESETA SIGN -20A8 RUPEE SIGN -20A9 WON SIGN -20AA NEW SHEQEL SIGN -20AB DONG SIGN -20AC EURO SIGN -20AD KIP SIGN -20AE TUGRIK SIGN -20AF DRACHMA SIGN -20B0 GERMAN PENNY SIGN -20B1 PESO SIGN -20B2 GUARANI SIGN -20B3 AUSTRAL SIGN -20B4 HRYVNIA SIGN -20B5 CEDI SIGN -20B6 LIVRE TOURNOIS SIGN -20B7 SPESMILO SIGN -20B8 TENGE SIGN -20D0 COMBINING LEFT HARPOON ABOVE -20D1 COMBINING RIGHT HARPOON ABOVE -20D2 COMBINING LONG VERTICAL LINE OVERLAY -20D3 COMBINING SHORT VERTICAL LINE OVERLAY -20D4 
COMBINING ANTICLOCKWISE ARROW ABOVE -20D5 COMBINING CLOCKWISE ARROW ABOVE -20D6 COMBINING LEFT ARROW ABOVE -20D7 COMBINING RIGHT ARROW ABOVE -20D8 COMBINING RING OVERLAY -20D9 COMBINING CLOCKWISE RING OVERLAY -20DA COMBINING ANTICLOCKWISE RING OVERLAY -20DB COMBINING THREE DOTS ABOVE -20DC COMBINING FOUR DOTS ABOVE -20DD COMBINING ENCLOSING CIRCLE -20DE COMBINING ENCLOSING SQUARE -20DF COMBINING ENCLOSING DIAMOND -20E0 COMBINING ENCLOSING CIRCLE BACKSLASH -20E1 COMBINING LEFT RIGHT ARROW ABOVE -20E2 COMBINING ENCLOSING SCREEN -20E3 COMBINING ENCLOSING KEYCAP -20E4 COMBINING ENCLOSING UPWARD POINTING TRIANGLE -20E5 COMBINING REVERSE SOLIDUS OVERLAY -20E6 COMBINING DOUBLE VERTICAL STROKE OVERLAY -20E7 COMBINING ANNUITY SYMBOL -20E8 COMBINING TRIPLE UNDERDOT -20E9 COMBINING WIDE BRIDGE ABOVE -20EA COMBINING LEFTWARDS ARROW OVERLAY -20EB COMBINING LONG DOUBLE SOLIDUS OVERLAY -20EC COMBINING RIGHTWARDS HARPOON WITH BARB DOWNWARDS -20ED COMBINING LEFTWARDS HARPOON WITH BARB DOWNWARDS -20EE COMBINING LEFT ARROW BELOW -20EF COMBINING RIGHT ARROW BELOW -20F0 COMBINING ASTERISK ABOVE -2100 ACCOUNT OF -2101 ADDRESSED TO THE SUBJECT -2102 DOUBLE-STRUCK CAPITAL C -2103 DEGREE CELSIUS -2104 CENTRE LINE SYMBOL -2105 CARE OF -2106 CADA UNA -2107 EULER CONSTANT -2108 SCRUPLE -2109 DEGREE FAHRENHEIT -210A SCRIPT SMALL G -210B SCRIPT CAPITAL H -210C BLACK-LETTER CAPITAL H -210D DOUBLE-STRUCK CAPITAL H -210E PLANCK CONSTANT -210F PLANCK CONSTANT OVER TWO PI -2110 SCRIPT CAPITAL I -2111 BLACK-LETTER CAPITAL I -2112 SCRIPT CAPITAL L -2113 SCRIPT SMALL L -2114 L B BAR SYMBOL -2115 DOUBLE-STRUCK CAPITAL N -2116 NUMERO SIGN -2117 SOUND RECORDING COPYRIGHT -2118 SCRIPT CAPITAL P -2119 DOUBLE-STRUCK CAPITAL P -211A DOUBLE-STRUCK CAPITAL Q -211B SCRIPT CAPITAL R -211C BLACK-LETTER CAPITAL R -211D DOUBLE-STRUCK CAPITAL R -211E PRESCRIPTION TAKE -211F RESPONSE -2120 SERVICE MARK -2121 TELEPHONE SIGN -2122 TRADE MARK SIGN -2123 VERSICLE -2124 DOUBLE-STRUCK CAPITAL Z -2125 OUNCE SIGN -2126 OHM 
SIGN -2127 INVERTED OHM SIGN -2128 BLACK-LETTER CAPITAL Z -2129 TURNED GREEK SMALL LETTER IOTA -212A KELVIN SIGN -212B ANGSTROM SIGN -212C SCRIPT CAPITAL B -212D BLACK-LETTER CAPITAL C -212E ESTIMATED SYMBOL -212F SCRIPT SMALL E -2130 SCRIPT CAPITAL E -2131 SCRIPT CAPITAL F -2132 TURNED CAPITAL F -2133 SCRIPT CAPITAL M -2134 SCRIPT SMALL O -2135 ALEF SYMBOL -2136 BET SYMBOL -2137 GIMEL SYMBOL -2138 DALET SYMBOL -2139 INFORMATION SOURCE -213A ROTATED CAPITAL Q -213B FACSIMILE SIGN -213C DOUBLE-STRUCK SMALL PI -213D DOUBLE-STRUCK SMALL GAMMA -213E DOUBLE-STRUCK CAPITAL GAMMA -213F DOUBLE-STRUCK CAPITAL PI -2140 DOUBLE-STRUCK N-ARY SUMMATION -2141 TURNED SANS-SERIF CAPITAL G -2142 TURNED SANS-SERIF CAPITAL L -2143 REVERSED SANS-SERIF CAPITAL L -2144 TURNED SANS-SERIF CAPITAL Y -2145 DOUBLE-STRUCK ITALIC CAPITAL D -2146 DOUBLE-STRUCK ITALIC SMALL D -2147 DOUBLE-STRUCK ITALIC SMALL E -2148 DOUBLE-STRUCK ITALIC SMALL I -2149 DOUBLE-STRUCK ITALIC SMALL J -214A PROPERTY LINE -214B TURNED AMPERSAND -214C PER SIGN -214D AKTIESELSKAB -214E TURNED SMALL F -214F SYMBOL FOR SAMARITAN SOURCE -2150 VULGAR FRACTION ONE SEVENTH -2151 VULGAR FRACTION ONE NINTH -2152 VULGAR FRACTION ONE TENTH -2153 VULGAR FRACTION ONE THIRD -2154 VULGAR FRACTION TWO THIRDS -2155 VULGAR FRACTION ONE FIFTH -2156 VULGAR FRACTION TWO FIFTHS -2157 VULGAR FRACTION THREE FIFTHS -2158 VULGAR FRACTION FOUR FIFTHS -2159 VULGAR FRACTION ONE SIXTH -215A VULGAR FRACTION FIVE SIXTHS -215B VULGAR FRACTION ONE EIGHTH -215C VULGAR FRACTION THREE EIGHTHS -215D VULGAR FRACTION FIVE EIGHTHS -215E VULGAR FRACTION SEVEN EIGHTHS -215F FRACTION NUMERATOR ONE -2160 ROMAN NUMERAL ONE -2161 ROMAN NUMERAL TWO -2162 ROMAN NUMERAL THREE -2163 ROMAN NUMERAL FOUR -2164 ROMAN NUMERAL FIVE -2165 ROMAN NUMERAL SIX -2166 ROMAN NUMERAL SEVEN -2167 ROMAN NUMERAL EIGHT -2168 ROMAN NUMERAL NINE -2169 ROMAN NUMERAL TEN -216A ROMAN NUMERAL ELEVEN -216B ROMAN NUMERAL TWELVE -216C ROMAN NUMERAL FIFTY -216D ROMAN NUMERAL ONE HUNDRED -216E ROMAN 
NUMERAL FIVE HUNDRED -216F ROMAN NUMERAL ONE THOUSAND -2170 SMALL ROMAN NUMERAL ONE -2171 SMALL ROMAN NUMERAL TWO -2172 SMALL ROMAN NUMERAL THREE -2173 SMALL ROMAN NUMERAL FOUR -2174 SMALL ROMAN NUMERAL FIVE -2175 SMALL ROMAN NUMERAL SIX -2176 SMALL ROMAN NUMERAL SEVEN -2177 SMALL ROMAN NUMERAL EIGHT -2178 SMALL ROMAN NUMERAL NINE -2179 SMALL ROMAN NUMERAL TEN -217A SMALL ROMAN NUMERAL ELEVEN -217B SMALL ROMAN NUMERAL TWELVE -217C SMALL ROMAN NUMERAL FIFTY -217D SMALL ROMAN NUMERAL ONE HUNDRED -217E SMALL ROMAN NUMERAL FIVE HUNDRED -217F SMALL ROMAN NUMERAL ONE THOUSAND -2180 ROMAN NUMERAL ONE THOUSAND C D -2181 ROMAN NUMERAL FIVE THOUSAND -2182 ROMAN NUMERAL TEN THOUSAND -2183 ROMAN NUMERAL REVERSED ONE HUNDRED -2184 LATIN SMALL LETTER REVERSED C -2185 ROMAN NUMERAL SIX LATE FORM -2186 ROMAN NUMERAL FIFTY EARLY FORM -2187 ROMAN NUMERAL FIFTY THOUSAND -2188 ROMAN NUMERAL ONE HUNDRED THOUSAND -2189 VULGAR FRACTION ZERO THIRDS -2190 LEFTWARDS ARROW -2191 UPWARDS ARROW -2192 RIGHTWARDS ARROW -2193 DOWNWARDS ARROW -2194 LEFT RIGHT ARROW -2195 UP DOWN ARROW -2196 NORTH WEST ARROW -2197 NORTH EAST ARROW -2198 SOUTH EAST ARROW -2199 SOUTH WEST ARROW -219A LEFTWARDS ARROW WITH STROKE -219B RIGHTWARDS ARROW WITH STROKE -219C LEFTWARDS WAVE ARROW -219D RIGHTWARDS WAVE ARROW -219E LEFTWARDS TWO HEADED ARROW -219F UPWARDS TWO HEADED ARROW -21A0 RIGHTWARDS TWO HEADED ARROW -21A1 DOWNWARDS TWO HEADED ARROW -21A2 LEFTWARDS ARROW WITH TAIL -21A3 RIGHTWARDS ARROW WITH TAIL -21A4 LEFTWARDS ARROW FROM BAR -21A5 UPWARDS ARROW FROM BAR -21A6 RIGHTWARDS ARROW FROM BAR -21A7 DOWNWARDS ARROW FROM BAR -21A8 UP DOWN ARROW WITH BASE -21A9 LEFTWARDS ARROW WITH HOOK -21AA RIGHTWARDS ARROW WITH HOOK -21AB LEFTWARDS ARROW WITH LOOP -21AC RIGHTWARDS ARROW WITH LOOP -21AD LEFT RIGHT WAVE ARROW -21AE LEFT RIGHT ARROW WITH STROKE -21AF DOWNWARDS ZIGZAG ARROW -21B0 UPWARDS ARROW WITH TIP LEFTWARDS -21B1 UPWARDS ARROW WITH TIP RIGHTWARDS -21B2 DOWNWARDS ARROW WITH TIP LEFTWARDS -21B3 DOWNWARDS ARROW 
WITH TIP RIGHTWARDS -21B4 RIGHTWARDS ARROW WITH CORNER DOWNWARDS -21B5 DOWNWARDS ARROW WITH CORNER LEFTWARDS -21B6 ANTICLOCKWISE TOP SEMICIRCLE ARROW -21B7 CLOCKWISE TOP SEMICIRCLE ARROW -21B8 NORTH WEST ARROW TO LONG BAR -21B9 LEFTWARDS ARROW TO BAR OVER RIGHTWARDS ARROW TO BAR -21BA ANTICLOCKWISE OPEN CIRCLE ARROW -21BB CLOCKWISE OPEN CIRCLE ARROW -21BC LEFTWARDS HARPOON WITH BARB UPWARDS -21BD LEFTWARDS HARPOON WITH BARB DOWNWARDS -21BE UPWARDS HARPOON WITH BARB RIGHTWARDS -21BF UPWARDS HARPOON WITH BARB LEFTWARDS -21C0 RIGHTWARDS HARPOON WITH BARB UPWARDS -21C1 RIGHTWARDS HARPOON WITH BARB DOWNWARDS -21C2 DOWNWARDS HARPOON WITH BARB RIGHTWARDS -21C3 DOWNWARDS HARPOON WITH BARB LEFTWARDS -21C4 RIGHTWARDS ARROW OVER LEFTWARDS ARROW -21C5 UPWARDS ARROW LEFTWARDS OF DOWNWARDS ARROW -21C6 LEFTWARDS ARROW OVER RIGHTWARDS ARROW -21C7 LEFTWARDS PAIRED ARROWS -21C8 UPWARDS PAIRED ARROWS -21C9 RIGHTWARDS PAIRED ARROWS -21CA DOWNWARDS PAIRED ARROWS -21CB LEFTWARDS HARPOON OVER RIGHTWARDS HARPOON -21CC RIGHTWARDS HARPOON OVER LEFTWARDS HARPOON -21CD LEFTWARDS DOUBLE ARROW WITH STROKE -21CE LEFT RIGHT DOUBLE ARROW WITH STROKE -21CF RIGHTWARDS DOUBLE ARROW WITH STROKE -21D0 LEFTWARDS DOUBLE ARROW -21D1 UPWARDS DOUBLE ARROW -21D2 RIGHTWARDS DOUBLE ARROW -21D3 DOWNWARDS DOUBLE ARROW -21D4 LEFT RIGHT DOUBLE ARROW -21D5 UP DOWN DOUBLE ARROW -21D6 NORTH WEST DOUBLE ARROW -21D7 NORTH EAST DOUBLE ARROW -21D8 SOUTH EAST DOUBLE ARROW -21D9 SOUTH WEST DOUBLE ARROW -21DA LEFTWARDS TRIPLE ARROW -21DB RIGHTWARDS TRIPLE ARROW -21DC LEFTWARDS SQUIGGLE ARROW -21DD RIGHTWARDS SQUIGGLE ARROW -21DE UPWARDS ARROW WITH DOUBLE STROKE -21DF DOWNWARDS ARROW WITH DOUBLE STROKE -21E0 LEFTWARDS DASHED ARROW -21E1 UPWARDS DASHED ARROW -21E2 RIGHTWARDS DASHED ARROW -21E3 DOWNWARDS DASHED ARROW -21E4 LEFTWARDS ARROW TO BAR -21E5 RIGHTWARDS ARROW TO BAR -21E6 LEFTWARDS WHITE ARROW -21E7 UPWARDS WHITE ARROW -21E8 RIGHTWARDS WHITE ARROW -21E9 DOWNWARDS WHITE ARROW -21EA UPWARDS WHITE ARROW FROM BAR -21EB 
UPWARDS WHITE ARROW ON PEDESTAL -21EC UPWARDS WHITE ARROW ON PEDESTAL WITH HORIZONTAL BAR -21ED UPWARDS WHITE ARROW ON PEDESTAL WITH VERTICAL BAR -21EE UPWARDS WHITE DOUBLE ARROW -21EF UPWARDS WHITE DOUBLE ARROW ON PEDESTAL -21F0 RIGHTWARDS WHITE ARROW FROM WALL -21F1 NORTH WEST ARROW TO CORNER -21F2 SOUTH EAST ARROW TO CORNER -21F3 UP DOWN WHITE ARROW -21F4 RIGHT ARROW WITH SMALL CIRCLE -21F5 DOWNWARDS ARROW LEFTWARDS OF UPWARDS ARROW -21F6 THREE RIGHTWARDS ARROWS -21F7 LEFTWARDS ARROW WITH VERTICAL STROKE -21F8 RIGHTWARDS ARROW WITH VERTICAL STROKE -21F9 LEFT RIGHT ARROW WITH VERTICAL STROKE -21FA LEFTWARDS ARROW WITH DOUBLE VERTICAL STROKE -21FB RIGHTWARDS ARROW WITH DOUBLE VERTICAL STROKE -21FC LEFT RIGHT ARROW WITH DOUBLE VERTICAL STROKE -21FD LEFTWARDS OPEN-HEADED ARROW -21FE RIGHTWARDS OPEN-HEADED ARROW -21FF LEFT RIGHT OPEN-HEADED ARROW -2200 FOR ALL -2201 COMPLEMENT -2202 PARTIAL DIFFERENTIAL -2203 THERE EXISTS -2204 THERE DOES NOT EXIST -2205 EMPTY SET -2206 INCREMENT -2207 NABLA -2208 ELEMENT OF -2209 NOT AN ELEMENT OF -220A SMALL ELEMENT OF -220B CONTAINS AS MEMBER -220C DOES NOT CONTAIN AS MEMBER -220D SMALL CONTAINS AS MEMBER -220E END OF PROOF -220F N-ARY PRODUCT -2210 N-ARY COPRODUCT -2211 N-ARY SUMMATION -2212 MINUS SIGN -2213 MINUS-OR-PLUS SIGN -2214 DOT PLUS -2215 DIVISION SLASH -2216 SET MINUS -2217 ASTERISK OPERATOR -2218 RING OPERATOR -2219 BULLET OPERATOR -221A SQUARE ROOT -221B CUBE ROOT -221C FOURTH ROOT -221D PROPORTIONAL TO -221E INFINITY -221F RIGHT ANGLE -2220 ANGLE -2221 MEASURED ANGLE -2222 SPHERICAL ANGLE -2223 DIVIDES -2224 DOES NOT DIVIDE -2225 PARALLEL TO -2226 NOT PARALLEL TO -2227 LOGICAL AND -2228 LOGICAL OR -2229 INTERSECTION -222A UNION -222B INTEGRAL -222C DOUBLE INTEGRAL -222D TRIPLE INTEGRAL -222E CONTOUR INTEGRAL -222F SURFACE INTEGRAL -2230 VOLUME INTEGRAL -2231 CLOCKWISE INTEGRAL -2232 CLOCKWISE CONTOUR INTEGRAL -2233 ANTICLOCKWISE CONTOUR INTEGRAL -2234 THEREFORE -2235 BECAUSE -2236 RATIO -2237 PROPORTION -2238 DOT 
MINUS -2239 EXCESS -223A GEOMETRIC PROPORTION -223B HOMOTHETIC -223C TILDE OPERATOR -223D REVERSED TILDE -223E INVERTED LAZY S -223F SINE WAVE -2240 WREATH PRODUCT -2241 NOT TILDE -2242 MINUS TILDE -2243 ASYMPTOTICALLY EQUAL TO -2244 NOT ASYMPTOTICALLY EQUAL TO -2245 APPROXIMATELY EQUAL TO -2246 APPROXIMATELY BUT NOT ACTUALLY EQUAL TO -2247 NEITHER APPROXIMATELY NOR ACTUALLY EQUAL TO -2248 ALMOST EQUAL TO -2249 NOT ALMOST EQUAL TO -224A ALMOST EQUAL OR EQUAL TO -224B TRIPLE TILDE -224C ALL EQUAL TO -224D EQUIVALENT TO -224E GEOMETRICALLY EQUIVALENT TO -224F DIFFERENCE BETWEEN -2250 APPROACHES THE LIMIT -2251 GEOMETRICALLY EQUAL TO -2252 APPROXIMATELY EQUAL TO OR THE IMAGE OF -2253 IMAGE OF OR APPROXIMATELY EQUAL TO -2254 COLON EQUALS -2255 EQUALS COLON -2256 RING IN EQUAL TO -2257 RING EQUAL TO -2258 CORRESPONDS TO -2259 ESTIMATES -225A EQUIANGULAR TO -225B STAR EQUALS -225C DELTA EQUAL TO -225D EQUAL TO BY DEFINITION -225E MEASURED BY -225F QUESTIONED EQUAL TO -2260 NOT EQUAL TO -2261 IDENTICAL TO -2262 NOT IDENTICAL TO -2263 STRICTLY EQUIVALENT TO -2264 LESS-THAN OR EQUAL TO -2265 GREATER-THAN OR EQUAL TO -2266 LESS-THAN OVER EQUAL TO -2267 GREATER-THAN OVER EQUAL TO -2268 LESS-THAN BUT NOT EQUAL TO -2269 GREATER-THAN BUT NOT EQUAL TO -226A MUCH LESS-THAN -226B MUCH GREATER-THAN -226C BETWEEN -226D NOT EQUIVALENT TO -226E NOT LESS-THAN -226F NOT GREATER-THAN -2270 NEITHER LESS-THAN NOR EQUAL TO -2271 NEITHER GREATER-THAN NOR EQUAL TO -2272 LESS-THAN OR EQUIVALENT TO -2273 GREATER-THAN OR EQUIVALENT TO -2274 NEITHER LESS-THAN NOR EQUIVALENT TO -2275 NEITHER GREATER-THAN NOR EQUIVALENT TO -2276 LESS-THAN OR GREATER-THAN -2277 GREATER-THAN OR LESS-THAN -2278 NEITHER LESS-THAN NOR GREATER-THAN -2279 NEITHER GREATER-THAN NOR LESS-THAN -227A PRECEDES -227B SUCCEEDS -227C PRECEDES OR EQUAL TO -227D SUCCEEDS OR EQUAL TO -227E PRECEDES OR EQUIVALENT TO -227F SUCCEEDS OR EQUIVALENT TO -2280 DOES NOT PRECEDE -2281 DOES NOT SUCCEED -2282 SUBSET OF -2283 SUPERSET OF -2284 NOT 
A SUBSET OF -2285 NOT A SUPERSET OF -2286 SUBSET OF OR EQUAL TO -2287 SUPERSET OF OR EQUAL TO -2288 NEITHER A SUBSET OF NOR EQUAL TO -2289 NEITHER A SUPERSET OF NOR EQUAL TO -228A SUBSET OF WITH NOT EQUAL TO -228B SUPERSET OF WITH NOT EQUAL TO -228C MULTISET -228D MULTISET MULTIPLICATION -228E MULTISET UNION -228F SQUARE IMAGE OF -2290 SQUARE ORIGINAL OF -2291 SQUARE IMAGE OF OR EQUAL TO -2292 SQUARE ORIGINAL OF OR EQUAL TO -2293 SQUARE CAP -2294 SQUARE CUP -2295 CIRCLED PLUS -2296 CIRCLED MINUS -2297 CIRCLED TIMES -2298 CIRCLED DIVISION SLASH -2299 CIRCLED DOT OPERATOR -229A CIRCLED RING OPERATOR -229B CIRCLED ASTERISK OPERATOR -229C CIRCLED EQUALS -229D CIRCLED DASH -229E SQUARED PLUS -229F SQUARED MINUS -22A0 SQUARED TIMES -22A1 SQUARED DOT OPERATOR -22A2 RIGHT TACK -22A3 LEFT TACK -22A4 DOWN TACK -22A5 UP TACK -22A6 ASSERTION -22A7 MODELS -22A8 TRUE -22A9 FORCES -22AA TRIPLE VERTICAL BAR RIGHT TURNSTILE -22AB DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE -22AC DOES NOT PROVE -22AD NOT TRUE -22AE DOES NOT FORCE -22AF NEGATED DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE -22B0 PRECEDES UNDER RELATION -22B1 SUCCEEDS UNDER RELATION -22B2 NORMAL SUBGROUP OF -22B3 CONTAINS AS NORMAL SUBGROUP -22B4 NORMAL SUBGROUP OF OR EQUAL TO -22B5 CONTAINS AS NORMAL SUBGROUP OR EQUAL TO -22B6 ORIGINAL OF -22B7 IMAGE OF -22B8 MULTIMAP -22B9 HERMITIAN CONJUGATE MATRIX -22BA INTERCALATE -22BB XOR -22BC NAND -22BD NOR -22BE RIGHT ANGLE WITH ARC -22BF RIGHT TRIANGLE -22C0 N-ARY LOGICAL AND -22C1 N-ARY LOGICAL OR -22C2 N-ARY INTERSECTION -22C3 N-ARY UNION -22C4 DIAMOND OPERATOR -22C5 DOT OPERATOR -22C6 STAR OPERATOR -22C7 DIVISION TIMES -22C8 BOWTIE -22C9 LEFT NORMAL FACTOR SEMIDIRECT PRODUCT -22CA RIGHT NORMAL FACTOR SEMIDIRECT PRODUCT -22CB LEFT SEMIDIRECT PRODUCT -22CC RIGHT SEMIDIRECT PRODUCT -22CD REVERSED TILDE EQUALS -22CE CURLY LOGICAL OR -22CF CURLY LOGICAL AND -22D0 DOUBLE SUBSET -22D1 DOUBLE SUPERSET -22D2 DOUBLE INTERSECTION -22D3 DOUBLE UNION -22D4 PITCHFORK -22D5 EQUAL AND 
PARALLEL TO -22D6 LESS-THAN WITH DOT -22D7 GREATER-THAN WITH DOT -22D8 VERY MUCH LESS-THAN -22D9 VERY MUCH GREATER-THAN -22DA LESS-THAN EQUAL TO OR GREATER-THAN -22DB GREATER-THAN EQUAL TO OR LESS-THAN -22DC EQUAL TO OR LESS-THAN -22DD EQUAL TO OR GREATER-THAN -22DE EQUAL TO OR PRECEDES -22DF EQUAL TO OR SUCCEEDS -22E0 DOES NOT PRECEDE OR EQUAL -22E1 DOES NOT SUCCEED OR EQUAL -22E2 NOT SQUARE IMAGE OF OR EQUAL TO -22E3 NOT SQUARE ORIGINAL OF OR EQUAL TO -22E4 SQUARE IMAGE OF OR NOT EQUAL TO -22E5 SQUARE ORIGINAL OF OR NOT EQUAL TO -22E6 LESS-THAN BUT NOT EQUIVALENT TO -22E7 GREATER-THAN BUT NOT EQUIVALENT TO -22E8 PRECEDES BUT NOT EQUIVALENT TO -22E9 SUCCEEDS BUT NOT EQUIVALENT TO -22EA NOT NORMAL SUBGROUP OF -22EB DOES NOT CONTAIN AS NORMAL SUBGROUP -22EC NOT NORMAL SUBGROUP OF OR EQUAL TO -22ED DOES NOT CONTAIN AS NORMAL SUBGROUP OR EQUAL -22EE VERTICAL ELLIPSIS -22EF MIDLINE HORIZONTAL ELLIPSIS -22F0 UP RIGHT DIAGONAL ELLIPSIS -22F1 DOWN RIGHT DIAGONAL ELLIPSIS -22F2 ELEMENT OF WITH LONG HORIZONTAL STROKE -22F3 ELEMENT OF WITH VERTICAL BAR AT END OF HORIZONTAL STROKE -22F4 SMALL ELEMENT OF WITH VERTICAL BAR AT END OF HORIZONTAL STROKE -22F5 ELEMENT OF WITH DOT ABOVE -22F6 ELEMENT OF WITH OVERBAR -22F7 SMALL ELEMENT OF WITH OVERBAR -22F8 ELEMENT OF WITH UNDERBAR -22F9 ELEMENT OF WITH TWO HORIZONTAL STROKES -22FA CONTAINS WITH LONG HORIZONTAL STROKE -22FB CONTAINS WITH VERTICAL BAR AT END OF HORIZONTAL STROKE -22FC SMALL CONTAINS WITH VERTICAL BAR AT END OF HORIZONTAL STROKE -22FD CONTAINS WITH OVERBAR -22FE SMALL CONTAINS WITH OVERBAR -22FF Z NOTATION BAG MEMBERSHIP -2300 DIAMETER SIGN -2301 ELECTRIC ARROW -2302 HOUSE -2303 UP ARROWHEAD -2304 DOWN ARROWHEAD -2305 PROJECTIVE -2306 PERSPECTIVE -2307 WAVY LINE -2308 LEFT CEILING -2309 RIGHT CEILING -230A LEFT FLOOR -230B RIGHT FLOOR -230C BOTTOM RIGHT CROP -230D BOTTOM LEFT CROP -230E TOP RIGHT CROP -230F TOP LEFT CROP -2310 REVERSED NOT SIGN -2311 SQUARE LOZENGE -2312 ARC -2313 SEGMENT -2314 SECTOR -2315 TELEPHONE 
RECORDER -2316 POSITION INDICATOR -2317 VIEWDATA SQUARE -2318 PLACE OF INTEREST SIGN -2319 TURNED NOT SIGN -231A WATCH -231B HOURGLASS -231C TOP LEFT CORNER -231D TOP RIGHT CORNER -231E BOTTOM LEFT CORNER -231F BOTTOM RIGHT CORNER -2320 TOP HALF INTEGRAL -2321 BOTTOM HALF INTEGRAL -2322 FROWN -2323 SMILE -2324 UP ARROWHEAD BETWEEN TWO HORIZONTAL BARS -2325 OPTION KEY -2326 ERASE TO THE RIGHT -2327 X IN A RECTANGLE BOX -2328 KEYBOARD -2329 LEFT-POINTING ANGLE BRACKET -232A RIGHT-POINTING ANGLE BRACKET -232B ERASE TO THE LEFT -232C BENZENE RING -232D CYLINDRICITY -232E ALL AROUND-PROFILE -232F SYMMETRY -2330 TOTAL RUNOUT -2331 DIMENSION ORIGIN -2332 CONICAL TAPER -2333 SLOPE -2334 COUNTERBORE -2335 COUNTERSINK -2336 APL FUNCTIONAL SYMBOL I-BEAM -2337 APL FUNCTIONAL SYMBOL SQUISH QUAD -2338 APL FUNCTIONAL SYMBOL QUAD EQUAL -2339 APL FUNCTIONAL SYMBOL QUAD DIVIDE -233A APL FUNCTIONAL SYMBOL QUAD DIAMOND -233B APL FUNCTIONAL SYMBOL QUAD JOT -233C APL FUNCTIONAL SYMBOL QUAD CIRCLE -233D APL FUNCTIONAL SYMBOL CIRCLE STILE -233E APL FUNCTIONAL SYMBOL CIRCLE JOT -233F APL FUNCTIONAL SYMBOL SLASH BAR -2340 APL FUNCTIONAL SYMBOL BACKSLASH BAR -2341 APL FUNCTIONAL SYMBOL QUAD SLASH -2342 APL FUNCTIONAL SYMBOL QUAD BACKSLASH -2343 APL FUNCTIONAL SYMBOL QUAD LESS-THAN -2344 APL FUNCTIONAL SYMBOL QUAD GREATER-THAN -2345 APL FUNCTIONAL SYMBOL LEFTWARDS VANE -2346 APL FUNCTIONAL SYMBOL RIGHTWARDS VANE -2347 APL FUNCTIONAL SYMBOL QUAD LEFTWARDS ARROW -2348 APL FUNCTIONAL SYMBOL QUAD RIGHTWARDS ARROW -2349 APL FUNCTIONAL SYMBOL CIRCLE BACKSLASH -234A APL FUNCTIONAL SYMBOL DOWN TACK UNDERBAR -234B APL FUNCTIONAL SYMBOL DELTA STILE -234C APL FUNCTIONAL SYMBOL QUAD DOWN CARET -234D APL FUNCTIONAL SYMBOL QUAD DELTA -234E APL FUNCTIONAL SYMBOL DOWN TACK JOT -234F APL FUNCTIONAL SYMBOL UPWARDS VANE -2350 APL FUNCTIONAL SYMBOL QUAD UPWARDS ARROW -2351 APL FUNCTIONAL SYMBOL UP TACK OVERBAR -2352 APL FUNCTIONAL SYMBOL DEL STILE -2353 APL FUNCTIONAL SYMBOL QUAD UP CARET -2354 APL FUNCTIONAL 
SYMBOL QUAD DEL -2355 APL FUNCTIONAL SYMBOL UP TACK JOT -2356 APL FUNCTIONAL SYMBOL DOWNWARDS VANE -2357 APL FUNCTIONAL SYMBOL QUAD DOWNWARDS ARROW -2358 APL FUNCTIONAL SYMBOL QUOTE UNDERBAR -2359 APL FUNCTIONAL SYMBOL DELTA UNDERBAR -235A APL FUNCTIONAL SYMBOL DIAMOND UNDERBAR -235B APL FUNCTIONAL SYMBOL JOT UNDERBAR -235C APL FUNCTIONAL SYMBOL CIRCLE UNDERBAR -235D APL FUNCTIONAL SYMBOL UP SHOE JOT -235E APL FUNCTIONAL SYMBOL QUOTE QUAD -235F APL FUNCTIONAL SYMBOL CIRCLE STAR -2360 APL FUNCTIONAL SYMBOL QUAD COLON -2361 APL FUNCTIONAL SYMBOL UP TACK DIAERESIS -2362 APL FUNCTIONAL SYMBOL DEL DIAERESIS -2363 APL FUNCTIONAL SYMBOL STAR DIAERESIS -2364 APL FUNCTIONAL SYMBOL JOT DIAERESIS -2365 APL FUNCTIONAL SYMBOL CIRCLE DIAERESIS -2366 APL FUNCTIONAL SYMBOL DOWN SHOE STILE -2367 APL FUNCTIONAL SYMBOL LEFT SHOE STILE -2368 APL FUNCTIONAL SYMBOL TILDE DIAERESIS -2369 APL FUNCTIONAL SYMBOL GREATER-THAN DIAERESIS -236A APL FUNCTIONAL SYMBOL COMMA BAR -236B APL FUNCTIONAL SYMBOL DEL TILDE -236C APL FUNCTIONAL SYMBOL ZILDE -236D APL FUNCTIONAL SYMBOL STILE TILDE -236E APL FUNCTIONAL SYMBOL SEMICOLON UNDERBAR -236F APL FUNCTIONAL SYMBOL QUAD NOT EQUAL -2370 APL FUNCTIONAL SYMBOL QUAD QUESTION -2371 APL FUNCTIONAL SYMBOL DOWN CARET TILDE -2372 APL FUNCTIONAL SYMBOL UP CARET TILDE -2373 APL FUNCTIONAL SYMBOL IOTA -2374 APL FUNCTIONAL SYMBOL RHO -2375 APL FUNCTIONAL SYMBOL OMEGA -2376 APL FUNCTIONAL SYMBOL ALPHA UNDERBAR -2377 APL FUNCTIONAL SYMBOL EPSILON UNDERBAR -2378 APL FUNCTIONAL SYMBOL IOTA UNDERBAR -2379 APL FUNCTIONAL SYMBOL OMEGA UNDERBAR -237A APL FUNCTIONAL SYMBOL ALPHA -237B NOT CHECK MARK -237C RIGHT ANGLE WITH DOWNWARDS ZIGZAG ARROW -237D SHOULDERED OPEN BOX -237E BELL SYMBOL -237F VERTICAL LINE WITH MIDDLE DOT -2380 INSERTION SYMBOL -2381 CONTINUOUS UNDERLINE SYMBOL -2382 DISCONTINUOUS UNDERLINE SYMBOL -2383 EMPHASIS SYMBOL -2384 COMPOSITION SYMBOL -2385 WHITE SQUARE WITH CENTRE VERTICAL LINE -2386 ENTER SYMBOL -2387 ALTERNATIVE KEY SYMBOL -2388 HELM SYMBOL 
-2389 CIRCLED HORIZONTAL BAR WITH NOTCH -238A CIRCLED TRIANGLE DOWN -238B BROKEN CIRCLE WITH NORTHWEST ARROW -238C UNDO SYMBOL -238D MONOSTABLE SYMBOL -238E HYSTERESIS SYMBOL -238F OPEN-CIRCUIT-OUTPUT H-TYPE SYMBOL -2390 OPEN-CIRCUIT-OUTPUT L-TYPE SYMBOL -2391 PASSIVE-PULL-DOWN-OUTPUT SYMBOL -2392 PASSIVE-PULL-UP-OUTPUT SYMBOL -2393 DIRECT CURRENT SYMBOL FORM TWO -2394 SOFTWARE-FUNCTION SYMBOL -2395 APL FUNCTIONAL SYMBOL QUAD -2396 DECIMAL SEPARATOR KEY SYMBOL -2397 PREVIOUS PAGE -2398 NEXT PAGE -2399 PRINT SCREEN SYMBOL -239A CLEAR SCREEN SYMBOL -239B LEFT PARENTHESIS UPPER HOOK -239C LEFT PARENTHESIS EXTENSION -239D LEFT PARENTHESIS LOWER HOOK -239E RIGHT PARENTHESIS UPPER HOOK -239F RIGHT PARENTHESIS EXTENSION -23A0 RIGHT PARENTHESIS LOWER HOOK -23A1 LEFT SQUARE BRACKET UPPER CORNER -23A2 LEFT SQUARE BRACKET EXTENSION -23A3 LEFT SQUARE BRACKET LOWER CORNER -23A4 RIGHT SQUARE BRACKET UPPER CORNER -23A5 RIGHT SQUARE BRACKET EXTENSION -23A6 RIGHT SQUARE BRACKET LOWER CORNER -23A7 LEFT CURLY BRACKET UPPER HOOK -23A8 LEFT CURLY BRACKET MIDDLE PIECE -23A9 LEFT CURLY BRACKET LOWER HOOK -23AA CURLY BRACKET EXTENSION -23AB RIGHT CURLY BRACKET UPPER HOOK -23AC RIGHT CURLY BRACKET MIDDLE PIECE -23AD RIGHT CURLY BRACKET LOWER HOOK -23AE INTEGRAL EXTENSION -23AF HORIZONTAL LINE EXTENSION -23B0 UPPER LEFT OR LOWER RIGHT CURLY BRACKET SECTION -23B1 UPPER RIGHT OR LOWER LEFT CURLY BRACKET SECTION -23B2 SUMMATION TOP -23B3 SUMMATION BOTTOM -23B4 TOP SQUARE BRACKET -23B5 BOTTOM SQUARE BRACKET -23B6 BOTTOM SQUARE BRACKET OVER TOP SQUARE BRACKET -23B7 RADICAL SYMBOL BOTTOM -23B8 LEFT VERTICAL BOX LINE -23B9 RIGHT VERTICAL BOX LINE -23BA HORIZONTAL SCAN LINE-1 -23BB HORIZONTAL SCAN LINE-3 -23BC HORIZONTAL SCAN LINE-7 -23BD HORIZONTAL SCAN LINE-9 -23BE DENTISTRY SYMBOL LIGHT VERTICAL AND TOP RIGHT -23BF DENTISTRY SYMBOL LIGHT VERTICAL AND BOTTOM RIGHT -23C0 DENTISTRY SYMBOL LIGHT VERTICAL WITH CIRCLE -23C1 DENTISTRY SYMBOL LIGHT DOWN AND HORIZONTAL WITH CIRCLE -23C2 DENTISTRY SYMBOL 
LIGHT UP AND HORIZONTAL WITH CIRCLE -23C3 DENTISTRY SYMBOL LIGHT VERTICAL WITH TRIANGLE -23C4 DENTISTRY SYMBOL LIGHT DOWN AND HORIZONTAL WITH TRIANGLE -23C5 DENTISTRY SYMBOL LIGHT UP AND HORIZONTAL WITH TRIANGLE -23C6 DENTISTRY SYMBOL LIGHT VERTICAL AND WAVE -23C7 DENTISTRY SYMBOL LIGHT DOWN AND HORIZONTAL WITH WAVE -23C8 DENTISTRY SYMBOL LIGHT UP AND HORIZONTAL WITH WAVE -23C9 DENTISTRY SYMBOL LIGHT DOWN AND HORIZONTAL -23CA DENTISTRY SYMBOL LIGHT UP AND HORIZONTAL -23CB DENTISTRY SYMBOL LIGHT VERTICAL AND TOP LEFT -23CC DENTISTRY SYMBOL LIGHT VERTICAL AND BOTTOM LEFT -23CD SQUARE FOOT -23CE RETURN SYMBOL -23CF EJECT SYMBOL -23D0 VERTICAL LINE EXTENSION -23D1 METRICAL BREVE -23D2 METRICAL LONG OVER SHORT -23D3 METRICAL SHORT OVER LONG -23D4 METRICAL LONG OVER TWO SHORTS -23D5 METRICAL TWO SHORTS OVER LONG -23D6 METRICAL TWO SHORTS JOINED -23D7 METRICAL TRISEME -23D8 METRICAL TETRASEME -23D9 METRICAL PENTASEME -23DA EARTH GROUND -23DB FUSE -23DC TOP PARENTHESIS -23DD BOTTOM PARENTHESIS -23DE TOP CURLY BRACKET -23DF BOTTOM CURLY BRACKET -23E0 TOP TORTOISE SHELL BRACKET -23E1 BOTTOM TORTOISE SHELL BRACKET -23E2 WHITE TRAPEZIUM -23E3 BENZENE RING WITH CIRCLE -23E4 STRAIGHTNESS -23E5 FLATNESS -23E6 AC CURRENT -23E7 ELECTRICAL INTERSECTION -23E8 DECIMAL EXPONENT SYMBOL -2400 SYMBOL FOR NULL -2401 SYMBOL FOR START OF HEADING -2402 SYMBOL FOR START OF TEXT -2403 SYMBOL FOR END OF TEXT -2404 SYMBOL FOR END OF TRANSMISSION -2405 SYMBOL FOR ENQUIRY -2406 SYMBOL FOR ACKNOWLEDGE -2407 SYMBOL FOR BELL -2408 SYMBOL FOR BACKSPACE -2409 SYMBOL FOR HORIZONTAL TABULATION -240A SYMBOL FOR LINE FEED -240B SYMBOL FOR VERTICAL TABULATION -240C SYMBOL FOR FORM FEED -240D SYMBOL FOR CARRIAGE RETURN -240E SYMBOL FOR SHIFT OUT -240F SYMBOL FOR SHIFT IN -2410 SYMBOL FOR DATA LINK ESCAPE -2411 SYMBOL FOR DEVICE CONTROL ONE -2412 SYMBOL FOR DEVICE CONTROL TWO -2413 SYMBOL FOR DEVICE CONTROL THREE -2414 SYMBOL FOR DEVICE CONTROL FOUR -2415 SYMBOL FOR NEGATIVE ACKNOWLEDGE -2416 SYMBOL FOR 
SYNCHRONOUS IDLE -2417 SYMBOL FOR END OF TRANSMISSION BLOCK -2418 SYMBOL FOR CANCEL -2419 SYMBOL FOR END OF MEDIUM -241A SYMBOL FOR SUBSTITUTE -241B SYMBOL FOR ESCAPE -241C SYMBOL FOR FILE SEPARATOR -241D SYMBOL FOR GROUP SEPARATOR -241E SYMBOL FOR RECORD SEPARATOR -241F SYMBOL FOR UNIT SEPARATOR -2420 SYMBOL FOR SPACE -2421 SYMBOL FOR DELETE -2422 BLANK SYMBOL -2423 OPEN BOX -2424 SYMBOL FOR NEWLINE -2425 SYMBOL FOR DELETE FORM TWO -2426 SYMBOL FOR SUBSTITUTE FORM TWO -2440 OCR HOOK -2441 OCR CHAIR -2442 OCR FORK -2443 OCR INVERTED FORK -2444 OCR BELT BUCKLE -2445 OCR BOW TIE -2446 OCR BRANCH BANK IDENTIFICATION -2447 OCR AMOUNT OF CHECK -2448 OCR DASH -2449 OCR CUSTOMER ACCOUNT NUMBER -244A OCR DOUBLE BACKSLASH -2460 CIRCLED DIGIT ONE -2461 CIRCLED DIGIT TWO -2462 CIRCLED DIGIT THREE -2463 CIRCLED DIGIT FOUR -2464 CIRCLED DIGIT FIVE -2465 CIRCLED DIGIT SIX -2466 CIRCLED DIGIT SEVEN -2467 CIRCLED DIGIT EIGHT -2468 CIRCLED DIGIT NINE -2469 CIRCLED NUMBER TEN -246A CIRCLED NUMBER ELEVEN -246B CIRCLED NUMBER TWELVE -246C CIRCLED NUMBER THIRTEEN -246D CIRCLED NUMBER FOURTEEN -246E CIRCLED NUMBER FIFTEEN -246F CIRCLED NUMBER SIXTEEN -2470 CIRCLED NUMBER SEVENTEEN -2471 CIRCLED NUMBER EIGHTEEN -2472 CIRCLED NUMBER NINETEEN -2473 CIRCLED NUMBER TWENTY -2474 PARENTHESIZED DIGIT ONE -2475 PARENTHESIZED DIGIT TWO -2476 PARENTHESIZED DIGIT THREE -2477 PARENTHESIZED DIGIT FOUR -2478 PARENTHESIZED DIGIT FIVE -2479 PARENTHESIZED DIGIT SIX -247A PARENTHESIZED DIGIT SEVEN -247B PARENTHESIZED DIGIT EIGHT -247C PARENTHESIZED DIGIT NINE -247D PARENTHESIZED NUMBER TEN -247E PARENTHESIZED NUMBER ELEVEN -247F PARENTHESIZED NUMBER TWELVE -2480 PARENTHESIZED NUMBER THIRTEEN -2481 PARENTHESIZED NUMBER FOURTEEN -2482 PARENTHESIZED NUMBER FIFTEEN -2483 PARENTHESIZED NUMBER SIXTEEN -2484 PARENTHESIZED NUMBER SEVENTEEN -2485 PARENTHESIZED NUMBER EIGHTEEN -2486 PARENTHESIZED NUMBER NINETEEN -2487 PARENTHESIZED NUMBER TWENTY -2488 DIGIT ONE FULL STOP -2489 DIGIT TWO FULL STOP -248A DIGIT THREE 
FULL STOP -248B DIGIT FOUR FULL STOP -248C DIGIT FIVE FULL STOP -248D DIGIT SIX FULL STOP -248E DIGIT SEVEN FULL STOP -248F DIGIT EIGHT FULL STOP -2490 DIGIT NINE FULL STOP -2491 NUMBER TEN FULL STOP -2492 NUMBER ELEVEN FULL STOP -2493 NUMBER TWELVE FULL STOP -2494 NUMBER THIRTEEN FULL STOP -2495 NUMBER FOURTEEN FULL STOP -2496 NUMBER FIFTEEN FULL STOP -2497 NUMBER SIXTEEN FULL STOP -2498 NUMBER SEVENTEEN FULL STOP -2499 NUMBER EIGHTEEN FULL STOP -249A NUMBER NINETEEN FULL STOP -249B NUMBER TWENTY FULL STOP -249C PARENTHESIZED LATIN SMALL LETTER A -249D PARENTHESIZED LATIN SMALL LETTER B -249E PARENTHESIZED LATIN SMALL LETTER C -249F PARENTHESIZED LATIN SMALL LETTER D -24A0 PARENTHESIZED LATIN SMALL LETTER E -24A1 PARENTHESIZED LATIN SMALL LETTER F -24A2 PARENTHESIZED LATIN SMALL LETTER G -24A3 PARENTHESIZED LATIN SMALL LETTER H -24A4 PARENTHESIZED LATIN SMALL LETTER I -24A5 PARENTHESIZED LATIN SMALL LETTER J -24A6 PARENTHESIZED LATIN SMALL LETTER K -24A7 PARENTHESIZED LATIN SMALL LETTER L -24A8 PARENTHESIZED LATIN SMALL LETTER M -24A9 PARENTHESIZED LATIN SMALL LETTER N -24AA PARENTHESIZED LATIN SMALL LETTER O -24AB PARENTHESIZED LATIN SMALL LETTER P -24AC PARENTHESIZED LATIN SMALL LETTER Q -24AD PARENTHESIZED LATIN SMALL LETTER R -24AE PARENTHESIZED LATIN SMALL LETTER S -24AF PARENTHESIZED LATIN SMALL LETTER T -24B0 PARENTHESIZED LATIN SMALL LETTER U -24B1 PARENTHESIZED LATIN SMALL LETTER V -24B2 PARENTHESIZED LATIN SMALL LETTER W -24B3 PARENTHESIZED LATIN SMALL LETTER X -24B4 PARENTHESIZED LATIN SMALL LETTER Y -24B5 PARENTHESIZED LATIN SMALL LETTER Z -24B6 CIRCLED LATIN CAPITAL LETTER A -24B7 CIRCLED LATIN CAPITAL LETTER B -24B8 CIRCLED LATIN CAPITAL LETTER C -24B9 CIRCLED LATIN CAPITAL LETTER D -24BA CIRCLED LATIN CAPITAL LETTER E -24BB CIRCLED LATIN CAPITAL LETTER F -24BC CIRCLED LATIN CAPITAL LETTER G -24BD CIRCLED LATIN CAPITAL LETTER H -24BE CIRCLED LATIN CAPITAL LETTER I -24BF CIRCLED LATIN CAPITAL LETTER J -24C0 CIRCLED LATIN CAPITAL LETTER K -24C1 CIRCLED 
LATIN CAPITAL LETTER L -24C2 CIRCLED LATIN CAPITAL LETTER M -24C3 CIRCLED LATIN CAPITAL LETTER N -24C4 CIRCLED LATIN CAPITAL LETTER O -24C5 CIRCLED LATIN CAPITAL LETTER P -24C6 CIRCLED LATIN CAPITAL LETTER Q -24C7 CIRCLED LATIN CAPITAL LETTER R -24C8 CIRCLED LATIN CAPITAL LETTER S -24C9 CIRCLED LATIN CAPITAL LETTER T -24CA CIRCLED LATIN CAPITAL LETTER U -24CB CIRCLED LATIN CAPITAL LETTER V -24CC CIRCLED LATIN CAPITAL LETTER W -24CD CIRCLED LATIN CAPITAL LETTER X -24CE CIRCLED LATIN CAPITAL LETTER Y -24CF CIRCLED LATIN CAPITAL LETTER Z -24D0 CIRCLED LATIN SMALL LETTER A -24D1 CIRCLED LATIN SMALL LETTER B -24D2 CIRCLED LATIN SMALL LETTER C -24D3 CIRCLED LATIN SMALL LETTER D -24D4 CIRCLED LATIN SMALL LETTER E -24D5 CIRCLED LATIN SMALL LETTER F -24D6 CIRCLED LATIN SMALL LETTER G -24D7 CIRCLED LATIN SMALL LETTER H -24D8 CIRCLED LATIN SMALL LETTER I -24D9 CIRCLED LATIN SMALL LETTER J -24DA CIRCLED LATIN SMALL LETTER K -24DB CIRCLED LATIN SMALL LETTER L -24DC CIRCLED LATIN SMALL LETTER M -24DD CIRCLED LATIN SMALL LETTER N -24DE CIRCLED LATIN SMALL LETTER O -24DF CIRCLED LATIN SMALL LETTER P -24E0 CIRCLED LATIN SMALL LETTER Q -24E1 CIRCLED LATIN SMALL LETTER R -24E2 CIRCLED LATIN SMALL LETTER S -24E3 CIRCLED LATIN SMALL LETTER T -24E4 CIRCLED LATIN SMALL LETTER U -24E5 CIRCLED LATIN SMALL LETTER V -24E6 CIRCLED LATIN SMALL LETTER W -24E7 CIRCLED LATIN SMALL LETTER X -24E8 CIRCLED LATIN SMALL LETTER Y -24E9 CIRCLED LATIN SMALL LETTER Z -24EA CIRCLED DIGIT ZERO -24EB NEGATIVE CIRCLED NUMBER ELEVEN -24EC NEGATIVE CIRCLED NUMBER TWELVE -24ED NEGATIVE CIRCLED NUMBER THIRTEEN -24EE NEGATIVE CIRCLED NUMBER FOURTEEN -24EF NEGATIVE CIRCLED NUMBER FIFTEEN -24F0 NEGATIVE CIRCLED NUMBER SIXTEEN -24F1 NEGATIVE CIRCLED NUMBER SEVENTEEN -24F2 NEGATIVE CIRCLED NUMBER EIGHTEEN -24F3 NEGATIVE CIRCLED NUMBER NINETEEN -24F4 NEGATIVE CIRCLED NUMBER TWENTY -24F5 DOUBLE CIRCLED DIGIT ONE -24F6 DOUBLE CIRCLED DIGIT TWO -24F7 DOUBLE CIRCLED DIGIT THREE -24F8 DOUBLE CIRCLED DIGIT FOUR -24F9 DOUBLE 
CIRCLED DIGIT FIVE -24FA DOUBLE CIRCLED DIGIT SIX -24FB DOUBLE CIRCLED DIGIT SEVEN -24FC DOUBLE CIRCLED DIGIT EIGHT -24FD DOUBLE CIRCLED DIGIT NINE -24FE DOUBLE CIRCLED NUMBER TEN -24FF NEGATIVE CIRCLED DIGIT ZERO -2500 BOX DRAWINGS LIGHT HORIZONTAL -2501 BOX DRAWINGS HEAVY HORIZONTAL -2502 BOX DRAWINGS LIGHT VERTICAL -2503 BOX DRAWINGS HEAVY VERTICAL -2504 BOX DRAWINGS LIGHT TRIPLE DASH HORIZONTAL -2505 BOX DRAWINGS HEAVY TRIPLE DASH HORIZONTAL -2506 BOX DRAWINGS LIGHT TRIPLE DASH VERTICAL -2507 BOX DRAWINGS HEAVY TRIPLE DASH VERTICAL -2508 BOX DRAWINGS LIGHT QUADRUPLE DASH HORIZONTAL -2509 BOX DRAWINGS HEAVY QUADRUPLE DASH HORIZONTAL -250A BOX DRAWINGS LIGHT QUADRUPLE DASH VERTICAL -250B BOX DRAWINGS HEAVY QUADRUPLE DASH VERTICAL -250C BOX DRAWINGS LIGHT DOWN AND RIGHT -250D BOX DRAWINGS DOWN LIGHT AND RIGHT HEAVY -250E BOX DRAWINGS DOWN HEAVY AND RIGHT LIGHT -250F BOX DRAWINGS HEAVY DOWN AND RIGHT -2510 BOX DRAWINGS LIGHT DOWN AND LEFT -2511 BOX DRAWINGS DOWN LIGHT AND LEFT HEAVY -2512 BOX DRAWINGS DOWN HEAVY AND LEFT LIGHT -2513 BOX DRAWINGS HEAVY DOWN AND LEFT -2514 BOX DRAWINGS LIGHT UP AND RIGHT -2515 BOX DRAWINGS UP LIGHT AND RIGHT HEAVY -2516 BOX DRAWINGS UP HEAVY AND RIGHT LIGHT -2517 BOX DRAWINGS HEAVY UP AND RIGHT -2518 BOX DRAWINGS LIGHT UP AND LEFT -2519 BOX DRAWINGS UP LIGHT AND LEFT HEAVY -251A BOX DRAWINGS UP HEAVY AND LEFT LIGHT -251B BOX DRAWINGS HEAVY UP AND LEFT -251C BOX DRAWINGS LIGHT VERTICAL AND RIGHT -251D BOX DRAWINGS VERTICAL LIGHT AND RIGHT HEAVY -251E BOX DRAWINGS UP HEAVY AND RIGHT DOWN LIGHT -251F BOX DRAWINGS DOWN HEAVY AND RIGHT UP LIGHT -2520 BOX DRAWINGS VERTICAL HEAVY AND RIGHT LIGHT -2521 BOX DRAWINGS DOWN LIGHT AND RIGHT UP HEAVY -2522 BOX DRAWINGS UP LIGHT AND RIGHT DOWN HEAVY -2523 BOX DRAWINGS HEAVY VERTICAL AND RIGHT -2524 BOX DRAWINGS LIGHT VERTICAL AND LEFT -2525 BOX DRAWINGS VERTICAL LIGHT AND LEFT HEAVY -2526 BOX DRAWINGS UP HEAVY AND LEFT DOWN LIGHT -2527 BOX DRAWINGS DOWN HEAVY AND LEFT UP LIGHT -2528 BOX DRAWINGS 
VERTICAL HEAVY AND LEFT LIGHT -2529 BOX DRAWINGS DOWN LIGHT AND LEFT UP HEAVY -252A BOX DRAWINGS UP LIGHT AND LEFT DOWN HEAVY -252B BOX DRAWINGS HEAVY VERTICAL AND LEFT -252C BOX DRAWINGS LIGHT DOWN AND HORIZONTAL -252D BOX DRAWINGS LEFT HEAVY AND RIGHT DOWN LIGHT -252E BOX DRAWINGS RIGHT HEAVY AND LEFT DOWN LIGHT -252F BOX DRAWINGS DOWN LIGHT AND HORIZONTAL HEAVY -2530 BOX DRAWINGS DOWN HEAVY AND HORIZONTAL LIGHT -2531 BOX DRAWINGS RIGHT LIGHT AND LEFT DOWN HEAVY -2532 BOX DRAWINGS LEFT LIGHT AND RIGHT DOWN HEAVY -2533 BOX DRAWINGS HEAVY DOWN AND HORIZONTAL -2534 BOX DRAWINGS LIGHT UP AND HORIZONTAL -2535 BOX DRAWINGS LEFT HEAVY AND RIGHT UP LIGHT -2536 BOX DRAWINGS RIGHT HEAVY AND LEFT UP LIGHT -2537 BOX DRAWINGS UP LIGHT AND HORIZONTAL HEAVY -2538 BOX DRAWINGS UP HEAVY AND HORIZONTAL LIGHT -2539 BOX DRAWINGS RIGHT LIGHT AND LEFT UP HEAVY -253A BOX DRAWINGS LEFT LIGHT AND RIGHT UP HEAVY -253B BOX DRAWINGS HEAVY UP AND HORIZONTAL -253C BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL -253D BOX DRAWINGS LEFT HEAVY AND RIGHT VERTICAL LIGHT -253E BOX DRAWINGS RIGHT HEAVY AND LEFT VERTICAL LIGHT -253F BOX DRAWINGS VERTICAL LIGHT AND HORIZONTAL HEAVY -2540 BOX DRAWINGS UP HEAVY AND DOWN HORIZONTAL LIGHT -2541 BOX DRAWINGS DOWN HEAVY AND UP HORIZONTAL LIGHT -2542 BOX DRAWINGS VERTICAL HEAVY AND HORIZONTAL LIGHT -2543 BOX DRAWINGS LEFT UP HEAVY AND RIGHT DOWN LIGHT -2544 BOX DRAWINGS RIGHT UP HEAVY AND LEFT DOWN LIGHT -2545 BOX DRAWINGS LEFT DOWN HEAVY AND RIGHT UP LIGHT -2546 BOX DRAWINGS RIGHT DOWN HEAVY AND LEFT UP LIGHT -2547 BOX DRAWINGS DOWN LIGHT AND UP HORIZONTAL HEAVY -2548 BOX DRAWINGS UP LIGHT AND DOWN HORIZONTAL HEAVY -2549 BOX DRAWINGS RIGHT LIGHT AND LEFT VERTICAL HEAVY -254A BOX DRAWINGS LEFT LIGHT AND RIGHT VERTICAL HEAVY -254B BOX DRAWINGS HEAVY VERTICAL AND HORIZONTAL -254C BOX DRAWINGS LIGHT DOUBLE DASH HORIZONTAL -254D BOX DRAWINGS HEAVY DOUBLE DASH HORIZONTAL -254E BOX DRAWINGS LIGHT DOUBLE DASH VERTICAL -254F BOX DRAWINGS HEAVY DOUBLE DASH VERTICAL -2550 
BOX DRAWINGS DOUBLE HORIZONTAL -2551 BOX DRAWINGS DOUBLE VERTICAL -2552 BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE -2553 BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE -2554 BOX DRAWINGS DOUBLE DOWN AND RIGHT -2555 BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE -2556 BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE -2557 BOX DRAWINGS DOUBLE DOWN AND LEFT -2558 BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE -2559 BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE -255A BOX DRAWINGS DOUBLE UP AND RIGHT -255B BOX DRAWINGS UP SINGLE AND LEFT DOUBLE -255C BOX DRAWINGS UP DOUBLE AND LEFT SINGLE -255D BOX DRAWINGS DOUBLE UP AND LEFT -255E BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE -255F BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE -2560 BOX DRAWINGS DOUBLE VERTICAL AND RIGHT -2561 BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE -2562 BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE -2563 BOX DRAWINGS DOUBLE VERTICAL AND LEFT -2564 BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE -2565 BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE -2566 BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL -2567 BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE -2568 BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE -2569 BOX DRAWINGS DOUBLE UP AND HORIZONTAL -256A BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE -256B BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE -256C BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL -256D BOX DRAWINGS LIGHT ARC DOWN AND RIGHT -256E BOX DRAWINGS LIGHT ARC DOWN AND LEFT -256F BOX DRAWINGS LIGHT ARC UP AND LEFT -2570 BOX DRAWINGS LIGHT ARC UP AND RIGHT -2571 BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT -2572 BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT -2573 BOX DRAWINGS LIGHT DIAGONAL CROSS -2574 BOX DRAWINGS LIGHT LEFT -2575 BOX DRAWINGS LIGHT UP -2576 BOX DRAWINGS LIGHT RIGHT -2577 BOX DRAWINGS LIGHT DOWN -2578 BOX DRAWINGS HEAVY LEFT -2579 BOX DRAWINGS HEAVY UP -257A BOX DRAWINGS HEAVY RIGHT -257B BOX DRAWINGS HEAVY DOWN -257C BOX DRAWINGS LIGHT LEFT AND HEAVY RIGHT -257D BOX DRAWINGS LIGHT UP AND 
HEAVY DOWN -257E BOX DRAWINGS HEAVY LEFT AND LIGHT RIGHT -257F BOX DRAWINGS HEAVY UP AND LIGHT DOWN -2580 UPPER HALF BLOCK -2581 LOWER ONE EIGHTH BLOCK -2582 LOWER ONE QUARTER BLOCK -2583 LOWER THREE EIGHTHS BLOCK -2584 LOWER HALF BLOCK -2585 LOWER FIVE EIGHTHS BLOCK -2586 LOWER THREE QUARTERS BLOCK -2587 LOWER SEVEN EIGHTHS BLOCK -2588 FULL BLOCK -2589 LEFT SEVEN EIGHTHS BLOCK -258A LEFT THREE QUARTERS BLOCK -258B LEFT FIVE EIGHTHS BLOCK -258C LEFT HALF BLOCK -258D LEFT THREE EIGHTHS BLOCK -258E LEFT ONE QUARTER BLOCK -258F LEFT ONE EIGHTH BLOCK -2590 RIGHT HALF BLOCK -2591 LIGHT SHADE -2592 MEDIUM SHADE -2593 DARK SHADE -2594 UPPER ONE EIGHTH BLOCK -2595 RIGHT ONE EIGHTH BLOCK -2596 QUADRANT LOWER LEFT -2597 QUADRANT LOWER RIGHT -2598 QUADRANT UPPER LEFT -2599 QUADRANT UPPER LEFT AND LOWER LEFT AND LOWER RIGHT -259A QUADRANT UPPER LEFT AND LOWER RIGHT -259B QUADRANT UPPER LEFT AND UPPER RIGHT AND LOWER LEFT -259C QUADRANT UPPER LEFT AND UPPER RIGHT AND LOWER RIGHT -259D QUADRANT UPPER RIGHT -259E QUADRANT UPPER RIGHT AND LOWER LEFT -259F QUADRANT UPPER RIGHT AND LOWER LEFT AND LOWER RIGHT -25A0 BLACK SQUARE -25A1 WHITE SQUARE -25A2 WHITE SQUARE WITH ROUNDED CORNERS -25A3 WHITE SQUARE CONTAINING BLACK SMALL SQUARE -25A4 SQUARE WITH HORIZONTAL FILL -25A5 SQUARE WITH VERTICAL FILL -25A6 SQUARE WITH ORTHOGONAL CROSSHATCH FILL -25A7 SQUARE WITH UPPER LEFT TO LOWER RIGHT FILL -25A8 SQUARE WITH UPPER RIGHT TO LOWER LEFT FILL -25A9 SQUARE WITH DIAGONAL CROSSHATCH FILL -25AA BLACK SMALL SQUARE -25AB WHITE SMALL SQUARE -25AC BLACK RECTANGLE -25AD WHITE RECTANGLE -25AE BLACK VERTICAL RECTANGLE -25AF WHITE VERTICAL RECTANGLE -25B0 BLACK PARALLELOGRAM -25B1 WHITE PARALLELOGRAM -25B2 BLACK UP-POINTING TRIANGLE -25B3 WHITE UP-POINTING TRIANGLE -25B4 BLACK UP-POINTING SMALL TRIANGLE -25B5 WHITE UP-POINTING SMALL TRIANGLE -25B6 BLACK RIGHT-POINTING TRIANGLE -25B7 WHITE RIGHT-POINTING TRIANGLE -25B8 BLACK RIGHT-POINTING SMALL TRIANGLE -25B9 WHITE RIGHT-POINTING SMALL TRIANGLE 
-25BA BLACK RIGHT-POINTING POINTER -25BB WHITE RIGHT-POINTING POINTER -25BC BLACK DOWN-POINTING TRIANGLE -25BD WHITE DOWN-POINTING TRIANGLE -25BE BLACK DOWN-POINTING SMALL TRIANGLE -25BF WHITE DOWN-POINTING SMALL TRIANGLE -25C0 BLACK LEFT-POINTING TRIANGLE -25C1 WHITE LEFT-POINTING TRIANGLE -25C2 BLACK LEFT-POINTING SMALL TRIANGLE -25C3 WHITE LEFT-POINTING SMALL TRIANGLE -25C4 BLACK LEFT-POINTING POINTER -25C5 WHITE LEFT-POINTING POINTER -25C6 BLACK DIAMOND -25C7 WHITE DIAMOND -25C8 WHITE DIAMOND CONTAINING BLACK SMALL DIAMOND -25C9 FISHEYE -25CA LOZENGE -25CB WHITE CIRCLE -25CC DOTTED CIRCLE -25CD CIRCLE WITH VERTICAL FILL -25CE BULLSEYE -25CF BLACK CIRCLE -25D0 CIRCLE WITH LEFT HALF BLACK -25D1 CIRCLE WITH RIGHT HALF BLACK -25D2 CIRCLE WITH LOWER HALF BLACK -25D3 CIRCLE WITH UPPER HALF BLACK -25D4 CIRCLE WITH UPPER RIGHT QUADRANT BLACK -25D5 CIRCLE WITH ALL BUT UPPER LEFT QUADRANT BLACK -25D6 LEFT HALF BLACK CIRCLE -25D7 RIGHT HALF BLACK CIRCLE -25D8 INVERSE BULLET -25D9 INVERSE WHITE CIRCLE -25DA UPPER HALF INVERSE WHITE CIRCLE -25DB LOWER HALF INVERSE WHITE CIRCLE -25DC UPPER LEFT QUADRANT CIRCULAR ARC -25DD UPPER RIGHT QUADRANT CIRCULAR ARC -25DE LOWER RIGHT QUADRANT CIRCULAR ARC -25DF LOWER LEFT QUADRANT CIRCULAR ARC -25E0 UPPER HALF CIRCLE -25E1 LOWER HALF CIRCLE -25E2 BLACK LOWER RIGHT TRIANGLE -25E3 BLACK LOWER LEFT TRIANGLE -25E4 BLACK UPPER LEFT TRIANGLE -25E5 BLACK UPPER RIGHT TRIANGLE -25E6 WHITE BULLET -25E7 SQUARE WITH LEFT HALF BLACK -25E8 SQUARE WITH RIGHT HALF BLACK -25E9 SQUARE WITH UPPER LEFT DIAGONAL HALF BLACK -25EA SQUARE WITH LOWER RIGHT DIAGONAL HALF BLACK -25EB WHITE SQUARE WITH VERTICAL BISECTING LINE -25EC WHITE UP-POINTING TRIANGLE WITH DOT -25ED UP-POINTING TRIANGLE WITH LEFT HALF BLACK -25EE UP-POINTING TRIANGLE WITH RIGHT HALF BLACK -25EF LARGE CIRCLE -25F0 WHITE SQUARE WITH UPPER LEFT QUADRANT -25F1 WHITE SQUARE WITH LOWER LEFT QUADRANT -25F2 WHITE SQUARE WITH LOWER RIGHT QUADRANT -25F3 WHITE SQUARE WITH UPPER RIGHT QUADRANT -25F4 
WHITE CIRCLE WITH UPPER LEFT QUADRANT -25F5 WHITE CIRCLE WITH LOWER LEFT QUADRANT -25F6 WHITE CIRCLE WITH LOWER RIGHT QUADRANT -25F7 WHITE CIRCLE WITH UPPER RIGHT QUADRANT -25F8 UPPER LEFT TRIANGLE -25F9 UPPER RIGHT TRIANGLE -25FA LOWER LEFT TRIANGLE -25FB WHITE MEDIUM SQUARE -25FC BLACK MEDIUM SQUARE -25FD WHITE MEDIUM SMALL SQUARE -25FE BLACK MEDIUM SMALL SQUARE -25FF LOWER RIGHT TRIANGLE -2600 BLACK SUN WITH RAYS -2601 CLOUD -2602 UMBRELLA -2603 SNOWMAN -2604 COMET -2605 BLACK STAR -2606 WHITE STAR -2607 LIGHTNING -2608 THUNDERSTORM -2609 SUN -260A ASCENDING NODE -260B DESCENDING NODE -260C CONJUNCTION -260D OPPOSITION -260E BLACK TELEPHONE -260F WHITE TELEPHONE -2610 BALLOT BOX -2611 BALLOT BOX WITH CHECK -2612 BALLOT BOX WITH X -2613 SALTIRE -2614 UMBRELLA WITH RAIN DROPS -2615 HOT BEVERAGE -2616 WHITE SHOGI PIECE -2617 BLACK SHOGI PIECE -2618 SHAMROCK -2619 REVERSED ROTATED FLORAL HEART BULLET -261A BLACK LEFT POINTING INDEX -261B BLACK RIGHT POINTING INDEX -261C WHITE LEFT POINTING INDEX -261D WHITE UP POINTING INDEX -261E WHITE RIGHT POINTING INDEX -261F WHITE DOWN POINTING INDEX -2620 SKULL AND CROSSBONES -2621 CAUTION SIGN -2622 RADIOACTIVE SIGN -2623 BIOHAZARD SIGN -2624 CADUCEUS -2625 ANKH -2626 ORTHODOX CROSS -2627 CHI RHO -2628 CROSS OF LORRAINE -2629 CROSS OF JERUSALEM -262A STAR AND CRESCENT -262B FARSI SYMBOL -262C ADI SHAKTI -262D HAMMER AND SICKLE -262E PEACE SYMBOL -262F YIN YANG -2630 TRIGRAM FOR HEAVEN -2631 TRIGRAM FOR LAKE -2632 TRIGRAM FOR FIRE -2633 TRIGRAM FOR THUNDER -2634 TRIGRAM FOR WIND -2635 TRIGRAM FOR WATER -2636 TRIGRAM FOR MOUNTAIN -2637 TRIGRAM FOR EARTH -2638 WHEEL OF DHARMA -2639 WHITE FROWNING FACE -263A WHITE SMILING FACE -263B BLACK SMILING FACE -263C WHITE SUN WITH RAYS -263D FIRST QUARTER MOON -263E LAST QUARTER MOON -263F MERCURY -2640 FEMALE SIGN -2641 EARTH -2642 MALE SIGN -2643 JUPITER -2644 SATURN -2645 URANUS -2646 NEPTUNE -2647 PLUTO -2648 ARIES -2649 TAURUS -264A GEMINI -264B CANCER -264C LEO -264D VIRGO -264E 
LIBRA -264F SCORPIUS -2650 SAGITTARIUS -2651 CAPRICORN -2652 AQUARIUS -2653 PISCES -2654 WHITE CHESS KING -2655 WHITE CHESS QUEEN -2656 WHITE CHESS ROOK -2657 WHITE CHESS BISHOP -2658 WHITE CHESS KNIGHT -2659 WHITE CHESS PAWN -265A BLACK CHESS KING -265B BLACK CHESS QUEEN -265C BLACK CHESS ROOK -265D BLACK CHESS BISHOP -265E BLACK CHESS KNIGHT -265F BLACK CHESS PAWN -2660 BLACK SPADE SUIT -2661 WHITE HEART SUIT -2662 WHITE DIAMOND SUIT -2663 BLACK CLUB SUIT -2664 WHITE SPADE SUIT -2665 BLACK HEART SUIT -2666 BLACK DIAMOND SUIT -2667 WHITE CLUB SUIT -2668 HOT SPRINGS -2669 QUARTER NOTE -266A EIGHTH NOTE -266B BEAMED EIGHTH NOTES -266C BEAMED SIXTEENTH NOTES -266D MUSIC FLAT SIGN -266E MUSIC NATURAL SIGN -266F MUSIC SHARP SIGN -2670 WEST SYRIAC CROSS -2671 EAST SYRIAC CROSS -2672 UNIVERSAL RECYCLING SYMBOL -2673 RECYCLING SYMBOL FOR TYPE-1 PLASTICS -2674 RECYCLING SYMBOL FOR TYPE-2 PLASTICS -2675 RECYCLING SYMBOL FOR TYPE-3 PLASTICS -2676 RECYCLING SYMBOL FOR TYPE-4 PLASTICS -2677 RECYCLING SYMBOL FOR TYPE-5 PLASTICS -2678 RECYCLING SYMBOL FOR TYPE-6 PLASTICS -2679 RECYCLING SYMBOL FOR TYPE-7 PLASTICS -267A RECYCLING SYMBOL FOR GENERIC MATERIALS -267B BLACK UNIVERSAL RECYCLING SYMBOL -267C RECYCLED PAPER SYMBOL -267D PARTIALLY-RECYCLED PAPER SYMBOL -267E PERMANENT PAPER SIGN -267F WHEELCHAIR SYMBOL -2680 DIE FACE-1 -2681 DIE FACE-2 -2682 DIE FACE-3 -2683 DIE FACE-4 -2684 DIE FACE-5 -2685 DIE FACE-6 -2686 WHITE CIRCLE WITH DOT RIGHT -2687 WHITE CIRCLE WITH TWO DOTS -2688 BLACK CIRCLE WITH WHITE DOT RIGHT -2689 BLACK CIRCLE WITH TWO WHITE DOTS -268A MONOGRAM FOR YANG -268B MONOGRAM FOR YIN -268C DIGRAM FOR GREATER YANG -268D DIGRAM FOR LESSER YIN -268E DIGRAM FOR LESSER YANG -268F DIGRAM FOR GREATER YIN -2690 WHITE FLAG -2691 BLACK FLAG -2692 HAMMER AND PICK -2693 ANCHOR -2694 CROSSED SWORDS -2695 STAFF OF AESCULAPIUS -2696 SCALES -2697 ALEMBIC -2698 FLOWER -2699 GEAR -269A STAFF OF HERMES -269B ATOM SYMBOL -269C FLEUR-DE-LIS -269D OUTLINED WHITE STAR -269E THREE LINES 
CONVERGING RIGHT -269F THREE LINES CONVERGING LEFT -26A0 WARNING SIGN -26A1 HIGH VOLTAGE SIGN -26A2 DOUBLED FEMALE SIGN -26A3 DOUBLED MALE SIGN -26A4 INTERLOCKED FEMALE AND MALE SIGN -26A5 MALE AND FEMALE SIGN -26A6 MALE WITH STROKE SIGN -26A7 MALE WITH STROKE AND MALE AND FEMALE SIGN -26A8 VERTICAL MALE WITH STROKE SIGN -26A9 HORIZONTAL MALE WITH STROKE SIGN -26AA MEDIUM WHITE CIRCLE -26AB MEDIUM BLACK CIRCLE -26AC MEDIUM SMALL WHITE CIRCLE -26AD MARRIAGE SYMBOL -26AE DIVORCE SYMBOL -26AF UNMARRIED PARTNERSHIP SYMBOL -26B0 COFFIN -26B1 FUNERAL URN -26B2 NEUTER -26B3 CERES -26B4 PALLAS -26B5 JUNO -26B6 VESTA -26B7 CHIRON -26B8 BLACK MOON LILITH -26B9 SEXTILE -26BA SEMISEXTILE -26BB QUINCUNX -26BC SESQUIQUADRATE -26BD SOCCER BALL -26BE BASEBALL -26BF SQUARED KEY -26C0 WHITE DRAUGHTS MAN -26C1 WHITE DRAUGHTS KING -26C2 BLACK DRAUGHTS MAN -26C3 BLACK DRAUGHTS KING -26C4 SNOWMAN WITHOUT SNOW -26C5 SUN BEHIND CLOUD -26C6 RAIN -26C7 BLACK SNOWMAN -26C8 THUNDER CLOUD AND RAIN -26C9 TURNED WHITE SHOGI PIECE -26CA TURNED BLACK SHOGI PIECE -26CB WHITE DIAMOND IN SQUARE -26CC CROSSING LANES -26CD DISABLED CAR -26CF PICK -26D0 CAR SLIDING -26D1 HELMET WITH WHITE CROSS -26D2 CIRCLED CROSSING LANES -26D3 CHAINS -26D4 NO ENTRY -26D5 ALTERNATE ONE-WAY LEFT WAY TRAFFIC -26D6 BLACK TWO-WAY LEFT WAY TRAFFIC -26D7 WHITE TWO-WAY LEFT WAY TRAFFIC -26D8 BLACK LEFT LANE MERGE -26D9 WHITE LEFT LANE MERGE -26DA DRIVE SLOW SIGN -26DB HEAVY WHITE DOWN-POINTING TRIANGLE -26DC LEFT CLOSED ENTRY -26DD SQUARED SALTIRE -26DE FALLING DIAGONAL IN WHITE CIRCLE IN BLACK SQUARE -26DF BLACK TRUCK -26E0 RESTRICTED LEFT ENTRY-1 -26E1 RESTRICTED LEFT ENTRY-2 -26E3 HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE -26E8 BLACK CROSS ON SHIELD -26E9 SHINTO SHRINE -26EA CHURCH -26EB CASTLE -26EC HISTORIC SITE -26ED GEAR WITHOUT HUB -26EE GEAR WITH HANDLES -26EF MAP SYMBOL FOR LIGHTHOUSE -26F0 MOUNTAIN -26F1 UMBRELLA ON GROUND -26F2 FOUNTAIN -26F3 FLAG IN HOLE -26F4 FERRY -26F5 SAILBOAT -26F6 SQUARE FOUR CORNERS 
-26F7 SKIER -26F8 ICE SKATE -26F9 PERSON WITH BALL -26FA TENT -26FB JAPANESE BANK SYMBOL -26FC HEADSTONE GRAVEYARD SYMBOL -26FD FUEL PUMP -26FE CUP ON BLACK SQUARE -26FF WHITE FLAG WITH HORIZONTAL MIDDLE BLACK STRIPE -2701 UPPER BLADE SCISSORS -2702 BLACK SCISSORS -2703 LOWER BLADE SCISSORS -2704 WHITE SCISSORS -2706 TELEPHONE LOCATION SIGN -2707 TAPE DRIVE -2708 AIRPLANE -2709 ENVELOPE -270C VICTORY HAND -270D WRITING HAND -270E LOWER RIGHT PENCIL -270F PENCIL -2710 UPPER RIGHT PENCIL -2711 WHITE NIB -2712 BLACK NIB -2713 CHECK MARK -2714 HEAVY CHECK MARK -2715 MULTIPLICATION X -2716 HEAVY MULTIPLICATION X -2717 BALLOT X -2718 HEAVY BALLOT X -2719 OUTLINED GREEK CROSS -271A HEAVY GREEK CROSS -271B OPEN CENTRE CROSS -271C HEAVY OPEN CENTRE CROSS -271D LATIN CROSS -271E SHADOWED WHITE LATIN CROSS -271F OUTLINED LATIN CROSS -2720 MALTESE CROSS -2721 STAR OF DAVID -2722 FOUR TEARDROP-SPOKED ASTERISK -2723 FOUR BALLOON-SPOKED ASTERISK -2724 HEAVY FOUR BALLOON-SPOKED ASTERISK -2725 FOUR CLUB-SPOKED ASTERISK -2726 BLACK FOUR POINTED STAR -2727 WHITE FOUR POINTED STAR -2729 STRESS OUTLINED WHITE STAR -272A CIRCLED WHITE STAR -272B OPEN CENTRE BLACK STAR -272C BLACK CENTRE WHITE STAR -272D OUTLINED BLACK STAR -272E HEAVY OUTLINED BLACK STAR -272F PINWHEEL STAR -2730 SHADOWED WHITE STAR -2731 HEAVY ASTERISK -2732 OPEN CENTRE ASTERISK -2733 EIGHT SPOKED ASTERISK -2734 EIGHT POINTED BLACK STAR -2735 EIGHT POINTED PINWHEEL STAR -2736 SIX POINTED BLACK STAR -2737 EIGHT POINTED RECTILINEAR BLACK STAR -2738 HEAVY EIGHT POINTED RECTILINEAR BLACK STAR -2739 TWELVE POINTED BLACK STAR -273A SIXTEEN POINTED ASTERISK -273B TEARDROP-SPOKED ASTERISK -273C OPEN CENTRE TEARDROP-SPOKED ASTERISK -273D HEAVY TEARDROP-SPOKED ASTERISK -273E SIX PETALLED BLACK AND WHITE FLORETTE -273F BLACK FLORETTE -2740 WHITE FLORETTE -2741 EIGHT PETALLED OUTLINED BLACK FLORETTE -2742 CIRCLED OPEN CENTRE EIGHT POINTED STAR -2743 HEAVY TEARDROP-SPOKED PINWHEEL ASTERISK -2744 SNOWFLAKE -2745 TIGHT TRIFOLIATE 
SNOWFLAKE -2746 HEAVY CHEVRON SNOWFLAKE -2747 SPARKLE -2748 HEAVY SPARKLE -2749 BALLOON-SPOKED ASTERISK -274A EIGHT TEARDROP-SPOKED PROPELLER ASTERISK -274B HEAVY EIGHT TEARDROP-SPOKED PROPELLER ASTERISK -274D SHADOWED WHITE CIRCLE -274F LOWER RIGHT DROP-SHADOWED WHITE SQUARE -2750 UPPER RIGHT DROP-SHADOWED WHITE SQUARE -2751 LOWER RIGHT SHADOWED WHITE SQUARE -2752 UPPER RIGHT SHADOWED WHITE SQUARE -2756 BLACK DIAMOND MINUS WHITE X -2757 HEAVY EXCLAMATION MARK SYMBOL -2758 LIGHT VERTICAL BAR -2759 MEDIUM VERTICAL BAR -275A HEAVY VERTICAL BAR -275B HEAVY SINGLE TURNED COMMA QUOTATION MARK ORNAMENT -275C HEAVY SINGLE COMMA QUOTATION MARK ORNAMENT -275D HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT -275E HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT -2761 CURVED STEM PARAGRAPH SIGN ORNAMENT -2762 HEAVY EXCLAMATION MARK ORNAMENT -2763 HEAVY HEART EXCLAMATION MARK ORNAMENT -2764 HEAVY BLACK HEART -2765 ROTATED HEAVY BLACK HEART BULLET -2766 FLORAL HEART -2767 ROTATED FLORAL HEART BULLET -2768 MEDIUM LEFT PARENTHESIS ORNAMENT -2769 MEDIUM RIGHT PARENTHESIS ORNAMENT -276A MEDIUM FLATTENED LEFT PARENTHESIS ORNAMENT -276B MEDIUM FLATTENED RIGHT PARENTHESIS ORNAMENT -276C MEDIUM LEFT-POINTING ANGLE BRACKET ORNAMENT -276D MEDIUM RIGHT-POINTING ANGLE BRACKET ORNAMENT -276E HEAVY LEFT-POINTING ANGLE QUOTATION MARK ORNAMENT -276F HEAVY RIGHT-POINTING ANGLE QUOTATION MARK ORNAMENT -2770 HEAVY LEFT-POINTING ANGLE BRACKET ORNAMENT -2771 HEAVY RIGHT-POINTING ANGLE BRACKET ORNAMENT -2772 LIGHT LEFT TORTOISE SHELL BRACKET ORNAMENT -2773 LIGHT RIGHT TORTOISE SHELL BRACKET ORNAMENT -2774 MEDIUM LEFT CURLY BRACKET ORNAMENT -2775 MEDIUM RIGHT CURLY BRACKET ORNAMENT -2776 DINGBAT NEGATIVE CIRCLED DIGIT ONE -2777 DINGBAT NEGATIVE CIRCLED DIGIT TWO -2778 DINGBAT NEGATIVE CIRCLED DIGIT THREE -2779 DINGBAT NEGATIVE CIRCLED DIGIT FOUR -277A DINGBAT NEGATIVE CIRCLED DIGIT FIVE -277B DINGBAT NEGATIVE CIRCLED DIGIT SIX -277C DINGBAT NEGATIVE CIRCLED DIGIT SEVEN -277D DINGBAT NEGATIVE CIRCLED 
DIGIT EIGHT -277E DINGBAT NEGATIVE CIRCLED DIGIT NINE -277F DINGBAT NEGATIVE CIRCLED NUMBER TEN -2780 DINGBAT CIRCLED SANS-SERIF DIGIT ONE -2781 DINGBAT CIRCLED SANS-SERIF DIGIT TWO -2782 DINGBAT CIRCLED SANS-SERIF DIGIT THREE -2783 DINGBAT CIRCLED SANS-SERIF DIGIT FOUR -2784 DINGBAT CIRCLED SANS-SERIF DIGIT FIVE -2785 DINGBAT CIRCLED SANS-SERIF DIGIT SIX -2786 DINGBAT CIRCLED SANS-SERIF DIGIT SEVEN -2787 DINGBAT CIRCLED SANS-SERIF DIGIT EIGHT -2788 DINGBAT CIRCLED SANS-SERIF DIGIT NINE -2789 DINGBAT CIRCLED SANS-SERIF NUMBER TEN -278A DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT ONE -278B DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT TWO -278C DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT THREE -278D DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FOUR -278E DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FIVE -278F DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SIX -2790 DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SEVEN -2791 DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT EIGHT -2792 DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT NINE -2793 DINGBAT NEGATIVE CIRCLED SANS-SERIF NUMBER TEN -2794 HEAVY WIDE-HEADED RIGHTWARDS ARROW -2798 HEAVY SOUTH EAST ARROW -2799 HEAVY RIGHTWARDS ARROW -279A HEAVY NORTH EAST ARROW -279B DRAFTING POINT RIGHTWARDS ARROW -279C HEAVY ROUND-TIPPED RIGHTWARDS ARROW -279D TRIANGLE-HEADED RIGHTWARDS ARROW -279E HEAVY TRIANGLE-HEADED RIGHTWARDS ARROW -279F DASHED TRIANGLE-HEADED RIGHTWARDS ARROW -27A0 HEAVY DASHED TRIANGLE-HEADED RIGHTWARDS ARROW -27A1 BLACK RIGHTWARDS ARROW -27A2 THREE-D TOP-LIGHTED RIGHTWARDS ARROWHEAD -27A3 THREE-D BOTTOM-LIGHTED RIGHTWARDS ARROWHEAD -27A4 BLACK RIGHTWARDS ARROWHEAD -27A5 HEAVY BLACK CURVED DOWNWARDS AND RIGHTWARDS ARROW -27A6 HEAVY BLACK CURVED UPWARDS AND RIGHTWARDS ARROW -27A7 SQUAT BLACK RIGHTWARDS ARROW -27A8 HEAVY CONCAVE-POINTED BLACK RIGHTWARDS ARROW -27A9 RIGHT-SHADED WHITE RIGHTWARDS ARROW -27AA LEFT-SHADED WHITE RIGHTWARDS ARROW -27AB BACK-TILTED SHADOWED WHITE RIGHTWARDS ARROW -27AC FRONT-TILTED SHADOWED WHITE RIGHTWARDS ARROW 
-27AD HEAVY LOWER RIGHT-SHADOWED WHITE RIGHTWARDS ARROW -27AE HEAVY UPPER RIGHT-SHADOWED WHITE RIGHTWARDS ARROW -27AF NOTCHED LOWER RIGHT-SHADOWED WHITE RIGHTWARDS ARROW -27B1 NOTCHED UPPER RIGHT-SHADOWED WHITE RIGHTWARDS ARROW -27B2 CIRCLED HEAVY WHITE RIGHTWARDS ARROW -27B3 WHITE-FEATHERED RIGHTWARDS ARROW -27B4 BLACK-FEATHERED SOUTH EAST ARROW -27B5 BLACK-FEATHERED RIGHTWARDS ARROW -27B6 BLACK-FEATHERED NORTH EAST ARROW -27B7 HEAVY BLACK-FEATHERED SOUTH EAST ARROW -27B8 HEAVY BLACK-FEATHERED RIGHTWARDS ARROW -27B9 HEAVY BLACK-FEATHERED NORTH EAST ARROW -27BA TEARDROP-BARBED RIGHTWARDS ARROW -27BB HEAVY TEARDROP-SHANKED RIGHTWARDS ARROW -27BC WEDGE-TAILED RIGHTWARDS ARROW -27BD HEAVY WEDGE-TAILED RIGHTWARDS ARROW -27BE OPEN-OUTLINED RIGHTWARDS ARROW -27C0 THREE DIMENSIONAL ANGLE -27C1 WHITE TRIANGLE CONTAINING SMALL WHITE TRIANGLE -27C2 PERPENDICULAR -27C3 OPEN SUBSET -27C4 OPEN SUPERSET -27C5 LEFT S-SHAPED BAG DELIMITER -27C6 RIGHT S-SHAPED BAG DELIMITER -27C7 OR WITH DOT INSIDE -27C8 REVERSE SOLIDUS PRECEDING SUBSET -27C9 SUPERSET PRECEDING SOLIDUS -27CA VERTICAL BAR WITH HORIZONTAL STROKE -27CC LONG DIVISION -27D0 WHITE DIAMOND WITH CENTRED DOT -27D1 AND WITH DOT -27D2 ELEMENT OF OPENING UPWARDS -27D3 LOWER RIGHT CORNER WITH DOT -27D4 UPPER LEFT CORNER WITH DOT -27D5 LEFT OUTER JOIN -27D6 RIGHT OUTER JOIN -27D7 FULL OUTER JOIN -27D8 LARGE UP TACK -27D9 LARGE DOWN TACK -27DA LEFT AND RIGHT DOUBLE TURNSTILE -27DB LEFT AND RIGHT TACK -27DC LEFT MULTIMAP -27DD LONG RIGHT TACK -27DE LONG LEFT TACK -27DF UP TACK WITH CIRCLE ABOVE -27E0 LOZENGE DIVIDED BY HORIZONTAL RULE -27E1 WHITE CONCAVE-SIDED DIAMOND -27E2 WHITE CONCAVE-SIDED DIAMOND WITH LEFTWARDS TICK -27E3 WHITE CONCAVE-SIDED DIAMOND WITH RIGHTWARDS TICK -27E4 WHITE SQUARE WITH LEFTWARDS TICK -27E5 WHITE SQUARE WITH RIGHTWARDS TICK -27E6 MATHEMATICAL LEFT WHITE SQUARE BRACKET -27E7 MATHEMATICAL RIGHT WHITE SQUARE BRACKET -27E8 MATHEMATICAL LEFT ANGLE BRACKET -27E9 MATHEMATICAL RIGHT ANGLE BRACKET -27EA 
MATHEMATICAL LEFT DOUBLE ANGLE BRACKET -27EB MATHEMATICAL RIGHT DOUBLE ANGLE BRACKET -27EC MATHEMATICAL LEFT WHITE TORTOISE SHELL BRACKET -27ED MATHEMATICAL RIGHT WHITE TORTOISE SHELL BRACKET -27EE MATHEMATICAL LEFT FLATTENED PARENTHESIS -27EF MATHEMATICAL RIGHT FLATTENED PARENTHESIS -27F0 UPWARDS QUADRUPLE ARROW -27F1 DOWNWARDS QUADRUPLE ARROW -27F2 ANTICLOCKWISE GAPPED CIRCLE ARROW -27F3 CLOCKWISE GAPPED CIRCLE ARROW -27F4 RIGHT ARROW WITH CIRCLED PLUS -27F5 LONG LEFTWARDS ARROW -27F6 LONG RIGHTWARDS ARROW -27F7 LONG LEFT RIGHT ARROW -27F8 LONG LEFTWARDS DOUBLE ARROW -27F9 LONG RIGHTWARDS DOUBLE ARROW -27FA LONG LEFT RIGHT DOUBLE ARROW -27FB LONG LEFTWARDS ARROW FROM BAR -27FC LONG RIGHTWARDS ARROW FROM BAR -27FD LONG LEFTWARDS DOUBLE ARROW FROM BAR -27FE LONG RIGHTWARDS DOUBLE ARROW FROM BAR -27FF LONG RIGHTWARDS SQUIGGLE ARROW -2800 BRAILLE PATTERN BLANK -2801 BRAILLE PATTERN DOTS-1 -2802 BRAILLE PATTERN DOTS-2 -2803 BRAILLE PATTERN DOTS-12 -2804 BRAILLE PATTERN DOTS-3 -2805 BRAILLE PATTERN DOTS-13 -2806 BRAILLE PATTERN DOTS-23 -2807 BRAILLE PATTERN DOTS-123 -2808 BRAILLE PATTERN DOTS-4 -2809 BRAILLE PATTERN DOTS-14 -280A BRAILLE PATTERN DOTS-24 -280B BRAILLE PATTERN DOTS-124 -280C BRAILLE PATTERN DOTS-34 -280D BRAILLE PATTERN DOTS-134 -280E BRAILLE PATTERN DOTS-234 -280F BRAILLE PATTERN DOTS-1234 -2810 BRAILLE PATTERN DOTS-5 -2811 BRAILLE PATTERN DOTS-15 -2812 BRAILLE PATTERN DOTS-25 -2813 BRAILLE PATTERN DOTS-125 -2814 BRAILLE PATTERN DOTS-35 -2815 BRAILLE PATTERN DOTS-135 -2816 BRAILLE PATTERN DOTS-235 -2817 BRAILLE PATTERN DOTS-1235 -2818 BRAILLE PATTERN DOTS-45 -2819 BRAILLE PATTERN DOTS-145 -281A BRAILLE PATTERN DOTS-245 -281B BRAILLE PATTERN DOTS-1245 -281C BRAILLE PATTERN DOTS-345 -281D BRAILLE PATTERN DOTS-1345 -281E BRAILLE PATTERN DOTS-2345 -281F BRAILLE PATTERN DOTS-12345 -2820 BRAILLE PATTERN DOTS-6 -2821 BRAILLE PATTERN DOTS-16 -2822 BRAILLE PATTERN DOTS-26 -2823 BRAILLE PATTERN DOTS-126 -2824 BRAILLE PATTERN DOTS-36 -2825 BRAILLE PATTERN DOTS-136 
-2826 BRAILLE PATTERN DOTS-236 -2827 BRAILLE PATTERN DOTS-1236 -2828 BRAILLE PATTERN DOTS-46 -2829 BRAILLE PATTERN DOTS-146 -282A BRAILLE PATTERN DOTS-246 -282B BRAILLE PATTERN DOTS-1246 -282C BRAILLE PATTERN DOTS-346 -282D BRAILLE PATTERN DOTS-1346 -282E BRAILLE PATTERN DOTS-2346 -282F BRAILLE PATTERN DOTS-12346 -2830 BRAILLE PATTERN DOTS-56 -2831 BRAILLE PATTERN DOTS-156 -2832 BRAILLE PATTERN DOTS-256 -2833 BRAILLE PATTERN DOTS-1256 -2834 BRAILLE PATTERN DOTS-356 -2835 BRAILLE PATTERN DOTS-1356 -2836 BRAILLE PATTERN DOTS-2356 -2837 BRAILLE PATTERN DOTS-12356 -2838 BRAILLE PATTERN DOTS-456 -2839 BRAILLE PATTERN DOTS-1456 -283A BRAILLE PATTERN DOTS-2456 -283B BRAILLE PATTERN DOTS-12456 -283C BRAILLE PATTERN DOTS-3456 -283D BRAILLE PATTERN DOTS-13456 -283E BRAILLE PATTERN DOTS-23456 -283F BRAILLE PATTERN DOTS-123456 -2840 BRAILLE PATTERN DOTS-7 -2841 BRAILLE PATTERN DOTS-17 -2842 BRAILLE PATTERN DOTS-27 -2843 BRAILLE PATTERN DOTS-127 -2844 BRAILLE PATTERN DOTS-37 -2845 BRAILLE PATTERN DOTS-137 -2846 BRAILLE PATTERN DOTS-237 -2847 BRAILLE PATTERN DOTS-1237 -2848 BRAILLE PATTERN DOTS-47 -2849 BRAILLE PATTERN DOTS-147 -284A BRAILLE PATTERN DOTS-247 -284B BRAILLE PATTERN DOTS-1247 -284C BRAILLE PATTERN DOTS-347 -284D BRAILLE PATTERN DOTS-1347 -284E BRAILLE PATTERN DOTS-2347 -284F BRAILLE PATTERN DOTS-12347 -2850 BRAILLE PATTERN DOTS-57 -2851 BRAILLE PATTERN DOTS-157 -2852 BRAILLE PATTERN DOTS-257 -2853 BRAILLE PATTERN DOTS-1257 -2854 BRAILLE PATTERN DOTS-357 -2855 BRAILLE PATTERN DOTS-1357 -2856 BRAILLE PATTERN DOTS-2357 -2857 BRAILLE PATTERN DOTS-12357 -2858 BRAILLE PATTERN DOTS-457 -2859 BRAILLE PATTERN DOTS-1457 -285A BRAILLE PATTERN DOTS-2457 -285B BRAILLE PATTERN DOTS-12457 -285C BRAILLE PATTERN DOTS-3457 -285D BRAILLE PATTERN DOTS-13457 -285E BRAILLE PATTERN DOTS-23457 -285F BRAILLE PATTERN DOTS-123457 -2860 BRAILLE PATTERN DOTS-67 -2861 BRAILLE PATTERN DOTS-167 -2862 BRAILLE PATTERN DOTS-267 -2863 BRAILLE PATTERN DOTS-1267 -2864 BRAILLE PATTERN DOTS-367 -2865 
BRAILLE PATTERN DOTS-1367 -2866 BRAILLE PATTERN DOTS-2367 -2867 BRAILLE PATTERN DOTS-12367 -2868 BRAILLE PATTERN DOTS-467 -2869 BRAILLE PATTERN DOTS-1467 -286A BRAILLE PATTERN DOTS-2467 -286B BRAILLE PATTERN DOTS-12467 -286C BRAILLE PATTERN DOTS-3467 -286D BRAILLE PATTERN DOTS-13467 -286E BRAILLE PATTERN DOTS-23467 -286F BRAILLE PATTERN DOTS-123467 -2870 BRAILLE PATTERN DOTS-567 -2871 BRAILLE PATTERN DOTS-1567 -2872 BRAILLE PATTERN DOTS-2567 -2873 BRAILLE PATTERN DOTS-12567 -2874 BRAILLE PATTERN DOTS-3567 -2875 BRAILLE PATTERN DOTS-13567 -2876 BRAILLE PATTERN DOTS-23567 -2877 BRAILLE PATTERN DOTS-123567 -2878 BRAILLE PATTERN DOTS-4567 -2879 BRAILLE PATTERN DOTS-14567 -287A BRAILLE PATTERN DOTS-24567 -287B BRAILLE PATTERN DOTS-124567 -287C BRAILLE PATTERN DOTS-34567 -287D BRAILLE PATTERN DOTS-134567 -287E BRAILLE PATTERN DOTS-234567 -287F BRAILLE PATTERN DOTS-1234567 -2880 BRAILLE PATTERN DOTS-8 -2881 BRAILLE PATTERN DOTS-18 -2882 BRAILLE PATTERN DOTS-28 -2883 BRAILLE PATTERN DOTS-128 -2884 BRAILLE PATTERN DOTS-38 -2885 BRAILLE PATTERN DOTS-138 -2886 BRAILLE PATTERN DOTS-238 -2887 BRAILLE PATTERN DOTS-1238 -2888 BRAILLE PATTERN DOTS-48 -2889 BRAILLE PATTERN DOTS-148 -288A BRAILLE PATTERN DOTS-248 -288B BRAILLE PATTERN DOTS-1248 -288C BRAILLE PATTERN DOTS-348 -288D BRAILLE PATTERN DOTS-1348 -288E BRAILLE PATTERN DOTS-2348 -288F BRAILLE PATTERN DOTS-12348 -2890 BRAILLE PATTERN DOTS-58 -2891 BRAILLE PATTERN DOTS-158 -2892 BRAILLE PATTERN DOTS-258 -2893 BRAILLE PATTERN DOTS-1258 -2894 BRAILLE PATTERN DOTS-358 -2895 BRAILLE PATTERN DOTS-1358 -2896 BRAILLE PATTERN DOTS-2358 -2897 BRAILLE PATTERN DOTS-12358 -2898 BRAILLE PATTERN DOTS-458 -2899 BRAILLE PATTERN DOTS-1458 -289A BRAILLE PATTERN DOTS-2458 -289B BRAILLE PATTERN DOTS-12458 -289C BRAILLE PATTERN DOTS-3458 -289D BRAILLE PATTERN DOTS-13458 -289E BRAILLE PATTERN DOTS-23458 -289F BRAILLE PATTERN DOTS-123458 -28A0 BRAILLE PATTERN DOTS-68 -28A1 BRAILLE PATTERN DOTS-168 -28A2 BRAILLE PATTERN DOTS-268 -28A3 BRAILLE 
PATTERN DOTS-1268 -28A4 BRAILLE PATTERN DOTS-368 -28A5 BRAILLE PATTERN DOTS-1368 -28A6 BRAILLE PATTERN DOTS-2368 -28A7 BRAILLE PATTERN DOTS-12368 -28A8 BRAILLE PATTERN DOTS-468 -28A9 BRAILLE PATTERN DOTS-1468 -28AA BRAILLE PATTERN DOTS-2468 -28AB BRAILLE PATTERN DOTS-12468 -28AC BRAILLE PATTERN DOTS-3468 -28AD BRAILLE PATTERN DOTS-13468 -28AE BRAILLE PATTERN DOTS-23468 -28AF BRAILLE PATTERN DOTS-123468 -28B0 BRAILLE PATTERN DOTS-568 -28B1 BRAILLE PATTERN DOTS-1568 -28B2 BRAILLE PATTERN DOTS-2568 -28B3 BRAILLE PATTERN DOTS-12568 -28B4 BRAILLE PATTERN DOTS-3568 -28B5 BRAILLE PATTERN DOTS-13568 -28B6 BRAILLE PATTERN DOTS-23568 -28B7 BRAILLE PATTERN DOTS-123568 -28B8 BRAILLE PATTERN DOTS-4568 -28B9 BRAILLE PATTERN DOTS-14568 -28BA BRAILLE PATTERN DOTS-24568 -28BB BRAILLE PATTERN DOTS-124568 -28BC BRAILLE PATTERN DOTS-34568 -28BD BRAILLE PATTERN DOTS-134568 -28BE BRAILLE PATTERN DOTS-234568 -28BF BRAILLE PATTERN DOTS-1234568 -28C0 BRAILLE PATTERN DOTS-78 -28C1 BRAILLE PATTERN DOTS-178 -28C2 BRAILLE PATTERN DOTS-278 -28C3 BRAILLE PATTERN DOTS-1278 -28C4 BRAILLE PATTERN DOTS-378 -28C5 BRAILLE PATTERN DOTS-1378 -28C6 BRAILLE PATTERN DOTS-2378 -28C7 BRAILLE PATTERN DOTS-12378 -28C8 BRAILLE PATTERN DOTS-478 -28C9 BRAILLE PATTERN DOTS-1478 -28CA BRAILLE PATTERN DOTS-2478 -28CB BRAILLE PATTERN DOTS-12478 -28CC BRAILLE PATTERN DOTS-3478 -28CD BRAILLE PATTERN DOTS-13478 -28CE BRAILLE PATTERN DOTS-23478 -28CF BRAILLE PATTERN DOTS-123478 -28D0 BRAILLE PATTERN DOTS-578 -28D1 BRAILLE PATTERN DOTS-1578 -28D2 BRAILLE PATTERN DOTS-2578 -28D3 BRAILLE PATTERN DOTS-12578 -28D4 BRAILLE PATTERN DOTS-3578 -28D5 BRAILLE PATTERN DOTS-13578 -28D6 BRAILLE PATTERN DOTS-23578 -28D7 BRAILLE PATTERN DOTS-123578 -28D8 BRAILLE PATTERN DOTS-4578 -28D9 BRAILLE PATTERN DOTS-14578 -28DA BRAILLE PATTERN DOTS-24578 -28DB BRAILLE PATTERN DOTS-124578 -28DC BRAILLE PATTERN DOTS-34578 -28DD BRAILLE PATTERN DOTS-134578 -28DE BRAILLE PATTERN DOTS-234578 -28DF BRAILLE PATTERN DOTS-1234578 -28E0 BRAILLE PATTERN 
DOTS-678 -28E1 BRAILLE PATTERN DOTS-1678 -28E2 BRAILLE PATTERN DOTS-2678 -28E3 BRAILLE PATTERN DOTS-12678 -28E4 BRAILLE PATTERN DOTS-3678 -28E5 BRAILLE PATTERN DOTS-13678 -28E6 BRAILLE PATTERN DOTS-23678 -28E7 BRAILLE PATTERN DOTS-123678 -28E8 BRAILLE PATTERN DOTS-4678 -28E9 BRAILLE PATTERN DOTS-14678 -28EA BRAILLE PATTERN DOTS-24678 -28EB BRAILLE PATTERN DOTS-124678 -28EC BRAILLE PATTERN DOTS-34678 -28ED BRAILLE PATTERN DOTS-134678 -28EE BRAILLE PATTERN DOTS-234678 -28EF BRAILLE PATTERN DOTS-1234678 -28F0 BRAILLE PATTERN DOTS-5678 -28F1 BRAILLE PATTERN DOTS-15678 -28F2 BRAILLE PATTERN DOTS-25678 -28F3 BRAILLE PATTERN DOTS-125678 -28F4 BRAILLE PATTERN DOTS-35678 -28F5 BRAILLE PATTERN DOTS-135678 -28F6 BRAILLE PATTERN DOTS-235678 -28F7 BRAILLE PATTERN DOTS-1235678 -28F8 BRAILLE PATTERN DOTS-45678 -28F9 BRAILLE PATTERN DOTS-145678 -28FA BRAILLE PATTERN DOTS-245678 -28FB BRAILLE PATTERN DOTS-1245678 -28FC BRAILLE PATTERN DOTS-345678 -28FD BRAILLE PATTERN DOTS-1345678 -28FE BRAILLE PATTERN DOTS-2345678 -28FF BRAILLE PATTERN DOTS-12345678 -2900 RIGHTWARDS TWO-HEADED ARROW WITH VERTICAL STROKE -2901 RIGHTWARDS TWO-HEADED ARROW WITH DOUBLE VERTICAL STROKE -2902 LEFTWARDS DOUBLE ARROW WITH VERTICAL STROKE -2903 RIGHTWARDS DOUBLE ARROW WITH VERTICAL STROKE -2904 LEFT RIGHT DOUBLE ARROW WITH VERTICAL STROKE -2905 RIGHTWARDS TWO-HEADED ARROW FROM BAR -2906 LEFTWARDS DOUBLE ARROW FROM BAR -2907 RIGHTWARDS DOUBLE ARROW FROM BAR -2908 DOWNWARDS ARROW WITH HORIZONTAL STROKE -2909 UPWARDS ARROW WITH HORIZONTAL STROKE -290A UPWARDS TRIPLE ARROW -290B DOWNWARDS TRIPLE ARROW -290C LEFTWARDS DOUBLE DASH ARROW -290D RIGHTWARDS DOUBLE DASH ARROW -290E LEFTWARDS TRIPLE DASH ARROW -290F RIGHTWARDS TRIPLE DASH ARROW -2910 RIGHTWARDS TWO-HEADED TRIPLE DASH ARROW -2911 RIGHTWARDS ARROW WITH DOTTED STEM -2912 UPWARDS ARROW TO BAR -2913 DOWNWARDS ARROW TO BAR -2914 RIGHTWARDS ARROW WITH TAIL WITH VERTICAL STROKE -2915 RIGHTWARDS ARROW WITH TAIL WITH DOUBLE VERTICAL STROKE -2916 RIGHTWARDS 
TWO-HEADED ARROW WITH TAIL -2917 RIGHTWARDS TWO-HEADED ARROW WITH TAIL WITH VERTICAL STROKE -2918 RIGHTWARDS TWO-HEADED ARROW WITH TAIL WITH DOUBLE VERTICAL STROKE -2919 LEFTWARDS ARROW-TAIL -291A RIGHTWARDS ARROW-TAIL -291B LEFTWARDS DOUBLE ARROW-TAIL -291C RIGHTWARDS DOUBLE ARROW-TAIL -291D LEFTWARDS ARROW TO BLACK DIAMOND -291E RIGHTWARDS ARROW TO BLACK DIAMOND -291F LEFTWARDS ARROW FROM BAR TO BLACK DIAMOND -2920 RIGHTWARDS ARROW FROM BAR TO BLACK DIAMOND -2921 NORTH WEST AND SOUTH EAST ARROW -2922 NORTH EAST AND SOUTH WEST ARROW -2923 NORTH WEST ARROW WITH HOOK -2924 NORTH EAST ARROW WITH HOOK -2925 SOUTH EAST ARROW WITH HOOK -2926 SOUTH WEST ARROW WITH HOOK -2927 NORTH WEST ARROW AND NORTH EAST ARROW -2928 NORTH EAST ARROW AND SOUTH EAST ARROW -2929 SOUTH EAST ARROW AND SOUTH WEST ARROW -292A SOUTH WEST ARROW AND NORTH WEST ARROW -292B RISING DIAGONAL CROSSING FALLING DIAGONAL -292C FALLING DIAGONAL CROSSING RISING DIAGONAL -292D SOUTH EAST ARROW CROSSING NORTH EAST ARROW -292E NORTH EAST ARROW CROSSING SOUTH EAST ARROW -292F FALLING DIAGONAL CROSSING NORTH EAST ARROW -2930 RISING DIAGONAL CROSSING SOUTH EAST ARROW -2931 NORTH EAST ARROW CROSSING NORTH WEST ARROW -2932 NORTH WEST ARROW CROSSING NORTH EAST ARROW -2933 WAVE ARROW POINTING DIRECTLY RIGHT -2934 ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS -2935 ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS -2936 ARROW POINTING DOWNWARDS THEN CURVING LEFTWARDS -2937 ARROW POINTING DOWNWARDS THEN CURVING RIGHTWARDS -2938 RIGHT-SIDE ARC CLOCKWISE ARROW -2939 LEFT-SIDE ARC ANTICLOCKWISE ARROW -293A TOP ARC ANTICLOCKWISE ARROW -293B BOTTOM ARC ANTICLOCKWISE ARROW -293C TOP ARC CLOCKWISE ARROW WITH MINUS -293D TOP ARC ANTICLOCKWISE ARROW WITH PLUS -293E LOWER RIGHT SEMICIRCULAR CLOCKWISE ARROW -293F LOWER LEFT SEMICIRCULAR ANTICLOCKWISE ARROW -2940 ANTICLOCKWISE CLOSED CIRCLE ARROW -2941 CLOCKWISE CLOSED CIRCLE ARROW -2942 RIGHTWARDS ARROW ABOVE SHORT LEFTWARDS ARROW -2943 LEFTWARDS ARROW ABOVE SHORT RIGHTWARDS 
ARROW -2944 SHORT RIGHTWARDS ARROW ABOVE LEFTWARDS ARROW -2945 RIGHTWARDS ARROW WITH PLUS BELOW -2946 LEFTWARDS ARROW WITH PLUS BELOW -2947 RIGHTWARDS ARROW THROUGH X -2948 LEFT RIGHT ARROW THROUGH SMALL CIRCLE -2949 UPWARDS TWO-HEADED ARROW FROM SMALL CIRCLE -294A LEFT BARB UP RIGHT BARB DOWN HARPOON -294B LEFT BARB DOWN RIGHT BARB UP HARPOON -294C UP BARB RIGHT DOWN BARB LEFT HARPOON -294D UP BARB LEFT DOWN BARB RIGHT HARPOON -294E LEFT BARB UP RIGHT BARB UP HARPOON -294F UP BARB RIGHT DOWN BARB RIGHT HARPOON -2950 LEFT BARB DOWN RIGHT BARB DOWN HARPOON -2951 UP BARB LEFT DOWN BARB LEFT HARPOON -2952 LEFTWARDS HARPOON WITH BARB UP TO BAR -2953 RIGHTWARDS HARPOON WITH BARB UP TO BAR -2954 UPWARDS HARPOON WITH BARB RIGHT TO BAR -2955 DOWNWARDS HARPOON WITH BARB RIGHT TO BAR -2956 LEFTWARDS HARPOON WITH BARB DOWN TO BAR -2957 RIGHTWARDS HARPOON WITH BARB DOWN TO BAR -2958 UPWARDS HARPOON WITH BARB LEFT TO BAR -2959 DOWNWARDS HARPOON WITH BARB LEFT TO BAR -295A LEFTWARDS HARPOON WITH BARB UP FROM BAR -295B RIGHTWARDS HARPOON WITH BARB UP FROM BAR -295C UPWARDS HARPOON WITH BARB RIGHT FROM BAR -295D DOWNWARDS HARPOON WITH BARB RIGHT FROM BAR -295E LEFTWARDS HARPOON WITH BARB DOWN FROM BAR -295F RIGHTWARDS HARPOON WITH BARB DOWN FROM BAR -2960 UPWARDS HARPOON WITH BARB LEFT FROM BAR -2961 DOWNWARDS HARPOON WITH BARB LEFT FROM BAR -2962 LEFTWARDS HARPOON WITH BARB UP ABOVE LEFTWARDS HARPOON WITH BARB DOWN -2963 UPWARDS HARPOON WITH BARB LEFT BESIDE UPWARDS HARPOON WITH BARB RIGHT -2964 RIGHTWARDS HARPOON WITH BARB UP ABOVE RIGHTWARDS HARPOON WITH BARB DOWN -2965 DOWNWARDS HARPOON WITH BARB LEFT BESIDE DOWNWARDS HARPOON WITH BARB RIGHT -2966 LEFTWARDS HARPOON WITH BARB UP ABOVE RIGHTWARDS HARPOON WITH BARB UP -2967 LEFTWARDS HARPOON WITH BARB DOWN ABOVE RIGHTWARDS HARPOON WITH BARB DOWN -2968 RIGHTWARDS HARPOON WITH BARB UP ABOVE LEFTWARDS HARPOON WITH BARB UP -2969 RIGHTWARDS HARPOON WITH BARB DOWN ABOVE LEFTWARDS HARPOON WITH BARB DOWN -296A LEFTWARDS HARPOON WITH BARB 
UP ABOVE LONG DASH -296B LEFTWARDS HARPOON WITH BARB DOWN BELOW LONG DASH -296C RIGHTWARDS HARPOON WITH BARB UP ABOVE LONG DASH -296D RIGHTWARDS HARPOON WITH BARB DOWN BELOW LONG DASH -296E UPWARDS HARPOON WITH BARB LEFT BESIDE DOWNWARDS HARPOON WITH BARB RIGHT -296F DOWNWARDS HARPOON WITH BARB LEFT BESIDE UPWARDS HARPOON WITH BARB RIGHT -2970 RIGHT DOUBLE ARROW WITH ROUNDED HEAD -2971 EQUALS SIGN ABOVE RIGHTWARDS ARROW -2972 TILDE OPERATOR ABOVE RIGHTWARDS ARROW -2973 LEFTWARDS ARROW ABOVE TILDE OPERATOR -2974 RIGHTWARDS ARROW ABOVE TILDE OPERATOR -2975 RIGHTWARDS ARROW ABOVE ALMOST EQUAL TO -2976 LESS-THAN ABOVE LEFTWARDS ARROW -2977 LEFTWARDS ARROW THROUGH LESS-THAN -2978 GREATER-THAN ABOVE RIGHTWARDS ARROW -2979 SUBSET ABOVE RIGHTWARDS ARROW -297A LEFTWARDS ARROW THROUGH SUBSET -297B SUPERSET ABOVE LEFTWARDS ARROW -297C LEFT FISH TAIL -297D RIGHT FISH TAIL -297E UP FISH TAIL -297F DOWN FISH TAIL -2980 TRIPLE VERTICAL BAR DELIMITER -2981 Z NOTATION SPOT -2982 Z NOTATION TYPE COLON -2983 LEFT WHITE CURLY BRACKET -2984 RIGHT WHITE CURLY BRACKET -2985 LEFT WHITE PARENTHESIS -2986 RIGHT WHITE PARENTHESIS -2987 Z NOTATION LEFT IMAGE BRACKET -2988 Z NOTATION RIGHT IMAGE BRACKET -2989 Z NOTATION LEFT BINDING BRACKET -298A Z NOTATION RIGHT BINDING BRACKET -298B LEFT SQUARE BRACKET WITH UNDERBAR -298C RIGHT SQUARE BRACKET WITH UNDERBAR -298D LEFT SQUARE BRACKET WITH TICK IN TOP CORNER -298E RIGHT SQUARE BRACKET WITH TICK IN BOTTOM CORNER -298F LEFT SQUARE BRACKET WITH TICK IN BOTTOM CORNER -2990 RIGHT SQUARE BRACKET WITH TICK IN TOP CORNER -2991 LEFT ANGLE BRACKET WITH DOT -2992 RIGHT ANGLE BRACKET WITH DOT -2993 LEFT ARC LESS-THAN BRACKET -2994 RIGHT ARC GREATER-THAN BRACKET -2995 DOUBLE LEFT ARC GREATER-THAN BRACKET -2996 DOUBLE RIGHT ARC LESS-THAN BRACKET -2997 LEFT BLACK TORTOISE SHELL BRACKET -2998 RIGHT BLACK TORTOISE SHELL BRACKET -2999 DOTTED FENCE -299A VERTICAL ZIGZAG LINE -299B MEASURED ANGLE OPENING LEFT -299C RIGHT ANGLE VARIANT WITH SQUARE -299D MEASURED 
RIGHT ANGLE WITH DOT -299E ANGLE WITH S INSIDE -299F ACUTE ANGLE -29A0 SPHERICAL ANGLE OPENING LEFT -29A1 SPHERICAL ANGLE OPENING UP -29A2 TURNED ANGLE -29A3 REVERSED ANGLE -29A4 ANGLE WITH UNDERBAR -29A5 REVERSED ANGLE WITH UNDERBAR -29A6 OBLIQUE ANGLE OPENING UP -29A7 OBLIQUE ANGLE OPENING DOWN -29A8 MEASURED ANGLE WITH OPEN ARM ENDING IN ARROW POINTING UP AND RIGHT -29A9 MEASURED ANGLE WITH OPEN ARM ENDING IN ARROW POINTING UP AND LEFT -29AA MEASURED ANGLE WITH OPEN ARM ENDING IN ARROW POINTING DOWN AND RIGHT -29AB MEASURED ANGLE WITH OPEN ARM ENDING IN ARROW POINTING DOWN AND LEFT -29AC MEASURED ANGLE WITH OPEN ARM ENDING IN ARROW POINTING RIGHT AND UP -29AD MEASURED ANGLE WITH OPEN ARM ENDING IN ARROW POINTING LEFT AND UP -29AE MEASURED ANGLE WITH OPEN ARM ENDING IN ARROW POINTING RIGHT AND DOWN -29AF MEASURED ANGLE WITH OPEN ARM ENDING IN ARROW POINTING LEFT AND DOWN -29B0 REVERSED EMPTY SET -29B1 EMPTY SET WITH OVERBAR -29B2 EMPTY SET WITH SMALL CIRCLE ABOVE -29B3 EMPTY SET WITH RIGHT ARROW ABOVE -29B4 EMPTY SET WITH LEFT ARROW ABOVE -29B5 CIRCLE WITH HORIZONTAL BAR -29B6 CIRCLED VERTICAL BAR -29B7 CIRCLED PARALLEL -29B8 CIRCLED REVERSE SOLIDUS -29B9 CIRCLED PERPENDICULAR -29BA CIRCLE DIVIDED BY HORIZONTAL BAR AND TOP HALF DIVIDED BY VERTICAL BAR -29BB CIRCLE WITH SUPERIMPOSED X -29BC CIRCLED ANTICLOCKWISE-ROTATED DIVISION SIGN -29BD UP ARROW THROUGH CIRCLE -29BE CIRCLED WHITE BULLET -29BF CIRCLED BULLET -29C0 CIRCLED LESS-THAN -29C1 CIRCLED GREATER-THAN -29C2 CIRCLE WITH SMALL CIRCLE TO THE RIGHT -29C3 CIRCLE WITH TWO HORIZONTAL STROKES TO THE RIGHT -29C4 SQUARED RISING DIAGONAL SLASH -29C5 SQUARED FALLING DIAGONAL SLASH -29C6 SQUARED ASTERISK -29C7 SQUARED SMALL CIRCLE -29C8 SQUARED SQUARE -29C9 TWO JOINED SQUARES -29CA TRIANGLE WITH DOT ABOVE -29CB TRIANGLE WITH UNDERBAR -29CC S IN TRIANGLE -29CD TRIANGLE WITH SERIFS AT BOTTOM -29CE RIGHT TRIANGLE ABOVE LEFT TRIANGLE -29CF LEFT TRIANGLE BESIDE VERTICAL BAR -29D0 VERTICAL BAR BESIDE RIGHT TRIANGLE -29D1 
BOWTIE WITH LEFT HALF BLACK -29D2 BOWTIE WITH RIGHT HALF BLACK -29D3 BLACK BOWTIE -29D4 TIMES WITH LEFT HALF BLACK -29D5 TIMES WITH RIGHT HALF BLACK -29D6 WHITE HOURGLASS -29D7 BLACK HOURGLASS -29D8 LEFT WIGGLY FENCE -29D9 RIGHT WIGGLY FENCE -29DA LEFT DOUBLE WIGGLY FENCE -29DB RIGHT DOUBLE WIGGLY FENCE -29DC INCOMPLETE INFINITY -29DD TIE OVER INFINITY -29DE INFINITY NEGATED WITH VERTICAL BAR -29DF DOUBLE-ENDED MULTIMAP -29E0 SQUARE WITH CONTOURED OUTLINE -29E1 INCREASES AS -29E2 SHUFFLE PRODUCT -29E3 EQUALS SIGN AND SLANTED PARALLEL -29E4 EQUALS SIGN AND SLANTED PARALLEL WITH TILDE ABOVE -29E5 IDENTICAL TO AND SLANTED PARALLEL -29E6 GLEICH STARK -29E7 THERMODYNAMIC -29E8 DOWN-POINTING TRIANGLE WITH LEFT HALF BLACK -29E9 DOWN-POINTING TRIANGLE WITH RIGHT HALF BLACK -29EA BLACK DIAMOND WITH DOWN ARROW -29EB BLACK LOZENGE -29EC WHITE CIRCLE WITH DOWN ARROW -29ED BLACK CIRCLE WITH DOWN ARROW -29EE ERROR-BARRED WHITE SQUARE -29EF ERROR-BARRED BLACK SQUARE -29F0 ERROR-BARRED WHITE DIAMOND -29F1 ERROR-BARRED BLACK DIAMOND -29F2 ERROR-BARRED WHITE CIRCLE -29F3 ERROR-BARRED BLACK CIRCLE -29F4 RULE-DELAYED -29F5 REVERSE SOLIDUS OPERATOR -29F6 SOLIDUS WITH OVERBAR -29F7 REVERSE SOLIDUS WITH HORIZONTAL STROKE -29F8 BIG SOLIDUS -29F9 BIG REVERSE SOLIDUS -29FA DOUBLE PLUS -29FB TRIPLE PLUS -29FC LEFT-POINTING CURVED ANGLE BRACKET -29FD RIGHT-POINTING CURVED ANGLE BRACKET -29FE TINY -29FF MINY -2A00 N-ARY CIRCLED DOT OPERATOR -2A01 N-ARY CIRCLED PLUS OPERATOR -2A02 N-ARY CIRCLED TIMES OPERATOR -2A03 N-ARY UNION OPERATOR WITH DOT -2A04 N-ARY UNION OPERATOR WITH PLUS -2A05 N-ARY SQUARE INTERSECTION OPERATOR -2A06 N-ARY SQUARE UNION OPERATOR -2A07 TWO LOGICAL AND OPERATOR -2A08 TWO LOGICAL OR OPERATOR -2A09 N-ARY TIMES OPERATOR -2A0A MODULO TWO SUM -2A0B SUMMATION WITH INTEGRAL -2A0C QUADRUPLE INTEGRAL OPERATOR -2A0D FINITE PART INTEGRAL -2A0E INTEGRAL WITH DOUBLE STROKE -2A0F INTEGRAL AVERAGE WITH SLASH -2A10 CIRCULATION FUNCTION -2A11 ANTICLOCKWISE INTEGRATION -2A12 LINE 
INTEGRATION WITH RECTANGULAR PATH AROUND POLE -2A13 LINE INTEGRATION WITH SEMICIRCULAR PATH AROUND POLE -2A14 LINE INTEGRATION NOT INCLUDING THE POLE -2A15 INTEGRAL AROUND A POINT OPERATOR -2A16 QUATERNION INTEGRAL OPERATOR -2A17 INTEGRAL WITH LEFTWARDS ARROW WITH HOOK -2A18 INTEGRAL WITH TIMES SIGN -2A19 INTEGRAL WITH INTERSECTION -2A1A INTEGRAL WITH UNION -2A1B INTEGRAL WITH OVERBAR -2A1C INTEGRAL WITH UNDERBAR -2A1D JOIN -2A1E LARGE LEFT TRIANGLE OPERATOR -2A1F Z NOTATION SCHEMA COMPOSITION -2A20 Z NOTATION SCHEMA PIPING -2A21 Z NOTATION SCHEMA PROJECTION -2A22 PLUS SIGN WITH SMALL CIRCLE ABOVE -2A23 PLUS SIGN WITH CIRCUMFLEX ACCENT ABOVE -2A24 PLUS SIGN WITH TILDE ABOVE -2A25 PLUS SIGN WITH DOT BELOW -2A26 PLUS SIGN WITH TILDE BELOW -2A27 PLUS SIGN WITH SUBSCRIPT TWO -2A28 PLUS SIGN WITH BLACK TRIANGLE -2A29 MINUS SIGN WITH COMMA ABOVE -2A2A MINUS SIGN WITH DOT BELOW -2A2B MINUS SIGN WITH FALLING DOTS -2A2C MINUS SIGN WITH RISING DOTS -2A2D PLUS SIGN IN LEFT HALF CIRCLE -2A2E PLUS SIGN IN RIGHT HALF CIRCLE -2A2F VECTOR OR CROSS PRODUCT -2A30 MULTIPLICATION SIGN WITH DOT ABOVE -2A31 MULTIPLICATION SIGN WITH UNDERBAR -2A32 SEMIDIRECT PRODUCT WITH BOTTOM CLOSED -2A33 SMASH PRODUCT -2A34 MULTIPLICATION SIGN IN LEFT HALF CIRCLE -2A35 MULTIPLICATION SIGN IN RIGHT HALF CIRCLE -2A36 CIRCLED MULTIPLICATION SIGN WITH CIRCUMFLEX ACCENT -2A37 MULTIPLICATION SIGN IN DOUBLE CIRCLE -2A38 CIRCLED DIVISION SIGN -2A39 PLUS SIGN IN TRIANGLE -2A3A MINUS SIGN IN TRIANGLE -2A3B MULTIPLICATION SIGN IN TRIANGLE -2A3C INTERIOR PRODUCT -2A3D RIGHTHAND INTERIOR PRODUCT -2A3E Z NOTATION RELATIONAL COMPOSITION -2A3F AMALGAMATION OR COPRODUCT -2A40 INTERSECTION WITH DOT -2A41 UNION WITH MINUS SIGN -2A42 UNION WITH OVERBAR -2A43 INTERSECTION WITH OVERBAR -2A44 INTERSECTION WITH LOGICAL AND -2A45 UNION WITH LOGICAL OR -2A46 UNION ABOVE INTERSECTION -2A47 INTERSECTION ABOVE UNION -2A48 UNION ABOVE BAR ABOVE INTERSECTION -2A49 INTERSECTION ABOVE BAR ABOVE UNION -2A4A UNION BESIDE AND JOINED 
WITH UNION -2A4B INTERSECTION BESIDE AND JOINED WITH INTERSECTION -2A4C CLOSED UNION WITH SERIFS -2A4D CLOSED INTERSECTION WITH SERIFS -2A4E DOUBLE SQUARE INTERSECTION -2A4F DOUBLE SQUARE UNION -2A50 CLOSED UNION WITH SERIFS AND SMASH PRODUCT -2A51 LOGICAL AND WITH DOT ABOVE -2A52 LOGICAL OR WITH DOT ABOVE -2A53 DOUBLE LOGICAL AND -2A54 DOUBLE LOGICAL OR -2A55 TWO INTERSECTING LOGICAL AND -2A56 TWO INTERSECTING LOGICAL OR -2A57 SLOPING LARGE OR -2A58 SLOPING LARGE AND -2A59 LOGICAL OR OVERLAPPING LOGICAL AND -2A5A LOGICAL AND WITH MIDDLE STEM -2A5B LOGICAL OR WITH MIDDLE STEM -2A5C LOGICAL AND WITH HORIZONTAL DASH -2A5D LOGICAL OR WITH HORIZONTAL DASH -2A5E LOGICAL AND WITH DOUBLE OVERBAR -2A5F LOGICAL AND WITH UNDERBAR -2A60 LOGICAL AND WITH DOUBLE UNDERBAR -2A61 SMALL VEE WITH UNDERBAR -2A62 LOGICAL OR WITH DOUBLE OVERBAR -2A63 LOGICAL OR WITH DOUBLE UNDERBAR -2A64 Z NOTATION DOMAIN ANTIRESTRICTION -2A65 Z NOTATION RANGE ANTIRESTRICTION -2A66 EQUALS SIGN WITH DOT BELOW -2A67 IDENTICAL WITH DOT ABOVE -2A68 TRIPLE HORIZONTAL BAR WITH DOUBLE VERTICAL STROKE -2A69 TRIPLE HORIZONTAL BAR WITH TRIPLE VERTICAL STROKE -2A6A TILDE OPERATOR WITH DOT ABOVE -2A6B TILDE OPERATOR WITH RISING DOTS -2A6C SIMILAR MINUS SIMILAR -2A6D CONGRUENT WITH DOT ABOVE -2A6E EQUALS WITH ASTERISK -2A6F ALMOST EQUAL TO WITH CIRCUMFLEX ACCENT -2A70 APPROXIMATELY EQUAL OR EQUAL TO -2A71 EQUALS SIGN ABOVE PLUS SIGN -2A72 PLUS SIGN ABOVE EQUALS SIGN -2A73 EQUALS SIGN ABOVE TILDE OPERATOR -2A74 DOUBLE COLON EQUAL -2A75 TWO CONSECUTIVE EQUALS SIGNS -2A76 THREE CONSECUTIVE EQUALS SIGNS -2A77 EQUALS SIGN WITH TWO DOTS ABOVE AND TWO DOTS BELOW -2A78 EQUIVALENT WITH FOUR DOTS ABOVE -2A79 LESS-THAN WITH CIRCLE INSIDE -2A7A GREATER-THAN WITH CIRCLE INSIDE -2A7B LESS-THAN WITH QUESTION MARK ABOVE -2A7C GREATER-THAN WITH QUESTION MARK ABOVE -2A7D LESS-THAN OR SLANTED EQUAL TO -2A7E GREATER-THAN OR SLANTED EQUAL TO -2A7F LESS-THAN OR SLANTED EQUAL TO WITH DOT INSIDE -2A80 GREATER-THAN OR SLANTED EQUAL TO WITH 
DOT INSIDE -2A81 LESS-THAN OR SLANTED EQUAL TO WITH DOT ABOVE -2A82 GREATER-THAN OR SLANTED EQUAL TO WITH DOT ABOVE -2A83 LESS-THAN OR SLANTED EQUAL TO WITH DOT ABOVE RIGHT -2A84 GREATER-THAN OR SLANTED EQUAL TO WITH DOT ABOVE LEFT -2A85 LESS-THAN OR APPROXIMATE -2A86 GREATER-THAN OR APPROXIMATE -2A87 LESS-THAN AND SINGLE-LINE NOT EQUAL TO -2A88 GREATER-THAN AND SINGLE-LINE NOT EQUAL TO -2A89 LESS-THAN AND NOT APPROXIMATE -2A8A GREATER-THAN AND NOT APPROXIMATE -2A8B LESS-THAN ABOVE DOUBLE-LINE EQUAL ABOVE GREATER-THAN -2A8C GREATER-THAN ABOVE DOUBLE-LINE EQUAL ABOVE LESS-THAN -2A8D LESS-THAN ABOVE SIMILAR OR EQUAL -2A8E GREATER-THAN ABOVE SIMILAR OR EQUAL -2A8F LESS-THAN ABOVE SIMILAR ABOVE GREATER-THAN -2A90 GREATER-THAN ABOVE SIMILAR ABOVE LESS-THAN -2A91 LESS-THAN ABOVE GREATER-THAN ABOVE DOUBLE-LINE EQUAL -2A92 GREATER-THAN ABOVE LESS-THAN ABOVE DOUBLE-LINE EQUAL -2A93 LESS-THAN ABOVE SLANTED EQUAL ABOVE GREATER-THAN ABOVE SLANTED EQUAL -2A94 GREATER-THAN ABOVE SLANTED EQUAL ABOVE LESS-THAN ABOVE SLANTED EQUAL -2A95 SLANTED EQUAL TO OR LESS-THAN -2A96 SLANTED EQUAL TO OR GREATER-THAN -2A97 SLANTED EQUAL TO OR LESS-THAN WITH DOT INSIDE -2A98 SLANTED EQUAL TO OR GREATER-THAN WITH DOT INSIDE -2A99 DOUBLE-LINE EQUAL TO OR LESS-THAN -2A9A DOUBLE-LINE EQUAL TO OR GREATER-THAN -2A9B DOUBLE-LINE SLANTED EQUAL TO OR LESS-THAN -2A9C DOUBLE-LINE SLANTED EQUAL TO OR GREATER-THAN -2A9D SIMILAR OR LESS-THAN -2A9E SIMILAR OR GREATER-THAN -2A9F SIMILAR ABOVE LESS-THAN ABOVE EQUALS SIGN -2AA0 SIMILAR ABOVE GREATER-THAN ABOVE EQUALS SIGN -2AA1 DOUBLE NESTED LESS-THAN -2AA2 DOUBLE NESTED GREATER-THAN -2AA3 DOUBLE NESTED LESS-THAN WITH UNDERBAR -2AA4 GREATER-THAN OVERLAPPING LESS-THAN -2AA5 GREATER-THAN BESIDE LESS-THAN -2AA6 LESS-THAN CLOSED BY CURVE -2AA7 GREATER-THAN CLOSED BY CURVE -2AA8 LESS-THAN CLOSED BY CURVE ABOVE SLANTED EQUAL -2AA9 GREATER-THAN CLOSED BY CURVE ABOVE SLANTED EQUAL -2AAA SMALLER THAN -2AAB LARGER THAN -2AAC SMALLER THAN OR EQUAL TO -2AAD LARGER THAN OR 
EQUAL TO -2AAE EQUALS SIGN WITH BUMPY ABOVE -2AAF PRECEDES ABOVE SINGLE-LINE EQUALS SIGN -2AB0 SUCCEEDS ABOVE SINGLE-LINE EQUALS SIGN -2AB1 PRECEDES ABOVE SINGLE-LINE NOT EQUAL TO -2AB2 SUCCEEDS ABOVE SINGLE-LINE NOT EQUAL TO -2AB3 PRECEDES ABOVE EQUALS SIGN -2AB4 SUCCEEDS ABOVE EQUALS SIGN -2AB5 PRECEDES ABOVE NOT EQUAL TO -2AB6 SUCCEEDS ABOVE NOT EQUAL TO -2AB7 PRECEDES ABOVE ALMOST EQUAL TO -2AB8 SUCCEEDS ABOVE ALMOST EQUAL TO -2AB9 PRECEDES ABOVE NOT ALMOST EQUAL TO -2ABA SUCCEEDS ABOVE NOT ALMOST EQUAL TO -2ABB DOUBLE PRECEDES -2ABC DOUBLE SUCCEEDS -2ABD SUBSET WITH DOT -2ABE SUPERSET WITH DOT -2ABF SUBSET WITH PLUS SIGN BELOW -2AC0 SUPERSET WITH PLUS SIGN BELOW -2AC1 SUBSET WITH MULTIPLICATION SIGN BELOW -2AC2 SUPERSET WITH MULTIPLICATION SIGN BELOW -2AC3 SUBSET OF OR EQUAL TO WITH DOT ABOVE -2AC4 SUPERSET OF OR EQUAL TO WITH DOT ABOVE -2AC5 SUBSET OF ABOVE EQUALS SIGN -2AC6 SUPERSET OF ABOVE EQUALS SIGN -2AC7 SUBSET OF ABOVE TILDE OPERATOR -2AC8 SUPERSET OF ABOVE TILDE OPERATOR -2AC9 SUBSET OF ABOVE ALMOST EQUAL TO -2ACA SUPERSET OF ABOVE ALMOST EQUAL TO -2ACB SUBSET OF ABOVE NOT EQUAL TO -2ACC SUPERSET OF ABOVE NOT EQUAL TO -2ACD SQUARE LEFT OPEN BOX OPERATOR -2ACE SQUARE RIGHT OPEN BOX OPERATOR -2ACF CLOSED SUBSET -2AD0 CLOSED SUPERSET -2AD1 CLOSED SUBSET OR EQUAL TO -2AD2 CLOSED SUPERSET OR EQUAL TO -2AD3 SUBSET ABOVE SUPERSET -2AD4 SUPERSET ABOVE SUBSET -2AD5 SUBSET ABOVE SUBSET -2AD6 SUPERSET ABOVE SUPERSET -2AD7 SUPERSET BESIDE SUBSET -2AD8 SUPERSET BESIDE AND JOINED BY DASH WITH SUBSET -2AD9 ELEMENT OF OPENING DOWNWARDS -2ADA PITCHFORK WITH TEE TOP -2ADB TRANSVERSAL INTERSECTION -2ADC FORKING -2ADD NONFORKING -2ADE SHORT LEFT TACK -2ADF SHORT DOWN TACK -2AE0 SHORT UP TACK -2AE1 PERPENDICULAR WITH S -2AE2 VERTICAL BAR TRIPLE RIGHT TURNSTILE -2AE3 DOUBLE VERTICAL BAR LEFT TURNSTILE -2AE4 VERTICAL BAR DOUBLE LEFT TURNSTILE -2AE5 DOUBLE VERTICAL BAR DOUBLE LEFT TURNSTILE -2AE6 LONG DASH FROM LEFT MEMBER OF DOUBLE VERTICAL -2AE7 SHORT DOWN TACK WITH 
OVERBAR -2AE8 SHORT UP TACK WITH UNDERBAR -2AE9 SHORT UP TACK ABOVE SHORT DOWN TACK -2AEA DOUBLE DOWN TACK -2AEB DOUBLE UP TACK -2AEC DOUBLE STROKE NOT SIGN -2AED REVERSED DOUBLE STROKE NOT SIGN -2AEE DOES NOT DIVIDE WITH REVERSED NEGATION SLASH -2AEF VERTICAL LINE WITH CIRCLE ABOVE -2AF0 VERTICAL LINE WITH CIRCLE BELOW -2AF1 DOWN TACK WITH CIRCLE BELOW -2AF2 PARALLEL WITH HORIZONTAL STROKE -2AF3 PARALLEL WITH TILDE OPERATOR -2AF4 TRIPLE VERTICAL BAR BINARY RELATION -2AF5 TRIPLE VERTICAL BAR WITH HORIZONTAL STROKE -2AF6 TRIPLE COLON OPERATOR -2AF7 TRIPLE NESTED LESS-THAN -2AF8 TRIPLE NESTED GREATER-THAN -2AF9 DOUBLE-LINE SLANTED LESS-THAN OR EQUAL TO -2AFA DOUBLE-LINE SLANTED GREATER-THAN OR EQUAL TO -2AFB TRIPLE SOLIDUS BINARY RELATION -2AFC LARGE TRIPLE VERTICAL BAR OPERATOR -2AFD DOUBLE SOLIDUS OPERATOR -2AFE WHITE VERTICAL BAR -2AFF N-ARY WHITE VERTICAL BAR -2B00 NORTH EAST WHITE ARROW -2B01 NORTH WEST WHITE ARROW -2B02 SOUTH EAST WHITE ARROW -2B03 SOUTH WEST WHITE ARROW -2B04 LEFT RIGHT WHITE ARROW -2B05 LEFTWARDS BLACK ARROW -2B06 UPWARDS BLACK ARROW -2B07 DOWNWARDS BLACK ARROW -2B08 NORTH EAST BLACK ARROW -2B09 NORTH WEST BLACK ARROW -2B0A SOUTH EAST BLACK ARROW -2B0B SOUTH WEST BLACK ARROW -2B0C LEFT RIGHT BLACK ARROW -2B0D UP DOWN BLACK ARROW -2B0E RIGHTWARDS ARROW WITH TIP DOWNWARDS -2B0F RIGHTWARDS ARROW WITH TIP UPWARDS -2B10 LEFTWARDS ARROW WITH TIP DOWNWARDS -2B11 LEFTWARDS ARROW WITH TIP UPWARDS -2B12 SQUARE WITH TOP HALF BLACK -2B13 SQUARE WITH BOTTOM HALF BLACK -2B14 SQUARE WITH UPPER RIGHT DIAGONAL HALF BLACK -2B15 SQUARE WITH LOWER LEFT DIAGONAL HALF BLACK -2B16 DIAMOND WITH LEFT HALF BLACK -2B17 DIAMOND WITH RIGHT HALF BLACK -2B18 DIAMOND WITH TOP HALF BLACK -2B19 DIAMOND WITH BOTTOM HALF BLACK -2B1A DOTTED SQUARE -2B1B BLACK LARGE SQUARE -2B1C WHITE LARGE SQUARE -2B1D BLACK VERY SMALL SQUARE -2B1E WHITE VERY SMALL SQUARE -2B1F BLACK PENTAGON -2B20 WHITE PENTAGON -2B21 WHITE HEXAGON -2B22 BLACK HEXAGON -2B23 HORIZONTAL BLACK HEXAGON -2B24 BLACK 
LARGE CIRCLE -2B25 BLACK MEDIUM DIAMOND -2B26 WHITE MEDIUM DIAMOND -2B27 BLACK MEDIUM LOZENGE -2B28 WHITE MEDIUM LOZENGE -2B29 BLACK SMALL DIAMOND -2B2A BLACK SMALL LOZENGE -2B2B WHITE SMALL LOZENGE -2B2C BLACK HORIZONTAL ELLIPSE -2B2D WHITE HORIZONTAL ELLIPSE -2B2E BLACK VERTICAL ELLIPSE -2B2F WHITE VERTICAL ELLIPSE -2B30 LEFT ARROW WITH SMALL CIRCLE -2B31 THREE LEFTWARDS ARROWS -2B32 LEFT ARROW WITH CIRCLED PLUS -2B33 LONG LEFTWARDS SQUIGGLE ARROW -2B34 LEFTWARDS TWO-HEADED ARROW WITH VERTICAL STROKE -2B35 LEFTWARDS TWO-HEADED ARROW WITH DOUBLE VERTICAL STROKE -2B36 LEFTWARDS TWO-HEADED ARROW FROM BAR -2B37 LEFTWARDS TWO-HEADED TRIPLE DASH ARROW -2B38 LEFTWARDS ARROW WITH DOTTED STEM -2B39 LEFTWARDS ARROW WITH TAIL WITH VERTICAL STROKE -2B3A LEFTWARDS ARROW WITH TAIL WITH DOUBLE VERTICAL STROKE -2B3B LEFTWARDS TWO-HEADED ARROW WITH TAIL -2B3C LEFTWARDS TWO-HEADED ARROW WITH TAIL WITH VERTICAL STROKE -2B3D LEFTWARDS TWO-HEADED ARROW WITH TAIL WITH DOUBLE VERTICAL STROKE -2B3E LEFTWARDS ARROW THROUGH X -2B3F WAVE ARROW POINTING DIRECTLY LEFT -2B40 EQUALS SIGN ABOVE LEFTWARDS ARROW -2B41 REVERSE TILDE OPERATOR ABOVE LEFTWARDS ARROW -2B42 LEFTWARDS ARROW ABOVE REVERSE ALMOST EQUAL TO -2B43 RIGHTWARDS ARROW THROUGH GREATER-THAN -2B44 RIGHTWARDS ARROW THROUGH SUPERSET -2B45 LEFTWARDS QUADRUPLE ARROW -2B46 RIGHTWARDS QUADRUPLE ARROW -2B47 REVERSE TILDE OPERATOR ABOVE RIGHTWARDS ARROW -2B48 RIGHTWARDS ARROW ABOVE REVERSE ALMOST EQUAL TO -2B49 TILDE OPERATOR ABOVE LEFTWARDS ARROW -2B4A LEFTWARDS ARROW ABOVE ALMOST EQUAL TO -2B4B LEFTWARDS ARROW ABOVE REVERSE TILDE OPERATOR -2B4C RIGHTWARDS ARROW ABOVE REVERSE TILDE OPERATOR -2B50 WHITE MEDIUM STAR -2B51 BLACK SMALL STAR -2B52 WHITE SMALL STAR -2B53 BLACK RIGHT-POINTING PENTAGON -2B54 WHITE RIGHT-POINTING PENTAGON -2B55 HEAVY LARGE CIRCLE -2B56 HEAVY OVAL WITH OVAL INSIDE -2B57 HEAVY CIRCLE WITH CIRCLE INSIDE -2B58 HEAVY CIRCLE -2B59 HEAVY CIRCLED SALTIRE -2C00 GLAGOLITIC CAPITAL LETTER AZU -2C01 GLAGOLITIC CAPITAL LETTER 
BUKY -2C02 GLAGOLITIC CAPITAL LETTER VEDE -2C03 GLAGOLITIC CAPITAL LETTER GLAGOLI -2C04 GLAGOLITIC CAPITAL LETTER DOBRO -2C05 GLAGOLITIC CAPITAL LETTER YESTU -2C06 GLAGOLITIC CAPITAL LETTER ZHIVETE -2C07 GLAGOLITIC CAPITAL LETTER DZELO -2C08 GLAGOLITIC CAPITAL LETTER ZEMLJA -2C09 GLAGOLITIC CAPITAL LETTER IZHE -2C0A GLAGOLITIC CAPITAL LETTER INITIAL IZHE -2C0B GLAGOLITIC CAPITAL LETTER I -2C0C GLAGOLITIC CAPITAL LETTER DJERVI -2C0D GLAGOLITIC CAPITAL LETTER KAKO -2C0E GLAGOLITIC CAPITAL LETTER LJUDIJE -2C0F GLAGOLITIC CAPITAL LETTER MYSLITE -2C10 GLAGOLITIC CAPITAL LETTER NASHI -2C11 GLAGOLITIC CAPITAL LETTER ONU -2C12 GLAGOLITIC CAPITAL LETTER POKOJI -2C13 GLAGOLITIC CAPITAL LETTER RITSI -2C14 GLAGOLITIC CAPITAL LETTER SLOVO -2C15 GLAGOLITIC CAPITAL LETTER TVRIDO -2C16 GLAGOLITIC CAPITAL LETTER UKU -2C17 GLAGOLITIC CAPITAL LETTER FRITU -2C18 GLAGOLITIC CAPITAL LETTER HERU -2C19 GLAGOLITIC CAPITAL LETTER OTU -2C1A GLAGOLITIC CAPITAL LETTER PE -2C1B GLAGOLITIC CAPITAL LETTER SHTA -2C1C GLAGOLITIC CAPITAL LETTER TSI -2C1D GLAGOLITIC CAPITAL LETTER CHRIVI -2C1E GLAGOLITIC CAPITAL LETTER SHA -2C1F GLAGOLITIC CAPITAL LETTER YERU -2C20 GLAGOLITIC CAPITAL LETTER YERI -2C21 GLAGOLITIC CAPITAL LETTER YATI -2C22 GLAGOLITIC CAPITAL LETTER SPIDERY HA -2C23 GLAGOLITIC CAPITAL LETTER YU -2C24 GLAGOLITIC CAPITAL LETTER SMALL YUS -2C25 GLAGOLITIC CAPITAL LETTER SMALL YUS WITH TAIL -2C26 GLAGOLITIC CAPITAL LETTER YO -2C27 GLAGOLITIC CAPITAL LETTER IOTATED SMALL YUS -2C28 GLAGOLITIC CAPITAL LETTER BIG YUS -2C29 GLAGOLITIC CAPITAL LETTER IOTATED BIG YUS -2C2A GLAGOLITIC CAPITAL LETTER FITA -2C2B GLAGOLITIC CAPITAL LETTER IZHITSA -2C2C GLAGOLITIC CAPITAL LETTER SHTAPIC -2C2D GLAGOLITIC CAPITAL LETTER TROKUTASTI A -2C2E GLAGOLITIC CAPITAL LETTER LATINATE MYSLITE -2C30 GLAGOLITIC SMALL LETTER AZU -2C31 GLAGOLITIC SMALL LETTER BUKY -2C32 GLAGOLITIC SMALL LETTER VEDE -2C33 GLAGOLITIC SMALL LETTER GLAGOLI -2C34 GLAGOLITIC SMALL LETTER DOBRO -2C35 GLAGOLITIC SMALL LETTER YESTU -2C36 
GLAGOLITIC SMALL LETTER ZHIVETE -2C37 GLAGOLITIC SMALL LETTER DZELO -2C38 GLAGOLITIC SMALL LETTER ZEMLJA -2C39 GLAGOLITIC SMALL LETTER IZHE -2C3A GLAGOLITIC SMALL LETTER INITIAL IZHE -2C3B GLAGOLITIC SMALL LETTER I -2C3C GLAGOLITIC SMALL LETTER DJERVI -2C3D GLAGOLITIC SMALL LETTER KAKO -2C3E GLAGOLITIC SMALL LETTER LJUDIJE -2C3F GLAGOLITIC SMALL LETTER MYSLITE -2C40 GLAGOLITIC SMALL LETTER NASHI -2C41 GLAGOLITIC SMALL LETTER ONU -2C42 GLAGOLITIC SMALL LETTER POKOJI -2C43 GLAGOLITIC SMALL LETTER RITSI -2C44 GLAGOLITIC SMALL LETTER SLOVO -2C45 GLAGOLITIC SMALL LETTER TVRIDO -2C46 GLAGOLITIC SMALL LETTER UKU -2C47 GLAGOLITIC SMALL LETTER FRITU -2C48 GLAGOLITIC SMALL LETTER HERU -2C49 GLAGOLITIC SMALL LETTER OTU -2C4A GLAGOLITIC SMALL LETTER PE -2C4B GLAGOLITIC SMALL LETTER SHTA -2C4C GLAGOLITIC SMALL LETTER TSI -2C4D GLAGOLITIC SMALL LETTER CHRIVI -2C4E GLAGOLITIC SMALL LETTER SHA -2C4F GLAGOLITIC SMALL LETTER YERU -2C50 GLAGOLITIC SMALL LETTER YERI -2C51 GLAGOLITIC SMALL LETTER YATI -2C52 GLAGOLITIC SMALL LETTER SPIDERY HA -2C53 GLAGOLITIC SMALL LETTER YU -2C54 GLAGOLITIC SMALL LETTER SMALL YUS -2C55 GLAGOLITIC SMALL LETTER SMALL YUS WITH TAIL -2C56 GLAGOLITIC SMALL LETTER YO -2C57 GLAGOLITIC SMALL LETTER IOTATED SMALL YUS -2C58 GLAGOLITIC SMALL LETTER BIG YUS -2C59 GLAGOLITIC SMALL LETTER IOTATED BIG YUS -2C5A GLAGOLITIC SMALL LETTER FITA -2C5B GLAGOLITIC SMALL LETTER IZHITSA -2C5C GLAGOLITIC SMALL LETTER SHTAPIC -2C5D GLAGOLITIC SMALL LETTER TROKUTASTI A -2C5E GLAGOLITIC SMALL LETTER LATINATE MYSLITE -2C60 LATIN CAPITAL LETTER L WITH DOUBLE BAR -2C61 LATIN SMALL LETTER L WITH DOUBLE BAR -2C62 LATIN CAPITAL LETTER L WITH MIDDLE TILDE -2C63 LATIN CAPITAL LETTER P WITH STROKE -2C64 LATIN CAPITAL LETTER R WITH TAIL -2C65 LATIN SMALL LETTER A WITH STROKE -2C66 LATIN SMALL LETTER T WITH DIAGONAL STROKE -2C67 LATIN CAPITAL LETTER H WITH DESCENDER -2C68 LATIN SMALL LETTER H WITH DESCENDER -2C69 LATIN CAPITAL LETTER K WITH DESCENDER -2C6A LATIN SMALL LETTER K WITH DESCENDER 
-2C6B LATIN CAPITAL LETTER Z WITH DESCENDER -2C6C LATIN SMALL LETTER Z WITH DESCENDER -2C6D LATIN CAPITAL LETTER ALPHA -2C6E LATIN CAPITAL LETTER M WITH HOOK -2C6F LATIN CAPITAL LETTER TURNED A -2C70 LATIN CAPITAL LETTER TURNED ALPHA -2C71 LATIN SMALL LETTER V WITH RIGHT HOOK -2C72 LATIN CAPITAL LETTER W WITH HOOK -2C73 LATIN SMALL LETTER W WITH HOOK -2C74 LATIN SMALL LETTER V WITH CURL -2C75 LATIN CAPITAL LETTER HALF H -2C76 LATIN SMALL LETTER HALF H -2C77 LATIN SMALL LETTER TAILLESS PHI -2C78 LATIN SMALL LETTER E WITH NOTCH -2C79 LATIN SMALL LETTER TURNED R WITH TAIL -2C7A LATIN SMALL LETTER O WITH LOW RING INSIDE -2C7B LATIN LETTER SMALL CAPITAL TURNED E -2C7C LATIN SUBSCRIPT SMALL LETTER J -2C7D MODIFIER LETTER CAPITAL V -2C7E LATIN CAPITAL LETTER S WITH SWASH TAIL -2C7F LATIN CAPITAL LETTER Z WITH SWASH TAIL -2C80 COPTIC CAPITAL LETTER ALFA -2C81 COPTIC SMALL LETTER ALFA -2C82 COPTIC CAPITAL LETTER VIDA -2C83 COPTIC SMALL LETTER VIDA -2C84 COPTIC CAPITAL LETTER GAMMA -2C85 COPTIC SMALL LETTER GAMMA -2C86 COPTIC CAPITAL LETTER DALDA -2C87 COPTIC SMALL LETTER DALDA -2C88 COPTIC CAPITAL LETTER EIE -2C89 COPTIC SMALL LETTER EIE -2C8A COPTIC CAPITAL LETTER SOU -2C8B COPTIC SMALL LETTER SOU -2C8C COPTIC CAPITAL LETTER ZATA -2C8D COPTIC SMALL LETTER ZATA -2C8E COPTIC CAPITAL LETTER HATE -2C8F COPTIC SMALL LETTER HATE -2C90 COPTIC CAPITAL LETTER THETHE -2C91 COPTIC SMALL LETTER THETHE -2C92 COPTIC CAPITAL LETTER IAUDA -2C93 COPTIC SMALL LETTER IAUDA -2C94 COPTIC CAPITAL LETTER KAPA -2C95 COPTIC SMALL LETTER KAPA -2C96 COPTIC CAPITAL LETTER LAULA -2C97 COPTIC SMALL LETTER LAULA -2C98 COPTIC CAPITAL LETTER MI -2C99 COPTIC SMALL LETTER MI -2C9A COPTIC CAPITAL LETTER NI -2C9B COPTIC SMALL LETTER NI -2C9C COPTIC CAPITAL LETTER KSI -2C9D COPTIC SMALL LETTER KSI -2C9E COPTIC CAPITAL LETTER O -2C9F COPTIC SMALL LETTER O -2CA0 COPTIC CAPITAL LETTER PI -2CA1 COPTIC SMALL LETTER PI -2CA2 COPTIC CAPITAL LETTER RO -2CA3 COPTIC SMALL LETTER RO -2CA4 COPTIC CAPITAL LETTER SIMA -2CA5 
COPTIC SMALL LETTER SIMA -2CA6 COPTIC CAPITAL LETTER TAU -2CA7 COPTIC SMALL LETTER TAU -2CA8 COPTIC CAPITAL LETTER UA -2CA9 COPTIC SMALL LETTER UA -2CAA COPTIC CAPITAL LETTER FI -2CAB COPTIC SMALL LETTER FI -2CAC COPTIC CAPITAL LETTER KHI -2CAD COPTIC SMALL LETTER KHI -2CAE COPTIC CAPITAL LETTER PSI -2CAF COPTIC SMALL LETTER PSI -2CB0 COPTIC CAPITAL LETTER OOU -2CB1 COPTIC SMALL LETTER OOU -2CB2 COPTIC CAPITAL LETTER DIALECT-P ALEF -2CB3 COPTIC SMALL LETTER DIALECT-P ALEF -2CB4 COPTIC CAPITAL LETTER OLD COPTIC AIN -2CB5 COPTIC SMALL LETTER OLD COPTIC AIN -2CB6 COPTIC CAPITAL LETTER CRYPTOGRAMMIC EIE -2CB7 COPTIC SMALL LETTER CRYPTOGRAMMIC EIE -2CB8 COPTIC CAPITAL LETTER DIALECT-P KAPA -2CB9 COPTIC SMALL LETTER DIALECT-P KAPA -2CBA COPTIC CAPITAL LETTER DIALECT-P NI -2CBB COPTIC SMALL LETTER DIALECT-P NI -2CBC COPTIC CAPITAL LETTER CRYPTOGRAMMIC NI -2CBD COPTIC SMALL LETTER CRYPTOGRAMMIC NI -2CBE COPTIC CAPITAL LETTER OLD COPTIC OOU -2CBF COPTIC SMALL LETTER OLD COPTIC OOU -2CC0 COPTIC CAPITAL LETTER SAMPI -2CC1 COPTIC SMALL LETTER SAMPI -2CC2 COPTIC CAPITAL LETTER CROSSED SHEI -2CC3 COPTIC SMALL LETTER CROSSED SHEI -2CC4 COPTIC CAPITAL LETTER OLD COPTIC SHEI -2CC5 COPTIC SMALL LETTER OLD COPTIC SHEI -2CC6 COPTIC CAPITAL LETTER OLD COPTIC ESH -2CC7 COPTIC SMALL LETTER OLD COPTIC ESH -2CC8 COPTIC CAPITAL LETTER AKHMIMIC KHEI -2CC9 COPTIC SMALL LETTER AKHMIMIC KHEI -2CCA COPTIC CAPITAL LETTER DIALECT-P HORI -2CCB COPTIC SMALL LETTER DIALECT-P HORI -2CCC COPTIC CAPITAL LETTER OLD COPTIC HORI -2CCD COPTIC SMALL LETTER OLD COPTIC HORI -2CCE COPTIC CAPITAL LETTER OLD COPTIC HA -2CCF COPTIC SMALL LETTER OLD COPTIC HA -2CD0 COPTIC CAPITAL LETTER L-SHAPED HA -2CD1 COPTIC SMALL LETTER L-SHAPED HA -2CD2 COPTIC CAPITAL LETTER OLD COPTIC HEI -2CD3 COPTIC SMALL LETTER OLD COPTIC HEI -2CD4 COPTIC CAPITAL LETTER OLD COPTIC HAT -2CD5 COPTIC SMALL LETTER OLD COPTIC HAT -2CD6 COPTIC CAPITAL LETTER OLD COPTIC GANGIA -2CD7 COPTIC SMALL LETTER OLD COPTIC GANGIA -2CD8 COPTIC CAPITAL 
LETTER OLD COPTIC DJA -2CD9 COPTIC SMALL LETTER OLD COPTIC DJA -2CDA COPTIC CAPITAL LETTER OLD COPTIC SHIMA -2CDB COPTIC SMALL LETTER OLD COPTIC SHIMA -2CDC COPTIC CAPITAL LETTER OLD NUBIAN SHIMA -2CDD COPTIC SMALL LETTER OLD NUBIAN SHIMA -2CDE COPTIC CAPITAL LETTER OLD NUBIAN NGI -2CDF COPTIC SMALL LETTER OLD NUBIAN NGI -2CE0 COPTIC CAPITAL LETTER OLD NUBIAN NYI -2CE1 COPTIC SMALL LETTER OLD NUBIAN NYI -2CE2 COPTIC CAPITAL LETTER OLD NUBIAN WAU -2CE3 COPTIC SMALL LETTER OLD NUBIAN WAU -2CE4 COPTIC SYMBOL KAI -2CE5 COPTIC SYMBOL MI RO -2CE6 COPTIC SYMBOL PI RO -2CE7 COPTIC SYMBOL STAUROS -2CE8 COPTIC SYMBOL TAU RO -2CE9 COPTIC SYMBOL KHI RO -2CEA COPTIC SYMBOL SHIMA SIMA -2CEB COPTIC CAPITAL LETTER CRYPTOGRAMMIC SHEI -2CEC COPTIC SMALL LETTER CRYPTOGRAMMIC SHEI -2CED COPTIC CAPITAL LETTER CRYPTOGRAMMIC GANGIA -2CEE COPTIC SMALL LETTER CRYPTOGRAMMIC GANGIA -2CEF COPTIC COMBINING NI ABOVE -2CF0 COPTIC COMBINING SPIRITUS ASPER -2CF1 COPTIC COMBINING SPIRITUS LENIS -2CF9 COPTIC OLD NUBIAN FULL STOP -2CFA COPTIC OLD NUBIAN DIRECT QUESTION MARK -2CFB COPTIC OLD NUBIAN INDIRECT QUESTION MARK -2CFC COPTIC OLD NUBIAN VERSE DIVIDER -2CFD COPTIC FRACTION ONE HALF -2CFE COPTIC FULL STOP -2CFF COPTIC MORPHOLOGICAL DIVIDER -2D00 GEORGIAN SMALL LETTER AN -2D01 GEORGIAN SMALL LETTER BAN -2D02 GEORGIAN SMALL LETTER GAN -2D03 GEORGIAN SMALL LETTER DON -2D04 GEORGIAN SMALL LETTER EN -2D05 GEORGIAN SMALL LETTER VIN -2D06 GEORGIAN SMALL LETTER ZEN -2D07 GEORGIAN SMALL LETTER TAN -2D08 GEORGIAN SMALL LETTER IN -2D09 GEORGIAN SMALL LETTER KAN -2D0A GEORGIAN SMALL LETTER LAS -2D0B GEORGIAN SMALL LETTER MAN -2D0C GEORGIAN SMALL LETTER NAR -2D0D GEORGIAN SMALL LETTER ON -2D0E GEORGIAN SMALL LETTER PAR -2D0F GEORGIAN SMALL LETTER ZHAR -2D10 GEORGIAN SMALL LETTER RAE -2D11 GEORGIAN SMALL LETTER SAN -2D12 GEORGIAN SMALL LETTER TAR -2D13 GEORGIAN SMALL LETTER UN -2D14 GEORGIAN SMALL LETTER PHAR -2D15 GEORGIAN SMALL LETTER KHAR -2D16 GEORGIAN SMALL LETTER GHAN -2D17 GEORGIAN SMALL LETTER QAR 
-2D18 GEORGIAN SMALL LETTER SHIN -2D19 GEORGIAN SMALL LETTER CHIN -2D1A GEORGIAN SMALL LETTER CAN -2D1B GEORGIAN SMALL LETTER JIL -2D1C GEORGIAN SMALL LETTER CIL -2D1D GEORGIAN SMALL LETTER CHAR -2D1E GEORGIAN SMALL LETTER XAN -2D1F GEORGIAN SMALL LETTER JHAN -2D20 GEORGIAN SMALL LETTER HAE -2D21 GEORGIAN SMALL LETTER HE -2D22 GEORGIAN SMALL LETTER HIE -2D23 GEORGIAN SMALL LETTER WE -2D24 GEORGIAN SMALL LETTER HAR -2D25 GEORGIAN SMALL LETTER HOE -2D30 TIFINAGH LETTER YA -2D31 TIFINAGH LETTER YAB -2D32 TIFINAGH LETTER YABH -2D33 TIFINAGH LETTER YAG -2D34 TIFINAGH LETTER YAGHH -2D35 TIFINAGH LETTER BERBER ACADEMY YAJ -2D36 TIFINAGH LETTER YAJ -2D37 TIFINAGH LETTER YAD -2D38 TIFINAGH LETTER YADH -2D39 TIFINAGH LETTER YADD -2D3A TIFINAGH LETTER YADDH -2D3B TIFINAGH LETTER YEY -2D3C TIFINAGH LETTER YAF -2D3D TIFINAGH LETTER YAK -2D3E TIFINAGH LETTER TUAREG YAK -2D3F TIFINAGH LETTER YAKHH -2D40 TIFINAGH LETTER YAH -2D41 TIFINAGH LETTER BERBER ACADEMY YAH -2D42 TIFINAGH LETTER TUAREG YAH -2D43 TIFINAGH LETTER YAHH -2D44 TIFINAGH LETTER YAA -2D45 TIFINAGH LETTER YAKH -2D46 TIFINAGH LETTER TUAREG YAKH -2D47 TIFINAGH LETTER YAQ -2D48 TIFINAGH LETTER TUAREG YAQ -2D49 TIFINAGH LETTER YI -2D4A TIFINAGH LETTER YAZH -2D4B TIFINAGH LETTER AHAGGAR YAZH -2D4C TIFINAGH LETTER TUAREG YAZH -2D4D TIFINAGH LETTER YAL -2D4E TIFINAGH LETTER YAM -2D4F TIFINAGH LETTER YAN -2D50 TIFINAGH LETTER TUAREG YAGN -2D51 TIFINAGH LETTER TUAREG YANG -2D52 TIFINAGH LETTER YAP -2D53 TIFINAGH LETTER YU -2D54 TIFINAGH LETTER YAR -2D55 TIFINAGH LETTER YARR -2D56 TIFINAGH LETTER YAGH -2D57 TIFINAGH LETTER TUAREG YAGH -2D58 TIFINAGH LETTER AYER YAGH -2D59 TIFINAGH LETTER YAS -2D5A TIFINAGH LETTER YASS -2D5B TIFINAGH LETTER YASH -2D5C TIFINAGH LETTER YAT -2D5D TIFINAGH LETTER YATH -2D5E TIFINAGH LETTER YACH -2D5F TIFINAGH LETTER YATT -2D60 TIFINAGH LETTER YAV -2D61 TIFINAGH LETTER YAW -2D62 TIFINAGH LETTER YAY -2D63 TIFINAGH LETTER YAZ -2D64 TIFINAGH LETTER TAWELLEMET YAZ -2D65 TIFINAGH LETTER YAZZ -2D6F 
TIFINAGH MODIFIER LETTER LABIALIZATION MARK -2D80 ETHIOPIC SYLLABLE LOA -2D81 ETHIOPIC SYLLABLE MOA -2D82 ETHIOPIC SYLLABLE ROA -2D83 ETHIOPIC SYLLABLE SOA -2D84 ETHIOPIC SYLLABLE SHOA -2D85 ETHIOPIC SYLLABLE BOA -2D86 ETHIOPIC SYLLABLE TOA -2D87 ETHIOPIC SYLLABLE COA -2D88 ETHIOPIC SYLLABLE NOA -2D89 ETHIOPIC SYLLABLE NYOA -2D8A ETHIOPIC SYLLABLE GLOTTAL OA -2D8B ETHIOPIC SYLLABLE ZOA -2D8C ETHIOPIC SYLLABLE DOA -2D8D ETHIOPIC SYLLABLE DDOA -2D8E ETHIOPIC SYLLABLE JOA -2D8F ETHIOPIC SYLLABLE THOA -2D90 ETHIOPIC SYLLABLE CHOA -2D91 ETHIOPIC SYLLABLE PHOA -2D92 ETHIOPIC SYLLABLE POA -2D93 ETHIOPIC SYLLABLE GGWA -2D94 ETHIOPIC SYLLABLE GGWI -2D95 ETHIOPIC SYLLABLE GGWEE -2D96 ETHIOPIC SYLLABLE GGWE -2DA0 ETHIOPIC SYLLABLE SSA -2DA1 ETHIOPIC SYLLABLE SSU -2DA2 ETHIOPIC SYLLABLE SSI -2DA3 ETHIOPIC SYLLABLE SSAA -2DA4 ETHIOPIC SYLLABLE SSEE -2DA5 ETHIOPIC SYLLABLE SSE -2DA6 ETHIOPIC SYLLABLE SSO -2DA8 ETHIOPIC SYLLABLE CCA -2DA9 ETHIOPIC SYLLABLE CCU -2DAA ETHIOPIC SYLLABLE CCI -2DAB ETHIOPIC SYLLABLE CCAA -2DAC ETHIOPIC SYLLABLE CCEE -2DAD ETHIOPIC SYLLABLE CCE -2DAE ETHIOPIC SYLLABLE CCO -2DB0 ETHIOPIC SYLLABLE ZZA -2DB1 ETHIOPIC SYLLABLE ZZU -2DB2 ETHIOPIC SYLLABLE ZZI -2DB3 ETHIOPIC SYLLABLE ZZAA -2DB4 ETHIOPIC SYLLABLE ZZEE -2DB5 ETHIOPIC SYLLABLE ZZE -2DB6 ETHIOPIC SYLLABLE ZZO -2DB8 ETHIOPIC SYLLABLE CCHA -2DB9 ETHIOPIC SYLLABLE CCHU -2DBA ETHIOPIC SYLLABLE CCHI -2DBB ETHIOPIC SYLLABLE CCHAA -2DBC ETHIOPIC SYLLABLE CCHEE -2DBD ETHIOPIC SYLLABLE CCHE -2DBE ETHIOPIC SYLLABLE CCHO -2DC0 ETHIOPIC SYLLABLE QYA -2DC1 ETHIOPIC SYLLABLE QYU -2DC2 ETHIOPIC SYLLABLE QYI -2DC3 ETHIOPIC SYLLABLE QYAA -2DC4 ETHIOPIC SYLLABLE QYEE -2DC5 ETHIOPIC SYLLABLE QYE -2DC6 ETHIOPIC SYLLABLE QYO -2DC8 ETHIOPIC SYLLABLE KYA -2DC9 ETHIOPIC SYLLABLE KYU -2DCA ETHIOPIC SYLLABLE KYI -2DCB ETHIOPIC SYLLABLE KYAA -2DCC ETHIOPIC SYLLABLE KYEE -2DCD ETHIOPIC SYLLABLE KYE -2DCE ETHIOPIC SYLLABLE KYO -2DD0 ETHIOPIC SYLLABLE XYA -2DD1 ETHIOPIC SYLLABLE XYU -2DD2 ETHIOPIC SYLLABLE XYI -2DD3 ETHIOPIC 
SYLLABLE XYAA -2DD4 ETHIOPIC SYLLABLE XYEE -2DD5 ETHIOPIC SYLLABLE XYE -2DD6 ETHIOPIC SYLLABLE XYO -2DD8 ETHIOPIC SYLLABLE GYA -2DD9 ETHIOPIC SYLLABLE GYU -2DDA ETHIOPIC SYLLABLE GYI -2DDB ETHIOPIC SYLLABLE GYAA -2DDC ETHIOPIC SYLLABLE GYEE -2DDD ETHIOPIC SYLLABLE GYE -2DDE ETHIOPIC SYLLABLE GYO -2DE0 COMBINING CYRILLIC LETTER BE -2DE1 COMBINING CYRILLIC LETTER VE -2DE2 COMBINING CYRILLIC LETTER GHE -2DE3 COMBINING CYRILLIC LETTER DE -2DE4 COMBINING CYRILLIC LETTER ZHE -2DE5 COMBINING CYRILLIC LETTER ZE -2DE6 COMBINING CYRILLIC LETTER KA -2DE7 COMBINING CYRILLIC LETTER EL -2DE8 COMBINING CYRILLIC LETTER EM -2DE9 COMBINING CYRILLIC LETTER EN -2DEA COMBINING CYRILLIC LETTER O -2DEB COMBINING CYRILLIC LETTER PE -2DEC COMBINING CYRILLIC LETTER ER -2DED COMBINING CYRILLIC LETTER ES -2DEE COMBINING CYRILLIC LETTER TE -2DEF COMBINING CYRILLIC LETTER HA -2DF0 COMBINING CYRILLIC LETTER TSE -2DF1 COMBINING CYRILLIC LETTER CHE -2DF2 COMBINING CYRILLIC LETTER SHA -2DF3 COMBINING CYRILLIC LETTER SHCHA -2DF4 COMBINING CYRILLIC LETTER FITA -2DF5 COMBINING CYRILLIC LETTER ES-TE -2DF6 COMBINING CYRILLIC LETTER A -2DF7 COMBINING CYRILLIC LETTER IE -2DF8 COMBINING CYRILLIC LETTER DJERV -2DF9 COMBINING CYRILLIC LETTER MONOGRAPH UK -2DFA COMBINING CYRILLIC LETTER YAT -2DFB COMBINING CYRILLIC LETTER YU -2DFC COMBINING CYRILLIC LETTER IOTIFIED A -2DFD COMBINING CYRILLIC LETTER LITTLE YUS -2DFE COMBINING CYRILLIC LETTER BIG YUS -2DFF COMBINING CYRILLIC LETTER IOTIFIED BIG YUS -2E00 RIGHT ANGLE SUBSTITUTION MARKER -2E01 RIGHT ANGLE DOTTED SUBSTITUTION MARKER -2E02 LEFT SUBSTITUTION BRACKET -2E03 RIGHT SUBSTITUTION BRACKET -2E04 LEFT DOTTED SUBSTITUTION BRACKET -2E05 RIGHT DOTTED SUBSTITUTION BRACKET -2E06 RAISED INTERPOLATION MARKER -2E07 RAISED DOTTED INTERPOLATION MARKER -2E08 DOTTED TRANSPOSITION MARKER -2E09 LEFT TRANSPOSITION BRACKET -2E0A RIGHT TRANSPOSITION BRACKET -2E0B RAISED SQUARE -2E0C LEFT RAISED OMISSION BRACKET -2E0D RIGHT RAISED OMISSION BRACKET -2E0E EDITORIAL CORONIS 
-2E0F PARAGRAPHOS -2E10 FORKED PARAGRAPHOS -2E11 REVERSED FORKED PARAGRAPHOS -2E12 HYPODIASTOLE -2E13 DOTTED OBELOS -2E14 DOWNWARDS ANCORA -2E15 UPWARDS ANCORA -2E16 DOTTED RIGHT-POINTING ANGLE -2E17 DOUBLE OBLIQUE HYPHEN -2E18 INVERTED INTERROBANG -2E19 PALM BRANCH -2E1A HYPHEN WITH DIAERESIS -2E1B TILDE WITH RING ABOVE -2E1C LEFT LOW PARAPHRASE BRACKET -2E1D RIGHT LOW PARAPHRASE BRACKET -2E1E TILDE WITH DOT ABOVE -2E1F TILDE WITH DOT BELOW -2E20 LEFT VERTICAL BAR WITH QUILL -2E21 RIGHT VERTICAL BAR WITH QUILL -2E22 TOP LEFT HALF BRACKET -2E23 TOP RIGHT HALF BRACKET -2E24 BOTTOM LEFT HALF BRACKET -2E25 BOTTOM RIGHT HALF BRACKET -2E26 LEFT SIDEWAYS U BRACKET -2E27 RIGHT SIDEWAYS U BRACKET -2E28 LEFT DOUBLE PARENTHESIS -2E29 RIGHT DOUBLE PARENTHESIS -2E2A TWO DOTS OVER ONE DOT PUNCTUATION -2E2B ONE DOT OVER TWO DOTS PUNCTUATION -2E2C SQUARED FOUR DOT PUNCTUATION -2E2D FIVE DOT MARK -2E2E REVERSED QUESTION MARK -2E2F VERTICAL TILDE -2E30 RING POINT -2E31 WORD SEPARATOR MIDDLE DOT -2E80 CJK RADICAL REPEAT -2E81 CJK RADICAL CLIFF -2E82 CJK RADICAL SECOND ONE -2E83 CJK RADICAL SECOND TWO -2E84 CJK RADICAL SECOND THREE -2E85 CJK RADICAL PERSON -2E86 CJK RADICAL BOX -2E87 CJK RADICAL TABLE -2E88 CJK RADICAL KNIFE ONE -2E89 CJK RADICAL KNIFE TWO -2E8A CJK RADICAL DIVINATION -2E8B CJK RADICAL SEAL -2E8C CJK RADICAL SMALL ONE -2E8D CJK RADICAL SMALL TWO -2E8E CJK RADICAL LAME ONE -2E8F CJK RADICAL LAME TWO -2E90 CJK RADICAL LAME THREE -2E91 CJK RADICAL LAME FOUR -2E92 CJK RADICAL SNAKE -2E93 CJK RADICAL THREAD -2E94 CJK RADICAL SNOUT ONE -2E95 CJK RADICAL SNOUT TWO -2E96 CJK RADICAL HEART ONE -2E97 CJK RADICAL HEART TWO -2E98 CJK RADICAL HAND -2E99 CJK RADICAL RAP -2E9B CJK RADICAL CHOKE -2E9C CJK RADICAL SUN -2E9D CJK RADICAL MOON -2E9E CJK RADICAL DEATH -2E9F CJK RADICAL MOTHER -2EA0 CJK RADICAL CIVILIAN -2EA1 CJK RADICAL WATER ONE -2EA2 CJK RADICAL WATER TWO -2EA3 CJK RADICAL FIRE -2EA4 CJK RADICAL PAW ONE -2EA5 CJK RADICAL PAW TWO -2EA6 CJK RADICAL SIMPLIFIED HALF TREE 
TRUNK -2EA7 CJK RADICAL COW -2EA8 CJK RADICAL DOG -2EA9 CJK RADICAL JADE -2EAA CJK RADICAL BOLT OF CLOTH -2EAB CJK RADICAL EYE -2EAC CJK RADICAL SPIRIT ONE -2EAD CJK RADICAL SPIRIT TWO -2EAE CJK RADICAL BAMBOO -2EAF CJK RADICAL SILK -2EB0 CJK RADICAL C-SIMPLIFIED SILK -2EB1 CJK RADICAL NET ONE -2EB2 CJK RADICAL NET TWO -2EB3 CJK RADICAL NET THREE -2EB4 CJK RADICAL NET FOUR -2EB5 CJK RADICAL MESH -2EB6 CJK RADICAL SHEEP -2EB7 CJK RADICAL RAM -2EB8 CJK RADICAL EWE -2EB9 CJK RADICAL OLD -2EBA CJK RADICAL BRUSH ONE -2EBB CJK RADICAL BRUSH TWO -2EBC CJK RADICAL MEAT -2EBD CJK RADICAL MORTAR -2EBE CJK RADICAL GRASS ONE -2EBF CJK RADICAL GRASS TWO -2EC0 CJK RADICAL GRASS THREE -2EC1 CJK RADICAL TIGER -2EC2 CJK RADICAL CLOTHES -2EC3 CJK RADICAL WEST ONE -2EC4 CJK RADICAL WEST TWO -2EC5 CJK RADICAL C-SIMPLIFIED SEE -2EC6 CJK RADICAL SIMPLIFIED HORN -2EC7 CJK RADICAL HORN -2EC8 CJK RADICAL C-SIMPLIFIED SPEECH -2EC9 CJK RADICAL C-SIMPLIFIED SHELL -2ECA CJK RADICAL FOOT -2ECB CJK RADICAL C-SIMPLIFIED CART -2ECC CJK RADICAL SIMPLIFIED WALK -2ECD CJK RADICAL WALK ONE -2ECE CJK RADICAL WALK TWO -2ECF CJK RADICAL CITY -2ED0 CJK RADICAL C-SIMPLIFIED GOLD -2ED1 CJK RADICAL LONG ONE -2ED2 CJK RADICAL LONG TWO -2ED3 CJK RADICAL C-SIMPLIFIED LONG -2ED4 CJK RADICAL C-SIMPLIFIED GATE -2ED5 CJK RADICAL MOUND ONE -2ED6 CJK RADICAL MOUND TWO -2ED7 CJK RADICAL RAIN -2ED8 CJK RADICAL BLUE -2ED9 CJK RADICAL C-SIMPLIFIED TANNED LEATHER -2EDA CJK RADICAL C-SIMPLIFIED LEAF -2EDB CJK RADICAL C-SIMPLIFIED WIND -2EDC CJK RADICAL C-SIMPLIFIED FLY -2EDD CJK RADICAL EAT ONE -2EDE CJK RADICAL EAT TWO -2EDF CJK RADICAL EAT THREE -2EE0 CJK RADICAL C-SIMPLIFIED EAT -2EE1 CJK RADICAL HEAD -2EE2 CJK RADICAL C-SIMPLIFIED HORSE -2EE3 CJK RADICAL BONE -2EE4 CJK RADICAL GHOST -2EE5 CJK RADICAL C-SIMPLIFIED FISH -2EE6 CJK RADICAL C-SIMPLIFIED BIRD -2EE7 CJK RADICAL C-SIMPLIFIED SALT -2EE8 CJK RADICAL SIMPLIFIED WHEAT -2EE9 CJK RADICAL SIMPLIFIED YELLOW -2EEA CJK RADICAL C-SIMPLIFIED FROG -2EEB CJK RADICAL 
J-SIMPLIFIED EVEN -2EEC CJK RADICAL C-SIMPLIFIED EVEN -2EED CJK RADICAL J-SIMPLIFIED TOOTH -2EEE CJK RADICAL C-SIMPLIFIED TOOTH -2EEF CJK RADICAL J-SIMPLIFIED DRAGON -2EF0 CJK RADICAL C-SIMPLIFIED DRAGON -2EF1 CJK RADICAL TURTLE -2EF2 CJK RADICAL J-SIMPLIFIED TURTLE -2EF3 CJK RADICAL C-SIMPLIFIED TURTLE -2F00 KANGXI RADICAL ONE -2F01 KANGXI RADICAL LINE -2F02 KANGXI RADICAL DOT -2F03 KANGXI RADICAL SLASH -2F04 KANGXI RADICAL SECOND -2F05 KANGXI RADICAL HOOK -2F06 KANGXI RADICAL TWO -2F07 KANGXI RADICAL LID -2F08 KANGXI RADICAL MAN -2F09 KANGXI RADICAL LEGS -2F0A KANGXI RADICAL ENTER -2F0B KANGXI RADICAL EIGHT -2F0C KANGXI RADICAL DOWN BOX -2F0D KANGXI RADICAL COVER -2F0E KANGXI RADICAL ICE -2F0F KANGXI RADICAL TABLE -2F10 KANGXI RADICAL OPEN BOX -2F11 KANGXI RADICAL KNIFE -2F12 KANGXI RADICAL POWER -2F13 KANGXI RADICAL WRAP -2F14 KANGXI RADICAL SPOON -2F15 KANGXI RADICAL RIGHT OPEN BOX -2F16 KANGXI RADICAL HIDING ENCLOSURE -2F17 KANGXI RADICAL TEN -2F18 KANGXI RADICAL DIVINATION -2F19 KANGXI RADICAL SEAL -2F1A KANGXI RADICAL CLIFF -2F1B KANGXI RADICAL PRIVATE -2F1C KANGXI RADICAL AGAIN -2F1D KANGXI RADICAL MOUTH -2F1E KANGXI RADICAL ENCLOSURE -2F1F KANGXI RADICAL EARTH -2F20 KANGXI RADICAL SCHOLAR -2F21 KANGXI RADICAL GO -2F22 KANGXI RADICAL GO SLOWLY -2F23 KANGXI RADICAL EVENING -2F24 KANGXI RADICAL BIG -2F25 KANGXI RADICAL WOMAN -2F26 KANGXI RADICAL CHILD -2F27 KANGXI RADICAL ROOF -2F28 KANGXI RADICAL INCH -2F29 KANGXI RADICAL SMALL -2F2A KANGXI RADICAL LAME -2F2B KANGXI RADICAL CORPSE -2F2C KANGXI RADICAL SPROUT -2F2D KANGXI RADICAL MOUNTAIN -2F2E KANGXI RADICAL RIVER -2F2F KANGXI RADICAL WORK -2F30 KANGXI RADICAL ONESELF -2F31 KANGXI RADICAL TURBAN -2F32 KANGXI RADICAL DRY -2F33 KANGXI RADICAL SHORT THREAD -2F34 KANGXI RADICAL DOTTED CLIFF -2F35 KANGXI RADICAL LONG STRIDE -2F36 KANGXI RADICAL TWO HANDS -2F37 KANGXI RADICAL SHOOT -2F38 KANGXI RADICAL BOW -2F39 KANGXI RADICAL SNOUT -2F3A KANGXI RADICAL BRISTLE -2F3B KANGXI RADICAL STEP -2F3C KANGXI RADICAL HEART 
-2F3D KANGXI RADICAL HALBERD -2F3E KANGXI RADICAL DOOR -2F3F KANGXI RADICAL HAND -2F40 KANGXI RADICAL BRANCH -2F41 KANGXI RADICAL RAP -2F42 KANGXI RADICAL SCRIPT -2F43 KANGXI RADICAL DIPPER -2F44 KANGXI RADICAL AXE -2F45 KANGXI RADICAL SQUARE -2F46 KANGXI RADICAL NOT -2F47 KANGXI RADICAL SUN -2F48 KANGXI RADICAL SAY -2F49 KANGXI RADICAL MOON -2F4A KANGXI RADICAL TREE -2F4B KANGXI RADICAL LACK -2F4C KANGXI RADICAL STOP -2F4D KANGXI RADICAL DEATH -2F4E KANGXI RADICAL WEAPON -2F4F KANGXI RADICAL DO NOT -2F50 KANGXI RADICAL COMPARE -2F51 KANGXI RADICAL FUR -2F52 KANGXI RADICAL CLAN -2F53 KANGXI RADICAL STEAM -2F54 KANGXI RADICAL WATER -2F55 KANGXI RADICAL FIRE -2F56 KANGXI RADICAL CLAW -2F57 KANGXI RADICAL FATHER -2F58 KANGXI RADICAL DOUBLE X -2F59 KANGXI RADICAL HALF TREE TRUNK -2F5A KANGXI RADICAL SLICE -2F5B KANGXI RADICAL FANG -2F5C KANGXI RADICAL COW -2F5D KANGXI RADICAL DOG -2F5E KANGXI RADICAL PROFOUND -2F5F KANGXI RADICAL JADE -2F60 KANGXI RADICAL MELON -2F61 KANGXI RADICAL TILE -2F62 KANGXI RADICAL SWEET -2F63 KANGXI RADICAL LIFE -2F64 KANGXI RADICAL USE -2F65 KANGXI RADICAL FIELD -2F66 KANGXI RADICAL BOLT OF CLOTH -2F67 KANGXI RADICAL SICKNESS -2F68 KANGXI RADICAL DOTTED TENT -2F69 KANGXI RADICAL WHITE -2F6A KANGXI RADICAL SKIN -2F6B KANGXI RADICAL DISH -2F6C KANGXI RADICAL EYE -2F6D KANGXI RADICAL SPEAR -2F6E KANGXI RADICAL ARROW -2F6F KANGXI RADICAL STONE -2F70 KANGXI RADICAL SPIRIT -2F71 KANGXI RADICAL TRACK -2F72 KANGXI RADICAL GRAIN -2F73 KANGXI RADICAL CAVE -2F74 KANGXI RADICAL STAND -2F75 KANGXI RADICAL BAMBOO -2F76 KANGXI RADICAL RICE -2F77 KANGXI RADICAL SILK -2F78 KANGXI RADICAL JAR -2F79 KANGXI RADICAL NET -2F7A KANGXI RADICAL SHEEP -2F7B KANGXI RADICAL FEATHER -2F7C KANGXI RADICAL OLD -2F7D KANGXI RADICAL AND -2F7E KANGXI RADICAL PLOW -2F7F KANGXI RADICAL EAR -2F80 KANGXI RADICAL BRUSH -2F81 KANGXI RADICAL MEAT -2F82 KANGXI RADICAL MINISTER -2F83 KANGXI RADICAL SELF -2F84 KANGXI RADICAL ARRIVE -2F85 KANGXI RADICAL MORTAR -2F86 KANGXI RADICAL 
TONGUE -2F87 KANGXI RADICAL OPPOSE -2F88 KANGXI RADICAL BOAT -2F89 KANGXI RADICAL STOPPING -2F8A KANGXI RADICAL COLOR -2F8B KANGXI RADICAL GRASS -2F8C KANGXI RADICAL TIGER -2F8D KANGXI RADICAL INSECT -2F8E KANGXI RADICAL BLOOD -2F8F KANGXI RADICAL WALK ENCLOSURE -2F90 KANGXI RADICAL CLOTHES -2F91 KANGXI RADICAL WEST -2F92 KANGXI RADICAL SEE -2F93 KANGXI RADICAL HORN -2F94 KANGXI RADICAL SPEECH -2F95 KANGXI RADICAL VALLEY -2F96 KANGXI RADICAL BEAN -2F97 KANGXI RADICAL PIG -2F98 KANGXI RADICAL BADGER -2F99 KANGXI RADICAL SHELL -2F9A KANGXI RADICAL RED -2F9B KANGXI RADICAL RUN -2F9C KANGXI RADICAL FOOT -2F9D KANGXI RADICAL BODY -2F9E KANGXI RADICAL CART -2F9F KANGXI RADICAL BITTER -2FA0 KANGXI RADICAL MORNING -2FA1 KANGXI RADICAL WALK -2FA2 KANGXI RADICAL CITY -2FA3 KANGXI RADICAL WINE -2FA4 KANGXI RADICAL DISTINGUISH -2FA5 KANGXI RADICAL VILLAGE -2FA6 KANGXI RADICAL GOLD -2FA7 KANGXI RADICAL LONG -2FA8 KANGXI RADICAL GATE -2FA9 KANGXI RADICAL MOUND -2FAA KANGXI RADICAL SLAVE -2FAB KANGXI RADICAL SHORT TAILED BIRD -2FAC KANGXI RADICAL RAIN -2FAD KANGXI RADICAL BLUE -2FAE KANGXI RADICAL WRONG -2FAF KANGXI RADICAL FACE -2FB0 KANGXI RADICAL LEATHER -2FB1 KANGXI RADICAL TANNED LEATHER -2FB2 KANGXI RADICAL LEEK -2FB3 KANGXI RADICAL SOUND -2FB4 KANGXI RADICAL LEAF -2FB5 KANGXI RADICAL WIND -2FB6 KANGXI RADICAL FLY -2FB7 KANGXI RADICAL EAT -2FB8 KANGXI RADICAL HEAD -2FB9 KANGXI RADICAL FRAGRANT -2FBA KANGXI RADICAL HORSE -2FBB KANGXI RADICAL BONE -2FBC KANGXI RADICAL TALL -2FBD KANGXI RADICAL HAIR -2FBE KANGXI RADICAL FIGHT -2FBF KANGXI RADICAL SACRIFICIAL WINE -2FC0 KANGXI RADICAL CAULDRON -2FC1 KANGXI RADICAL GHOST -2FC2 KANGXI RADICAL FISH -2FC3 KANGXI RADICAL BIRD -2FC4 KANGXI RADICAL SALT -2FC5 KANGXI RADICAL DEER -2FC6 KANGXI RADICAL WHEAT -2FC7 KANGXI RADICAL HEMP -2FC8 KANGXI RADICAL YELLOW -2FC9 KANGXI RADICAL MILLET -2FCA KANGXI RADICAL BLACK -2FCB KANGXI RADICAL EMBROIDERY -2FCC KANGXI RADICAL FROG -2FCD KANGXI RADICAL TRIPOD -2FCE KANGXI RADICAL DRUM -2FCF KANGXI 
RADICAL RAT -2FD0 KANGXI RADICAL NOSE -2FD1 KANGXI RADICAL EVEN -2FD2 KANGXI RADICAL TOOTH -2FD3 KANGXI RADICAL DRAGON -2FD4 KANGXI RADICAL TURTLE -2FD5 KANGXI RADICAL FLUTE -2FF0 IDEOGRAPHIC DESCRIPTION CHARACTER LEFT TO RIGHT -2FF1 IDEOGRAPHIC DESCRIPTION CHARACTER ABOVE TO BELOW -2FF2 IDEOGRAPHIC DESCRIPTION CHARACTER LEFT TO MIDDLE AND RIGHT -2FF3 IDEOGRAPHIC DESCRIPTION CHARACTER ABOVE TO MIDDLE AND BELOW -2FF4 IDEOGRAPHIC DESCRIPTION CHARACTER FULL SURROUND -2FF5 IDEOGRAPHIC DESCRIPTION CHARACTER SURROUND FROM ABOVE -2FF6 IDEOGRAPHIC DESCRIPTION CHARACTER SURROUND FROM BELOW -2FF7 IDEOGRAPHIC DESCRIPTION CHARACTER SURROUND FROM LEFT -2FF8 IDEOGRAPHIC DESCRIPTION CHARACTER SURROUND FROM UPPER LEFT -2FF9 IDEOGRAPHIC DESCRIPTION CHARACTER SURROUND FROM UPPER RIGHT -2FFA IDEOGRAPHIC DESCRIPTION CHARACTER SURROUND FROM LOWER LEFT -2FFB IDEOGRAPHIC DESCRIPTION CHARACTER OVERLAID -3000 IDEOGRAPHIC SPACE -3001 IDEOGRAPHIC COMMA -3002 IDEOGRAPHIC FULL STOP -3003 DITTO MARK -3004 JAPANESE INDUSTRIAL STANDARD SYMBOL -3005 IDEOGRAPHIC ITERATION MARK -3006 IDEOGRAPHIC CLOSING MARK -3007 IDEOGRAPHIC NUMBER ZERO -3008 LEFT ANGLE BRACKET -3009 RIGHT ANGLE BRACKET -300A LEFT DOUBLE ANGLE BRACKET -300B RIGHT DOUBLE ANGLE BRACKET -300C LEFT CORNER BRACKET -300D RIGHT CORNER BRACKET -300E LEFT WHITE CORNER BRACKET -300F RIGHT WHITE CORNER BRACKET -3010 LEFT BLACK LENTICULAR BRACKET -3011 RIGHT BLACK LENTICULAR BRACKET -3012 POSTAL MARK -3013 GETA MARK -3014 LEFT TORTOISE SHELL BRACKET -3015 RIGHT TORTOISE SHELL BRACKET -3016 LEFT WHITE LENTICULAR BRACKET -3017 RIGHT WHITE LENTICULAR BRACKET -3018 LEFT WHITE TORTOISE SHELL BRACKET -3019 RIGHT WHITE TORTOISE SHELL BRACKET -301A LEFT WHITE SQUARE BRACKET -301B RIGHT WHITE SQUARE BRACKET -301C WAVE DASH -301D REVERSED DOUBLE PRIME QUOTATION MARK -301E DOUBLE PRIME QUOTATION MARK -301F LOW DOUBLE PRIME QUOTATION MARK -3020 POSTAL MARK FACE -3021 HANGZHOU NUMERAL ONE -3022 HANGZHOU NUMERAL TWO -3023 HANGZHOU NUMERAL THREE -3024 
HANGZHOU NUMERAL FOUR -3025 HANGZHOU NUMERAL FIVE -3026 HANGZHOU NUMERAL SIX -3027 HANGZHOU NUMERAL SEVEN -3028 HANGZHOU NUMERAL EIGHT -3029 HANGZHOU NUMERAL NINE -302A IDEOGRAPHIC LEVEL TONE MARK -302B IDEOGRAPHIC RISING TONE MARK -302C IDEOGRAPHIC DEPARTING TONE MARK -302D IDEOGRAPHIC ENTERING TONE MARK -302E HANGUL SINGLE DOT TONE MARK -302F HANGUL DOUBLE DOT TONE MARK -3030 WAVY DASH -3031 VERTICAL KANA REPEAT MARK -3032 VERTICAL KANA REPEAT WITH VOICED SOUND MARK -3033 VERTICAL KANA REPEAT MARK UPPER HALF -3034 VERTICAL KANA REPEAT WITH VOICED SOUND MARK UPPER HALF -3035 VERTICAL KANA REPEAT MARK LOWER HALF -3036 CIRCLED POSTAL MARK -3037 IDEOGRAPHIC TELEGRAPH LINE FEED SEPARATOR SYMBOL -3038 HANGZHOU NUMERAL TEN -3039 HANGZHOU NUMERAL TWENTY -303A HANGZHOU NUMERAL THIRTY -303B VERTICAL IDEOGRAPHIC ITERATION MARK -303C MASU MARK -303D PART ALTERNATION MARK -303E IDEOGRAPHIC VARIATION INDICATOR -303F IDEOGRAPHIC HALF FILL SPACE -3041 HIRAGANA LETTER SMALL A -3042 HIRAGANA LETTER A -3043 HIRAGANA LETTER SMALL I -3044 HIRAGANA LETTER I -3045 HIRAGANA LETTER SMALL U -3046 HIRAGANA LETTER U -3047 HIRAGANA LETTER SMALL E -3048 HIRAGANA LETTER E -3049 HIRAGANA LETTER SMALL O -304A HIRAGANA LETTER O -304B HIRAGANA LETTER KA -304C HIRAGANA LETTER GA -304D HIRAGANA LETTER KI -304E HIRAGANA LETTER GI -304F HIRAGANA LETTER KU -3050 HIRAGANA LETTER GU -3051 HIRAGANA LETTER KE -3052 HIRAGANA LETTER GE -3053 HIRAGANA LETTER KO -3054 HIRAGANA LETTER GO -3055 HIRAGANA LETTER SA -3056 HIRAGANA LETTER ZA -3057 HIRAGANA LETTER SI -3058 HIRAGANA LETTER ZI -3059 HIRAGANA LETTER SU -305A HIRAGANA LETTER ZU -305B HIRAGANA LETTER SE -305C HIRAGANA LETTER ZE -305D HIRAGANA LETTER SO -305E HIRAGANA LETTER ZO -305F HIRAGANA LETTER TA -3060 HIRAGANA LETTER DA -3061 HIRAGANA LETTER TI -3062 HIRAGANA LETTER DI -3063 HIRAGANA LETTER SMALL TU -3064 HIRAGANA LETTER TU -3065 HIRAGANA LETTER DU -3066 HIRAGANA LETTER TE -3067 HIRAGANA LETTER DE -3068 HIRAGANA LETTER TO -3069 HIRAGANA LETTER DO 
-306A HIRAGANA LETTER NA -306B HIRAGANA LETTER NI -306C HIRAGANA LETTER NU -306D HIRAGANA LETTER NE -306E HIRAGANA LETTER NO -306F HIRAGANA LETTER HA -3070 HIRAGANA LETTER BA -3071 HIRAGANA LETTER PA -3072 HIRAGANA LETTER HI -3073 HIRAGANA LETTER BI -3074 HIRAGANA LETTER PI -3075 HIRAGANA LETTER HU -3076 HIRAGANA LETTER BU -3077 HIRAGANA LETTER PU -3078 HIRAGANA LETTER HE -3079 HIRAGANA LETTER BE -307A HIRAGANA LETTER PE -307B HIRAGANA LETTER HO -307C HIRAGANA LETTER BO -307D HIRAGANA LETTER PO -307E HIRAGANA LETTER MA -307F HIRAGANA LETTER MI -3080 HIRAGANA LETTER MU -3081 HIRAGANA LETTER ME -3082 HIRAGANA LETTER MO -3083 HIRAGANA LETTER SMALL YA -3084 HIRAGANA LETTER YA -3085 HIRAGANA LETTER SMALL YU -3086 HIRAGANA LETTER YU -3087 HIRAGANA LETTER SMALL YO -3088 HIRAGANA LETTER YO -3089 HIRAGANA LETTER RA -308A HIRAGANA LETTER RI -308B HIRAGANA LETTER RU -308C HIRAGANA LETTER RE -308D HIRAGANA LETTER RO -308E HIRAGANA LETTER SMALL WA -308F HIRAGANA LETTER WA -3090 HIRAGANA LETTER WI -3091 HIRAGANA LETTER WE -3092 HIRAGANA LETTER WO -3093 HIRAGANA LETTER N -3094 HIRAGANA LETTER VU -3095 HIRAGANA LETTER SMALL KA -3096 HIRAGANA LETTER SMALL KE -3099 COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK -309A COMBINING KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK -309B KATAKANA-HIRAGANA VOICED SOUND MARK -309C KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK -309D HIRAGANA ITERATION MARK -309E HIRAGANA VOICED ITERATION MARK -309F HIRAGANA DIGRAPH YORI -30A0 KATAKANA-HIRAGANA DOUBLE HYPHEN -30A1 KATAKANA LETTER SMALL A -30A2 KATAKANA LETTER A -30A3 KATAKANA LETTER SMALL I -30A4 KATAKANA LETTER I -30A5 KATAKANA LETTER SMALL U -30A6 KATAKANA LETTER U -30A7 KATAKANA LETTER SMALL E -30A8 KATAKANA LETTER E -30A9 KATAKANA LETTER SMALL O -30AA KATAKANA LETTER O -30AB KATAKANA LETTER KA -30AC KATAKANA LETTER GA -30AD KATAKANA LETTER KI -30AE KATAKANA LETTER GI -30AF KATAKANA LETTER KU -30B0 KATAKANA LETTER GU -30B1 KATAKANA LETTER KE -30B2 KATAKANA LETTER GE -30B3 KATAKANA LETTER KO -30B4 
KATAKANA LETTER GO -30B5 KATAKANA LETTER SA -30B6 KATAKANA LETTER ZA -30B7 KATAKANA LETTER SI -30B8 KATAKANA LETTER ZI -30B9 KATAKANA LETTER SU -30BA KATAKANA LETTER ZU -30BB KATAKANA LETTER SE -30BC KATAKANA LETTER ZE -30BD KATAKANA LETTER SO -30BE KATAKANA LETTER ZO -30BF KATAKANA LETTER TA -30C0 KATAKANA LETTER DA -30C1 KATAKANA LETTER TI -30C2 KATAKANA LETTER DI -30C3 KATAKANA LETTER SMALL TU -30C4 KATAKANA LETTER TU -30C5 KATAKANA LETTER DU -30C6 KATAKANA LETTER TE -30C7 KATAKANA LETTER DE -30C8 KATAKANA LETTER TO -30C9 KATAKANA LETTER DO -30CA KATAKANA LETTER NA -30CB KATAKANA LETTER NI -30CC KATAKANA LETTER NU -30CD KATAKANA LETTER NE -30CE KATAKANA LETTER NO -30CF KATAKANA LETTER HA -30D0 KATAKANA LETTER BA -30D1 KATAKANA LETTER PA -30D2 KATAKANA LETTER HI -30D3 KATAKANA LETTER BI -30D4 KATAKANA LETTER PI -30D5 KATAKANA LETTER HU -30D6 KATAKANA LETTER BU -30D7 KATAKANA LETTER PU -30D8 KATAKANA LETTER HE -30D9 KATAKANA LETTER BE -30DA KATAKANA LETTER PE -30DB KATAKANA LETTER HO -30DC KATAKANA LETTER BO -30DD KATAKANA LETTER PO -30DE KATAKANA LETTER MA -30DF KATAKANA LETTER MI -30E0 KATAKANA LETTER MU -30E1 KATAKANA LETTER ME -30E2 KATAKANA LETTER MO -30E3 KATAKANA LETTER SMALL YA -30E4 KATAKANA LETTER YA -30E5 KATAKANA LETTER SMALL YU -30E6 KATAKANA LETTER YU -30E7 KATAKANA LETTER SMALL YO -30E8 KATAKANA LETTER YO -30E9 KATAKANA LETTER RA -30EA KATAKANA LETTER RI -30EB KATAKANA LETTER RU -30EC KATAKANA LETTER RE -30ED KATAKANA LETTER RO -30EE KATAKANA LETTER SMALL WA -30EF KATAKANA LETTER WA -30F0 KATAKANA LETTER WI -30F1 KATAKANA LETTER WE -30F2 KATAKANA LETTER WO -30F3 KATAKANA LETTER N -30F4 KATAKANA LETTER VU -30F5 KATAKANA LETTER SMALL KA -30F6 KATAKANA LETTER SMALL KE -30F7 KATAKANA LETTER VA -30F8 KATAKANA LETTER VI -30F9 KATAKANA LETTER VE -30FA KATAKANA LETTER VO -30FB KATAKANA MIDDLE DOT -30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK -30FD KATAKANA ITERATION MARK -30FE KATAKANA VOICED ITERATION MARK -30FF KATAKANA DIGRAPH KOTO -3105 BOPOMOFO LETTER B 
-3106 BOPOMOFO LETTER P -3107 BOPOMOFO LETTER M -3108 BOPOMOFO LETTER F -3109 BOPOMOFO LETTER D -310A BOPOMOFO LETTER T -310B BOPOMOFO LETTER N -310C BOPOMOFO LETTER L -310D BOPOMOFO LETTER G -310E BOPOMOFO LETTER K -310F BOPOMOFO LETTER H -3110 BOPOMOFO LETTER J -3111 BOPOMOFO LETTER Q -3112 BOPOMOFO LETTER X -3113 BOPOMOFO LETTER ZH -3114 BOPOMOFO LETTER CH -3115 BOPOMOFO LETTER SH -3116 BOPOMOFO LETTER R -3117 BOPOMOFO LETTER Z -3118 BOPOMOFO LETTER C -3119 BOPOMOFO LETTER S -311A BOPOMOFO LETTER A -311B BOPOMOFO LETTER O -311C BOPOMOFO LETTER E -311D BOPOMOFO LETTER EH -311E BOPOMOFO LETTER AI -311F BOPOMOFO LETTER EI -3120 BOPOMOFO LETTER AU -3121 BOPOMOFO LETTER OU -3122 BOPOMOFO LETTER AN -3123 BOPOMOFO LETTER EN -3124 BOPOMOFO LETTER ANG -3125 BOPOMOFO LETTER ENG -3126 BOPOMOFO LETTER ER -3127 BOPOMOFO LETTER I -3128 BOPOMOFO LETTER U -3129 BOPOMOFO LETTER IU -312A BOPOMOFO LETTER V -312B BOPOMOFO LETTER NG -312C BOPOMOFO LETTER GN -312D BOPOMOFO LETTER IH -3131 HANGUL LETTER KIYEOK -3132 HANGUL LETTER SSANGKIYEOK -3133 HANGUL LETTER KIYEOK-SIOS -3134 HANGUL LETTER NIEUN -3135 HANGUL LETTER NIEUN-CIEUC -3136 HANGUL LETTER NIEUN-HIEUH -3137 HANGUL LETTER TIKEUT -3138 HANGUL LETTER SSANGTIKEUT -3139 HANGUL LETTER RIEUL -313A HANGUL LETTER RIEUL-KIYEOK -313B HANGUL LETTER RIEUL-MIEUM -313C HANGUL LETTER RIEUL-PIEUP -313D HANGUL LETTER RIEUL-SIOS -313E HANGUL LETTER RIEUL-THIEUTH -313F HANGUL LETTER RIEUL-PHIEUPH -3140 HANGUL LETTER RIEUL-HIEUH -3141 HANGUL LETTER MIEUM -3142 HANGUL LETTER PIEUP -3143 HANGUL LETTER SSANGPIEUP -3144 HANGUL LETTER PIEUP-SIOS -3145 HANGUL LETTER SIOS -3146 HANGUL LETTER SSANGSIOS -3147 HANGUL LETTER IEUNG -3148 HANGUL LETTER CIEUC -3149 HANGUL LETTER SSANGCIEUC -314A HANGUL LETTER CHIEUCH -314B HANGUL LETTER KHIEUKH -314C HANGUL LETTER THIEUTH -314D HANGUL LETTER PHIEUPH -314E HANGUL LETTER HIEUH -314F HANGUL LETTER A -3150 HANGUL LETTER AE -3151 HANGUL LETTER YA -3152 HANGUL LETTER YAE -3153 HANGUL LETTER EO -3154 HANGUL LETTER E 
-3155 HANGUL LETTER YEO -3156 HANGUL LETTER YE -3157 HANGUL LETTER O -3158 HANGUL LETTER WA -3159 HANGUL LETTER WAE -315A HANGUL LETTER OE -315B HANGUL LETTER YO -315C HANGUL LETTER U -315D HANGUL LETTER WEO -315E HANGUL LETTER WE -315F HANGUL LETTER WI -3160 HANGUL LETTER YU -3161 HANGUL LETTER EU -3162 HANGUL LETTER YI -3163 HANGUL LETTER I -3164 HANGUL FILLER -3165 HANGUL LETTER SSANGNIEUN -3166 HANGUL LETTER NIEUN-TIKEUT -3167 HANGUL LETTER NIEUN-SIOS -3168 HANGUL LETTER NIEUN-PANSIOS -3169 HANGUL LETTER RIEUL-KIYEOK-SIOS -316A HANGUL LETTER RIEUL-TIKEUT -316B HANGUL LETTER RIEUL-PIEUP-SIOS -316C HANGUL LETTER RIEUL-PANSIOS -316D HANGUL LETTER RIEUL-YEORINHIEUH -316E HANGUL LETTER MIEUM-PIEUP -316F HANGUL LETTER MIEUM-SIOS -3170 HANGUL LETTER MIEUM-PANSIOS -3171 HANGUL LETTER KAPYEOUNMIEUM -3172 HANGUL LETTER PIEUP-KIYEOK -3173 HANGUL LETTER PIEUP-TIKEUT -3174 HANGUL LETTER PIEUP-SIOS-KIYEOK -3175 HANGUL LETTER PIEUP-SIOS-TIKEUT -3176 HANGUL LETTER PIEUP-CIEUC -3177 HANGUL LETTER PIEUP-THIEUTH -3178 HANGUL LETTER KAPYEOUNPIEUP -3179 HANGUL LETTER KAPYEOUNSSANGPIEUP -317A HANGUL LETTER SIOS-KIYEOK -317B HANGUL LETTER SIOS-NIEUN -317C HANGUL LETTER SIOS-TIKEUT -317D HANGUL LETTER SIOS-PIEUP -317E HANGUL LETTER SIOS-CIEUC -317F HANGUL LETTER PANSIOS -3180 HANGUL LETTER SSANGIEUNG -3181 HANGUL LETTER YESIEUNG -3182 HANGUL LETTER YESIEUNG-SIOS -3183 HANGUL LETTER YESIEUNG-PANSIOS -3184 HANGUL LETTER KAPYEOUNPHIEUPH -3185 HANGUL LETTER SSANGHIEUH -3186 HANGUL LETTER YEORINHIEUH -3187 HANGUL LETTER YO-YA -3188 HANGUL LETTER YO-YAE -3189 HANGUL LETTER YO-I -318A HANGUL LETTER YU-YEO -318B HANGUL LETTER YU-YE -318C HANGUL LETTER YU-I -318D HANGUL LETTER ARAEA -318E HANGUL LETTER ARAEAE -3190 IDEOGRAPHIC ANNOTATION LINKING MARK -3191 IDEOGRAPHIC ANNOTATION REVERSE MARK -3192 IDEOGRAPHIC ANNOTATION ONE MARK -3193 IDEOGRAPHIC ANNOTATION TWO MARK -3194 IDEOGRAPHIC ANNOTATION THREE MARK -3195 IDEOGRAPHIC ANNOTATION FOUR MARK -3196 IDEOGRAPHIC ANNOTATION TOP MARK -3197 
IDEOGRAPHIC ANNOTATION MIDDLE MARK -3198 IDEOGRAPHIC ANNOTATION BOTTOM MARK -3199 IDEOGRAPHIC ANNOTATION FIRST MARK -319A IDEOGRAPHIC ANNOTATION SECOND MARK -319B IDEOGRAPHIC ANNOTATION THIRD MARK -319C IDEOGRAPHIC ANNOTATION FOURTH MARK -319D IDEOGRAPHIC ANNOTATION HEAVEN MARK -319E IDEOGRAPHIC ANNOTATION EARTH MARK -319F IDEOGRAPHIC ANNOTATION MAN MARK -31A0 BOPOMOFO LETTER BU -31A1 BOPOMOFO LETTER ZI -31A2 BOPOMOFO LETTER JI -31A3 BOPOMOFO LETTER GU -31A4 BOPOMOFO LETTER EE -31A5 BOPOMOFO LETTER ENN -31A6 BOPOMOFO LETTER OO -31A7 BOPOMOFO LETTER ONN -31A8 BOPOMOFO LETTER IR -31A9 BOPOMOFO LETTER ANN -31AA BOPOMOFO LETTER INN -31AB BOPOMOFO LETTER UNN -31AC BOPOMOFO LETTER IM -31AD BOPOMOFO LETTER NGG -31AE BOPOMOFO LETTER AINN -31AF BOPOMOFO LETTER AUNN -31B0 BOPOMOFO LETTER AM -31B1 BOPOMOFO LETTER OM -31B2 BOPOMOFO LETTER ONG -31B3 BOPOMOFO LETTER INNN -31B4 BOPOMOFO FINAL LETTER P -31B5 BOPOMOFO FINAL LETTER T -31B6 BOPOMOFO FINAL LETTER K -31B7 BOPOMOFO FINAL LETTER H -31C0 CJK STROKE T -31C1 CJK STROKE WG -31C2 CJK STROKE XG -31C3 CJK STROKE BXG -31C4 CJK STROKE SW -31C5 CJK STROKE HZZ -31C6 CJK STROKE HZG -31C7 CJK STROKE HP -31C8 CJK STROKE HZWG -31C9 CJK STROKE SZWG -31CA CJK STROKE HZT -31CB CJK STROKE HZZP -31CC CJK STROKE HPWG -31CD CJK STROKE HZW -31CE CJK STROKE HZZZ -31CF CJK STROKE N -31D0 CJK STROKE H -31D1 CJK STROKE S -31D2 CJK STROKE P -31D3 CJK STROKE SP -31D4 CJK STROKE D -31D5 CJK STROKE HZ -31D6 CJK STROKE HG -31D7 CJK STROKE SZ -31D8 CJK STROKE SWZ -31D9 CJK STROKE ST -31DA CJK STROKE SG -31DB CJK STROKE PD -31DC CJK STROKE PZ -31DD CJK STROKE TN -31DE CJK STROKE SZZ -31DF CJK STROKE SWG -31E0 CJK STROKE HXWG -31E1 CJK STROKE HZZZG -31E2 CJK STROKE PG -31E3 CJK STROKE Q -31F0 KATAKANA LETTER SMALL KU -31F1 KATAKANA LETTER SMALL SI -31F2 KATAKANA LETTER SMALL SU -31F3 KATAKANA LETTER SMALL TO -31F4 KATAKANA LETTER SMALL NU -31F5 KATAKANA LETTER SMALL HA -31F6 KATAKANA LETTER SMALL HI -31F7 KATAKANA LETTER SMALL HU -31F8 KATAKANA LETTER 
SMALL HE -31F9 KATAKANA LETTER SMALL HO -31FA KATAKANA LETTER SMALL MU -31FB KATAKANA LETTER SMALL RA -31FC KATAKANA LETTER SMALL RI -31FD KATAKANA LETTER SMALL RU -31FE KATAKANA LETTER SMALL RE -31FF KATAKANA LETTER SMALL RO -3200 PARENTHESIZED HANGUL KIYEOK -3201 PARENTHESIZED HANGUL NIEUN -3202 PARENTHESIZED HANGUL TIKEUT -3203 PARENTHESIZED HANGUL RIEUL -3204 PARENTHESIZED HANGUL MIEUM -3205 PARENTHESIZED HANGUL PIEUP -3206 PARENTHESIZED HANGUL SIOS -3207 PARENTHESIZED HANGUL IEUNG -3208 PARENTHESIZED HANGUL CIEUC -3209 PARENTHESIZED HANGUL CHIEUCH -320A PARENTHESIZED HANGUL KHIEUKH -320B PARENTHESIZED HANGUL THIEUTH -320C PARENTHESIZED HANGUL PHIEUPH -320D PARENTHESIZED HANGUL HIEUH -320E PARENTHESIZED HANGUL KIYEOK A -320F PARENTHESIZED HANGUL NIEUN A -3210 PARENTHESIZED HANGUL TIKEUT A -3211 PARENTHESIZED HANGUL RIEUL A -3212 PARENTHESIZED HANGUL MIEUM A -3213 PARENTHESIZED HANGUL PIEUP A -3214 PARENTHESIZED HANGUL SIOS A -3215 PARENTHESIZED HANGUL IEUNG A -3216 PARENTHESIZED HANGUL CIEUC A -3217 PARENTHESIZED HANGUL CHIEUCH A -3218 PARENTHESIZED HANGUL KHIEUKH A -3219 PARENTHESIZED HANGUL THIEUTH A -321A PARENTHESIZED HANGUL PHIEUPH A -321B PARENTHESIZED HANGUL HIEUH A -321C PARENTHESIZED HANGUL CIEUC U -321D PARENTHESIZED KOREAN CHARACTER OJEON -321E PARENTHESIZED KOREAN CHARACTER O HU -3220 PARENTHESIZED IDEOGRAPH ONE -3221 PARENTHESIZED IDEOGRAPH TWO -3222 PARENTHESIZED IDEOGRAPH THREE -3223 PARENTHESIZED IDEOGRAPH FOUR -3224 PARENTHESIZED IDEOGRAPH FIVE -3225 PARENTHESIZED IDEOGRAPH SIX -3226 PARENTHESIZED IDEOGRAPH SEVEN -3227 PARENTHESIZED IDEOGRAPH EIGHT -3228 PARENTHESIZED IDEOGRAPH NINE -3229 PARENTHESIZED IDEOGRAPH TEN -322A PARENTHESIZED IDEOGRAPH MOON -322B PARENTHESIZED IDEOGRAPH FIRE -322C PARENTHESIZED IDEOGRAPH WATER -322D PARENTHESIZED IDEOGRAPH WOOD -322E PARENTHESIZED IDEOGRAPH METAL -322F PARENTHESIZED IDEOGRAPH EARTH -3230 PARENTHESIZED IDEOGRAPH SUN -3231 PARENTHESIZED IDEOGRAPH STOCK -3232 PARENTHESIZED IDEOGRAPH HAVE -3233 
PARENTHESIZED IDEOGRAPH SOCIETY -3234 PARENTHESIZED IDEOGRAPH NAME -3235 PARENTHESIZED IDEOGRAPH SPECIAL -3236 PARENTHESIZED IDEOGRAPH FINANCIAL -3237 PARENTHESIZED IDEOGRAPH CONGRATULATION -3238 PARENTHESIZED IDEOGRAPH LABOR -3239 PARENTHESIZED IDEOGRAPH REPRESENT -323A PARENTHESIZED IDEOGRAPH CALL -323B PARENTHESIZED IDEOGRAPH STUDY -323C PARENTHESIZED IDEOGRAPH SUPERVISE -323D PARENTHESIZED IDEOGRAPH ENTERPRISE -323E PARENTHESIZED IDEOGRAPH RESOURCE -323F PARENTHESIZED IDEOGRAPH ALLIANCE -3240 PARENTHESIZED IDEOGRAPH FESTIVAL -3241 PARENTHESIZED IDEOGRAPH REST -3242 PARENTHESIZED IDEOGRAPH SELF -3243 PARENTHESIZED IDEOGRAPH REACH -3244 CIRCLED IDEOGRAPH QUESTION -3245 CIRCLED IDEOGRAPH KINDERGARTEN -3246 CIRCLED IDEOGRAPH SCHOOL -3247 CIRCLED IDEOGRAPH KOTO -3248 CIRCLED NUMBER TEN ON BLACK SQUARE -3249 CIRCLED NUMBER TWENTY ON BLACK SQUARE -324A CIRCLED NUMBER THIRTY ON BLACK SQUARE -324B CIRCLED NUMBER FORTY ON BLACK SQUARE -324C CIRCLED NUMBER FIFTY ON BLACK SQUARE -324D CIRCLED NUMBER SIXTY ON BLACK SQUARE -324E CIRCLED NUMBER SEVENTY ON BLACK SQUARE -324F CIRCLED NUMBER EIGHTY ON BLACK SQUARE -3250 PARTNERSHIP SIGN -3251 CIRCLED NUMBER TWENTY ONE -3252 CIRCLED NUMBER TWENTY TWO -3253 CIRCLED NUMBER TWENTY THREE -3254 CIRCLED NUMBER TWENTY FOUR -3255 CIRCLED NUMBER TWENTY FIVE -3256 CIRCLED NUMBER TWENTY SIX -3257 CIRCLED NUMBER TWENTY SEVEN -3258 CIRCLED NUMBER TWENTY EIGHT -3259 CIRCLED NUMBER TWENTY NINE -325A CIRCLED NUMBER THIRTY -325B CIRCLED NUMBER THIRTY ONE -325C CIRCLED NUMBER THIRTY TWO -325D CIRCLED NUMBER THIRTY THREE -325E CIRCLED NUMBER THIRTY FOUR -325F CIRCLED NUMBER THIRTY FIVE -3260 CIRCLED HANGUL KIYEOK -3261 CIRCLED HANGUL NIEUN -3262 CIRCLED HANGUL TIKEUT -3263 CIRCLED HANGUL RIEUL -3264 CIRCLED HANGUL MIEUM -3265 CIRCLED HANGUL PIEUP -3266 CIRCLED HANGUL SIOS -3267 CIRCLED HANGUL IEUNG -3268 CIRCLED HANGUL CIEUC -3269 CIRCLED HANGUL CHIEUCH -326A CIRCLED HANGUL KHIEUKH -326B CIRCLED HANGUL THIEUTH -326C CIRCLED HANGUL PHIEUPH -326D 
CIRCLED HANGUL HIEUH -326E CIRCLED HANGUL KIYEOK A -326F CIRCLED HANGUL NIEUN A -3270 CIRCLED HANGUL TIKEUT A -3271 CIRCLED HANGUL RIEUL A -3272 CIRCLED HANGUL MIEUM A -3273 CIRCLED HANGUL PIEUP A -3274 CIRCLED HANGUL SIOS A -3275 CIRCLED HANGUL IEUNG A -3276 CIRCLED HANGUL CIEUC A -3277 CIRCLED HANGUL CHIEUCH A -3278 CIRCLED HANGUL KHIEUKH A -3279 CIRCLED HANGUL THIEUTH A -327A CIRCLED HANGUL PHIEUPH A -327B CIRCLED HANGUL HIEUH A -327C CIRCLED KOREAN CHARACTER CHAMKO -327D CIRCLED KOREAN CHARACTER JUEUI -327E CIRCLED HANGUL IEUNG U -327F KOREAN STANDARD SYMBOL -3280 CIRCLED IDEOGRAPH ONE -3281 CIRCLED IDEOGRAPH TWO -3282 CIRCLED IDEOGRAPH THREE -3283 CIRCLED IDEOGRAPH FOUR -3284 CIRCLED IDEOGRAPH FIVE -3285 CIRCLED IDEOGRAPH SIX -3286 CIRCLED IDEOGRAPH SEVEN -3287 CIRCLED IDEOGRAPH EIGHT -3288 CIRCLED IDEOGRAPH NINE -3289 CIRCLED IDEOGRAPH TEN -328A CIRCLED IDEOGRAPH MOON -328B CIRCLED IDEOGRAPH FIRE -328C CIRCLED IDEOGRAPH WATER -328D CIRCLED IDEOGRAPH WOOD -328E CIRCLED IDEOGRAPH METAL -328F CIRCLED IDEOGRAPH EARTH -3290 CIRCLED IDEOGRAPH SUN -3291 CIRCLED IDEOGRAPH STOCK -3292 CIRCLED IDEOGRAPH HAVE -3293 CIRCLED IDEOGRAPH SOCIETY -3294 CIRCLED IDEOGRAPH NAME -3295 CIRCLED IDEOGRAPH SPECIAL -3296 CIRCLED IDEOGRAPH FINANCIAL -3297 CIRCLED IDEOGRAPH CONGRATULATION -3298 CIRCLED IDEOGRAPH LABOR -3299 CIRCLED IDEOGRAPH SECRET -329A CIRCLED IDEOGRAPH MALE -329B CIRCLED IDEOGRAPH FEMALE -329C CIRCLED IDEOGRAPH SUITABLE -329D CIRCLED IDEOGRAPH EXCELLENT -329E CIRCLED IDEOGRAPH PRINT -329F CIRCLED IDEOGRAPH ATTENTION -32A0 CIRCLED IDEOGRAPH ITEM -32A1 CIRCLED IDEOGRAPH REST -32A2 CIRCLED IDEOGRAPH COPY -32A3 CIRCLED IDEOGRAPH CORRECT -32A4 CIRCLED IDEOGRAPH HIGH -32A5 CIRCLED IDEOGRAPH CENTRE -32A6 CIRCLED IDEOGRAPH LOW -32A7 CIRCLED IDEOGRAPH LEFT -32A8 CIRCLED IDEOGRAPH RIGHT -32A9 CIRCLED IDEOGRAPH MEDICINE -32AA CIRCLED IDEOGRAPH RELIGION -32AB CIRCLED IDEOGRAPH STUDY -32AC CIRCLED IDEOGRAPH SUPERVISE -32AD CIRCLED IDEOGRAPH ENTERPRISE -32AE CIRCLED IDEOGRAPH 
RESOURCE -32AF CIRCLED IDEOGRAPH ALLIANCE -32B0 CIRCLED IDEOGRAPH NIGHT -32B1 CIRCLED NUMBER THIRTY SIX -32B2 CIRCLED NUMBER THIRTY SEVEN -32B3 CIRCLED NUMBER THIRTY EIGHT -32B4 CIRCLED NUMBER THIRTY NINE -32B5 CIRCLED NUMBER FORTY -32B6 CIRCLED NUMBER FORTY ONE -32B7 CIRCLED NUMBER FORTY TWO -32B8 CIRCLED NUMBER FORTY THREE -32B9 CIRCLED NUMBER FORTY FOUR -32BA CIRCLED NUMBER FORTY FIVE -32BB CIRCLED NUMBER FORTY SIX -32BC CIRCLED NUMBER FORTY SEVEN -32BD CIRCLED NUMBER FORTY EIGHT -32BE CIRCLED NUMBER FORTY NINE -32BF CIRCLED NUMBER FIFTY -32C0 IDEOGRAPHIC TELEGRAPH SYMBOL FOR JANUARY -32C1 IDEOGRAPHIC TELEGRAPH SYMBOL FOR FEBRUARY -32C2 IDEOGRAPHIC TELEGRAPH SYMBOL FOR MARCH -32C3 IDEOGRAPHIC TELEGRAPH SYMBOL FOR APRIL -32C4 IDEOGRAPHIC TELEGRAPH SYMBOL FOR MAY -32C5 IDEOGRAPHIC TELEGRAPH SYMBOL FOR JUNE -32C6 IDEOGRAPHIC TELEGRAPH SYMBOL FOR JULY -32C7 IDEOGRAPHIC TELEGRAPH SYMBOL FOR AUGUST -32C8 IDEOGRAPHIC TELEGRAPH SYMBOL FOR SEPTEMBER -32C9 IDEOGRAPHIC TELEGRAPH SYMBOL FOR OCTOBER -32CA IDEOGRAPHIC TELEGRAPH SYMBOL FOR NOVEMBER -32CB IDEOGRAPHIC TELEGRAPH SYMBOL FOR DECEMBER -32CC SQUARE HG -32CD SQUARE ERG -32CE SQUARE EV -32CF LIMITED LIABILITY SIGN -32D0 CIRCLED KATAKANA A -32D1 CIRCLED KATAKANA I -32D2 CIRCLED KATAKANA U -32D3 CIRCLED KATAKANA E -32D4 CIRCLED KATAKANA O -32D5 CIRCLED KATAKANA KA -32D6 CIRCLED KATAKANA KI -32D7 CIRCLED KATAKANA KU -32D8 CIRCLED KATAKANA KE -32D9 CIRCLED KATAKANA KO -32DA CIRCLED KATAKANA SA -32DB CIRCLED KATAKANA SI -32DC CIRCLED KATAKANA SU -32DD CIRCLED KATAKANA SE -32DE CIRCLED KATAKANA SO -32DF CIRCLED KATAKANA TA -32E0 CIRCLED KATAKANA TI -32E1 CIRCLED KATAKANA TU -32E2 CIRCLED KATAKANA TE -32E3 CIRCLED KATAKANA TO -32E4 CIRCLED KATAKANA NA -32E5 CIRCLED KATAKANA NI -32E6 CIRCLED KATAKANA NU -32E7 CIRCLED KATAKANA NE -32E8 CIRCLED KATAKANA NO -32E9 CIRCLED KATAKANA HA -32EA CIRCLED KATAKANA HI -32EB CIRCLED KATAKANA HU -32EC CIRCLED KATAKANA HE -32ED CIRCLED KATAKANA HO -32EE CIRCLED KATAKANA MA -32EF CIRCLED 
KATAKANA MI -32F0 CIRCLED KATAKANA MU -32F1 CIRCLED KATAKANA ME -32F2 CIRCLED KATAKANA MO -32F3 CIRCLED KATAKANA YA -32F4 CIRCLED KATAKANA YU -32F5 CIRCLED KATAKANA YO -32F6 CIRCLED KATAKANA RA -32F7 CIRCLED KATAKANA RI -32F8 CIRCLED KATAKANA RU -32F9 CIRCLED KATAKANA RE -32FA CIRCLED KATAKANA RO -32FB CIRCLED KATAKANA WA -32FC CIRCLED KATAKANA WI -32FD CIRCLED KATAKANA WE -32FE CIRCLED KATAKANA WO -3300 SQUARE APAATO -3301 SQUARE ARUHUA -3302 SQUARE ANPEA -3303 SQUARE AARU -3304 SQUARE ININGU -3305 SQUARE INTI -3306 SQUARE UON -3307 SQUARE ESUKUUDO -3308 SQUARE EEKAA -3309 SQUARE ONSU -330A SQUARE OOMU -330B SQUARE KAIRI -330C SQUARE KARATTO -330D SQUARE KARORII -330E SQUARE GARON -330F SQUARE GANMA -3310 SQUARE GIGA -3311 SQUARE GINII -3312 SQUARE KYURII -3313 SQUARE GIRUDAA -3314 SQUARE KIRO -3315 SQUARE KIROGURAMU -3316 SQUARE KIROMEETORU -3317 SQUARE KIROWATTO -3318 SQUARE GURAMU -3319 SQUARE GURAMUTON -331A SQUARE KURUZEIRO -331B SQUARE KUROONE -331C SQUARE KEESU -331D SQUARE KORUNA -331E SQUARE KOOPO -331F SQUARE SAIKURU -3320 SQUARE SANTIIMU -3321 SQUARE SIRINGU -3322 SQUARE SENTI -3323 SQUARE SENTO -3324 SQUARE DAASU -3325 SQUARE DESI -3326 SQUARE DORU -3327 SQUARE TON -3328 SQUARE NANO -3329 SQUARE NOTTO -332A SQUARE HAITU -332B SQUARE PAASENTO -332C SQUARE PAATU -332D SQUARE BAARERU -332E SQUARE PIASUTORU -332F SQUARE PIKURU -3330 SQUARE PIKO -3331 SQUARE BIRU -3332 SQUARE HUARADDO -3333 SQUARE HUIITO -3334 SQUARE BUSSYERU -3335 SQUARE HURAN -3336 SQUARE HEKUTAARU -3337 SQUARE PESO -3338 SQUARE PENIHI -3339 SQUARE HERUTU -333A SQUARE PENSU -333B SQUARE PEEZI -333C SQUARE BEETA -333D SQUARE POINTO -333E SQUARE BORUTO -333F SQUARE HON -3340 SQUARE PONDO -3341 SQUARE HOORU -3342 SQUARE HOON -3343 SQUARE MAIKURO -3344 SQUARE MAIRU -3345 SQUARE MAHHA -3346 SQUARE MARUKU -3347 SQUARE MANSYON -3348 SQUARE MIKURON -3349 SQUARE MIRI -334A SQUARE MIRIBAARU -334B SQUARE MEGA -334C SQUARE MEGATON -334D SQUARE MEETORU -334E SQUARE YAADO -334F SQUARE YAARU -3350 
SQUARE YUAN -3351 SQUARE RITTORU -3352 SQUARE RIRA -3353 SQUARE RUPII -3354 SQUARE RUUBURU -3355 SQUARE REMU -3356 SQUARE RENTOGEN -3357 SQUARE WATTO -3358 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR ZERO -3359 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR ONE -335A IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWO -335B IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR THREE -335C IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR FOUR -335D IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR FIVE -335E IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR SIX -335F IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR SEVEN -3360 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR EIGHT -3361 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR NINE -3362 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TEN -3363 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR ELEVEN -3364 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWELVE -3365 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR THIRTEEN -3366 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR FOURTEEN -3367 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR FIFTEEN -3368 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR SIXTEEN -3369 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR SEVENTEEN -336A IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR EIGHTEEN -336B IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR NINETEEN -336C IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWENTY -336D IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWENTY-ONE -336E IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWENTY-TWO -336F IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWENTY-THREE -3370 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWENTY-FOUR -3371 SQUARE HPA -3372 SQUARE DA -3373 SQUARE AU -3374 SQUARE BAR -3375 SQUARE OV -3376 SQUARE PC -3377 SQUARE DM -3378 SQUARE DM SQUARED -3379 SQUARE DM CUBED -337A SQUARE IU -337B SQUARE ERA NAME HEISEI -337C SQUARE ERA NAME SYOUWA -337D SQUARE ERA NAME TAISYOU -337E SQUARE ERA NAME MEIZI -337F SQUARE CORPORATION -3380 SQUARE PA AMPS -3381 SQUARE NA -3382 SQUARE MU A -3383 SQUARE MA -3384 SQUARE KA -3385 SQUARE KB -3386 SQUARE MB -3387 SQUARE GB -3388 SQUARE CAL -3389 SQUARE KCAL -338A SQUARE PF -338B SQUARE NF -338C SQUARE MU F -338D SQUARE MU G -338E 
SQUARE MG -338F SQUARE KG -3390 SQUARE HZ -3391 SQUARE KHZ -3392 SQUARE MHZ -3393 SQUARE GHZ -3394 SQUARE THZ -3395 SQUARE MU L -3396 SQUARE ML -3397 SQUARE DL -3398 SQUARE KL -3399 SQUARE FM -339A SQUARE NM -339B SQUARE MU M -339C SQUARE MM -339D SQUARE CM -339E SQUARE KM -339F SQUARE MM SQUARED -33A0 SQUARE CM SQUARED -33A1 SQUARE M SQUARED -33A2 SQUARE KM SQUARED -33A3 SQUARE MM CUBED -33A4 SQUARE CM CUBED -33A5 SQUARE M CUBED -33A6 SQUARE KM CUBED -33A7 SQUARE M OVER S -33A8 SQUARE M OVER S SQUARED -33A9 SQUARE PA -33AA SQUARE KPA -33AB SQUARE MPA -33AC SQUARE GPA -33AD SQUARE RAD -33AE SQUARE RAD OVER S -33AF SQUARE RAD OVER S SQUARED -33B0 SQUARE PS -33B1 SQUARE NS -33B2 SQUARE MU S -33B3 SQUARE MS -33B4 SQUARE PV -33B5 SQUARE NV -33B6 SQUARE MU V -33B7 SQUARE MV -33B8 SQUARE KV -33B9 SQUARE MV MEGA -33BA SQUARE PW -33BB SQUARE NW -33BC SQUARE MU W -33BD SQUARE MW -33BE SQUARE KW -33BF SQUARE MW MEGA -33C0 SQUARE K OHM -33C1 SQUARE M OHM -33C2 SQUARE AM -33C3 SQUARE BQ -33C4 SQUARE CC -33C5 SQUARE CD -33C6 SQUARE C OVER KG -33C7 SQUARE CO -33C8 SQUARE DB -33C9 SQUARE GY -33CA SQUARE HA -33CB SQUARE HP -33CC SQUARE IN -33CD SQUARE KK -33CE SQUARE KM CAPITAL -33CF SQUARE KT -33D0 SQUARE LM -33D1 SQUARE LN -33D2 SQUARE LOG -33D3 SQUARE LX -33D4 SQUARE MB SMALL -33D5 SQUARE MIL -33D6 SQUARE MOL -33D7 SQUARE PH -33D8 SQUARE PM -33D9 SQUARE PPM -33DA SQUARE PR -33DB SQUARE SR -33DC SQUARE SV -33DD SQUARE WB -33DE SQUARE V OVER M -33DF SQUARE A OVER M -33E0 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY ONE -33E1 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWO -33E2 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY THREE -33E3 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY FOUR -33E4 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY FIVE -33E5 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY SIX -33E6 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY SEVEN -33E7 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY EIGHT -33E8 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY NINE -33E9 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TEN -33EA IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY 
ELEVEN -33EB IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWELVE -33EC IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY THIRTEEN -33ED IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY FOURTEEN -33EE IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY FIFTEEN -33EF IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY SIXTEEN -33F0 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY SEVENTEEN -33F1 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY EIGHTEEN -33F2 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY NINETEEN -33F3 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY -33F4 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-ONE -33F5 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-TWO -33F6 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-THREE -33F7 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-FOUR -33F8 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-FIVE -33F9 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-SIX -33FA IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-SEVEN -33FB IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-EIGHT -33FC IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-NINE -33FD IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY THIRTY -33FE IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY THIRTY-ONE -33FF SQUARE GAL -3400 <CJK Ideograph Extension A, First> -4DB5 <CJK Ideograph Extension A, Last> -4DC0 HEXAGRAM FOR THE CREATIVE HEAVEN -4DC1 HEXAGRAM FOR THE RECEPTIVE EARTH -4DC2 HEXAGRAM FOR DIFFICULTY AT THE BEGINNING -4DC3 HEXAGRAM FOR YOUTHFUL FOLLY -4DC4 HEXAGRAM FOR WAITING -4DC5 HEXAGRAM FOR CONFLICT -4DC6 HEXAGRAM FOR THE ARMY -4DC7 HEXAGRAM FOR HOLDING TOGETHER -4DC8 HEXAGRAM FOR SMALL TAMING -4DC9 HEXAGRAM FOR TREADING -4DCA HEXAGRAM FOR PEACE -4DCB HEXAGRAM FOR STANDSTILL -4DCC HEXAGRAM FOR FELLOWSHIP -4DCD HEXAGRAM FOR GREAT POSSESSION -4DCE HEXAGRAM FOR MODESTY -4DCF HEXAGRAM FOR ENTHUSIASM -4DD0 HEXAGRAM FOR FOLLOWING -4DD1 HEXAGRAM FOR WORK ON THE DECAYED -4DD2 HEXAGRAM FOR APPROACH -4DD3 HEXAGRAM FOR CONTEMPLATION -4DD4 HEXAGRAM FOR BITING THROUGH -4DD5 HEXAGRAM FOR GRACE -4DD6 HEXAGRAM FOR SPLITTING APART -4DD7 HEXAGRAM FOR RETURN -4DD8 HEXAGRAM FOR INNOCENCE -4DD9 HEXAGRAM FOR GREAT TAMING -4DDA 
HEXAGRAM FOR MOUTH CORNERS -4DDB HEXAGRAM FOR GREAT PREPONDERANCE -4DDC HEXAGRAM FOR THE ABYSMAL WATER -4DDD HEXAGRAM FOR THE CLINGING FIRE -4DDE HEXAGRAM FOR INFLUENCE -4DDF HEXAGRAM FOR DURATION -4DE0 HEXAGRAM FOR RETREAT -4DE1 HEXAGRAM FOR GREAT POWER -4DE2 HEXAGRAM FOR PROGRESS -4DE3 HEXAGRAM FOR DARKENING OF THE LIGHT -4DE4 HEXAGRAM FOR THE FAMILY -4DE5 HEXAGRAM FOR OPPOSITION -4DE6 HEXAGRAM FOR OBSTRUCTION -4DE7 HEXAGRAM FOR DELIVERANCE -4DE8 HEXAGRAM FOR DECREASE -4DE9 HEXAGRAM FOR INCREASE -4DEA HEXAGRAM FOR BREAKTHROUGH -4DEB HEXAGRAM FOR COMING TO MEET -4DEC HEXAGRAM FOR GATHERING TOGETHER -4DED HEXAGRAM FOR PUSHING UPWARD -4DEE HEXAGRAM FOR OPPRESSION -4DEF HEXAGRAM FOR THE WELL -4DF0 HEXAGRAM FOR REVOLUTION -4DF1 HEXAGRAM FOR THE CAULDRON -4DF2 HEXAGRAM FOR THE AROUSING THUNDER -4DF3 HEXAGRAM FOR THE KEEPING STILL MOUNTAIN -4DF4 HEXAGRAM FOR DEVELOPMENT -4DF5 HEXAGRAM FOR THE MARRYING MAIDEN -4DF6 HEXAGRAM FOR ABUNDANCE -4DF7 HEXAGRAM FOR THE WANDERER -4DF8 HEXAGRAM FOR THE GENTLE WIND -4DF9 HEXAGRAM FOR THE JOYOUS LAKE -4DFA HEXAGRAM FOR DISPERSION -4DFB HEXAGRAM FOR LIMITATION -4DFC HEXAGRAM FOR INNER TRUTH -4DFD HEXAGRAM FOR SMALL PREPONDERANCE -4DFE HEXAGRAM FOR AFTER COMPLETION -4DFF HEXAGRAM FOR BEFORE COMPLETION -4E00 <CJK Ideograph, First> -9FCB <CJK Ideograph, Last> -A000 YI SYLLABLE IT -A001 YI SYLLABLE IX -A002 YI SYLLABLE I -A003 YI SYLLABLE IP -A004 YI SYLLABLE IET -A005 YI SYLLABLE IEX -A006 YI SYLLABLE IE -A007 YI SYLLABLE IEP -A008 YI SYLLABLE AT -A009 YI SYLLABLE AX -A00A YI SYLLABLE A -A00B YI SYLLABLE AP -A00C YI SYLLABLE UOX -A00D YI SYLLABLE UO -A00E YI SYLLABLE UOP -A00F YI SYLLABLE OT -A010 YI SYLLABLE OX -A011 YI SYLLABLE O -A012 YI SYLLABLE OP -A013 YI SYLLABLE EX -A014 YI SYLLABLE E -A015 YI SYLLABLE WU -A016 YI SYLLABLE BIT -A017 YI SYLLABLE BIX -A018 YI SYLLABLE BI -A019 YI SYLLABLE BIP -A01A YI SYLLABLE BIET -A01B YI SYLLABLE BIEX -A01C YI SYLLABLE BIE -A01D YI SYLLABLE BIEP -A01E YI SYLLABLE BAT -A01F YI SYLLABLE BAX -A020 
YI SYLLABLE BA -A021 YI SYLLABLE BAP -A022 YI SYLLABLE BUOX -A023 YI SYLLABLE BUO -A024 YI SYLLABLE BUOP -A025 YI SYLLABLE BOT -A026 YI SYLLABLE BOX -A027 YI SYLLABLE BO -A028 YI SYLLABLE BOP -A029 YI SYLLABLE BEX -A02A YI SYLLABLE BE -A02B YI SYLLABLE BEP -A02C YI SYLLABLE BUT -A02D YI SYLLABLE BUX -A02E YI SYLLABLE BU -A02F YI SYLLABLE BUP -A030 YI SYLLABLE BURX -A031 YI SYLLABLE BUR -A032 YI SYLLABLE BYT -A033 YI SYLLABLE BYX -A034 YI SYLLABLE BY -A035 YI SYLLABLE BYP -A036 YI SYLLABLE BYRX -A037 YI SYLLABLE BYR -A038 YI SYLLABLE PIT -A039 YI SYLLABLE PIX -A03A YI SYLLABLE PI -A03B YI SYLLABLE PIP -A03C YI SYLLABLE PIEX -A03D YI SYLLABLE PIE -A03E YI SYLLABLE PIEP -A03F YI SYLLABLE PAT -A040 YI SYLLABLE PAX -A041 YI SYLLABLE PA -A042 YI SYLLABLE PAP -A043 YI SYLLABLE PUOX -A044 YI SYLLABLE PUO -A045 YI SYLLABLE PUOP -A046 YI SYLLABLE POT -A047 YI SYLLABLE POX -A048 YI SYLLABLE PO -A049 YI SYLLABLE POP -A04A YI SYLLABLE PUT -A04B YI SYLLABLE PUX -A04C YI SYLLABLE PU -A04D YI SYLLABLE PUP -A04E YI SYLLABLE PURX -A04F YI SYLLABLE PUR -A050 YI SYLLABLE PYT -A051 YI SYLLABLE PYX -A052 YI SYLLABLE PY -A053 YI SYLLABLE PYP -A054 YI SYLLABLE PYRX -A055 YI SYLLABLE PYR -A056 YI SYLLABLE BBIT -A057 YI SYLLABLE BBIX -A058 YI SYLLABLE BBI -A059 YI SYLLABLE BBIP -A05A YI SYLLABLE BBIET -A05B YI SYLLABLE BBIEX -A05C YI SYLLABLE BBIE -A05D YI SYLLABLE BBIEP -A05E YI SYLLABLE BBAT -A05F YI SYLLABLE BBAX -A060 YI SYLLABLE BBA -A061 YI SYLLABLE BBAP -A062 YI SYLLABLE BBUOX -A063 YI SYLLABLE BBUO -A064 YI SYLLABLE BBUOP -A065 YI SYLLABLE BBOT -A066 YI SYLLABLE BBOX -A067 YI SYLLABLE BBO -A068 YI SYLLABLE BBOP -A069 YI SYLLABLE BBEX -A06A YI SYLLABLE BBE -A06B YI SYLLABLE BBEP -A06C YI SYLLABLE BBUT -A06D YI SYLLABLE BBUX -A06E YI SYLLABLE BBU -A06F YI SYLLABLE BBUP -A070 YI SYLLABLE BBURX -A071 YI SYLLABLE BBUR -A072 YI SYLLABLE BBYT -A073 YI SYLLABLE BBYX -A074 YI SYLLABLE BBY -A075 YI SYLLABLE BBYP -A076 YI SYLLABLE NBIT -A077 YI SYLLABLE NBIX -A078 YI SYLLABLE NBI -A079 YI 
SYLLABLE NBIP -A07A YI SYLLABLE NBIEX -A07B YI SYLLABLE NBIE -A07C YI SYLLABLE NBIEP -A07D YI SYLLABLE NBAT -A07E YI SYLLABLE NBAX -A07F YI SYLLABLE NBA -A080 YI SYLLABLE NBAP -A081 YI SYLLABLE NBOT -A082 YI SYLLABLE NBOX -A083 YI SYLLABLE NBO -A084 YI SYLLABLE NBOP -A085 YI SYLLABLE NBUT -A086 YI SYLLABLE NBUX -A087 YI SYLLABLE NBU -A088 YI SYLLABLE NBUP -A089 YI SYLLABLE NBURX -A08A YI SYLLABLE NBUR -A08B YI SYLLABLE NBYT -A08C YI SYLLABLE NBYX -A08D YI SYLLABLE NBY -A08E YI SYLLABLE NBYP -A08F YI SYLLABLE NBYRX -A090 YI SYLLABLE NBYR -A091 YI SYLLABLE HMIT -A092 YI SYLLABLE HMIX -A093 YI SYLLABLE HMI -A094 YI SYLLABLE HMIP -A095 YI SYLLABLE HMIEX -A096 YI SYLLABLE HMIE -A097 YI SYLLABLE HMIEP -A098 YI SYLLABLE HMAT -A099 YI SYLLABLE HMAX -A09A YI SYLLABLE HMA -A09B YI SYLLABLE HMAP -A09C YI SYLLABLE HMUOX -A09D YI SYLLABLE HMUO -A09E YI SYLLABLE HMUOP -A09F YI SYLLABLE HMOT -A0A0 YI SYLLABLE HMOX -A0A1 YI SYLLABLE HMO -A0A2 YI SYLLABLE HMOP -A0A3 YI SYLLABLE HMUT -A0A4 YI SYLLABLE HMUX -A0A5 YI SYLLABLE HMU -A0A6 YI SYLLABLE HMUP -A0A7 YI SYLLABLE HMURX -A0A8 YI SYLLABLE HMUR -A0A9 YI SYLLABLE HMYX -A0AA YI SYLLABLE HMY -A0AB YI SYLLABLE HMYP -A0AC YI SYLLABLE HMYRX -A0AD YI SYLLABLE HMYR -A0AE YI SYLLABLE MIT -A0AF YI SYLLABLE MIX -A0B0 YI SYLLABLE MI -A0B1 YI SYLLABLE MIP -A0B2 YI SYLLABLE MIEX -A0B3 YI SYLLABLE MIE -A0B4 YI SYLLABLE MIEP -A0B5 YI SYLLABLE MAT -A0B6 YI SYLLABLE MAX -A0B7 YI SYLLABLE MA -A0B8 YI SYLLABLE MAP -A0B9 YI SYLLABLE MUOT -A0BA YI SYLLABLE MUOX -A0BB YI SYLLABLE MUO -A0BC YI SYLLABLE MUOP -A0BD YI SYLLABLE MOT -A0BE YI SYLLABLE MOX -A0BF YI SYLLABLE MO -A0C0 YI SYLLABLE MOP -A0C1 YI SYLLABLE MEX -A0C2 YI SYLLABLE ME -A0C3 YI SYLLABLE MUT -A0C4 YI SYLLABLE MUX -A0C5 YI SYLLABLE MU -A0C6 YI SYLLABLE MUP -A0C7 YI SYLLABLE MURX -A0C8 YI SYLLABLE MUR -A0C9 YI SYLLABLE MYT -A0CA YI SYLLABLE MYX -A0CB YI SYLLABLE MY -A0CC YI SYLLABLE MYP -A0CD YI SYLLABLE FIT -A0CE YI SYLLABLE FIX -A0CF YI SYLLABLE FI -A0D0 YI SYLLABLE FIP -A0D1 YI SYLLABLE 
FAT -A0D2 YI SYLLABLE FAX -A0D3 YI SYLLABLE FA -A0D4 YI SYLLABLE FAP -A0D5 YI SYLLABLE FOX -A0D6 YI SYLLABLE FO -A0D7 YI SYLLABLE FOP -A0D8 YI SYLLABLE FUT -A0D9 YI SYLLABLE FUX -A0DA YI SYLLABLE FU -A0DB YI SYLLABLE FUP -A0DC YI SYLLABLE FURX -A0DD YI SYLLABLE FUR -A0DE YI SYLLABLE FYT -A0DF YI SYLLABLE FYX -A0E0 YI SYLLABLE FY -A0E1 YI SYLLABLE FYP -A0E2 YI SYLLABLE VIT -A0E3 YI SYLLABLE VIX -A0E4 YI SYLLABLE VI -A0E5 YI SYLLABLE VIP -A0E6 YI SYLLABLE VIET -A0E7 YI SYLLABLE VIEX -A0E8 YI SYLLABLE VIE -A0E9 YI SYLLABLE VIEP -A0EA YI SYLLABLE VAT -A0EB YI SYLLABLE VAX -A0EC YI SYLLABLE VA -A0ED YI SYLLABLE VAP -A0EE YI SYLLABLE VOT -A0EF YI SYLLABLE VOX -A0F0 YI SYLLABLE VO -A0F1 YI SYLLABLE VOP -A0F2 YI SYLLABLE VEX -A0F3 YI SYLLABLE VEP -A0F4 YI SYLLABLE VUT -A0F5 YI SYLLABLE VUX -A0F6 YI SYLLABLE VU -A0F7 YI SYLLABLE VUP -A0F8 YI SYLLABLE VURX -A0F9 YI SYLLABLE VUR -A0FA YI SYLLABLE VYT -A0FB YI SYLLABLE VYX -A0FC YI SYLLABLE VY -A0FD YI SYLLABLE VYP -A0FE YI SYLLABLE VYRX -A0FF YI SYLLABLE VYR -A100 YI SYLLABLE DIT -A101 YI SYLLABLE DIX -A102 YI SYLLABLE DI -A103 YI SYLLABLE DIP -A104 YI SYLLABLE DIEX -A105 YI SYLLABLE DIE -A106 YI SYLLABLE DIEP -A107 YI SYLLABLE DAT -A108 YI SYLLABLE DAX -A109 YI SYLLABLE DA -A10A YI SYLLABLE DAP -A10B YI SYLLABLE DUOX -A10C YI SYLLABLE DUO -A10D YI SYLLABLE DOT -A10E YI SYLLABLE DOX -A10F YI SYLLABLE DO -A110 YI SYLLABLE DOP -A111 YI SYLLABLE DEX -A112 YI SYLLABLE DE -A113 YI SYLLABLE DEP -A114 YI SYLLABLE DUT -A115 YI SYLLABLE DUX -A116 YI SYLLABLE DU -A117 YI SYLLABLE DUP -A118 YI SYLLABLE DURX -A119 YI SYLLABLE DUR -A11A YI SYLLABLE TIT -A11B YI SYLLABLE TIX -A11C YI SYLLABLE TI -A11D YI SYLLABLE TIP -A11E YI SYLLABLE TIEX -A11F YI SYLLABLE TIE -A120 YI SYLLABLE TIEP -A121 YI SYLLABLE TAT -A122 YI SYLLABLE TAX -A123 YI SYLLABLE TA -A124 YI SYLLABLE TAP -A125 YI SYLLABLE TUOT -A126 YI SYLLABLE TUOX -A127 YI SYLLABLE TUO -A128 YI SYLLABLE TUOP -A129 YI SYLLABLE TOT -A12A YI SYLLABLE TOX -A12B YI SYLLABLE TO -A12C YI SYLLABLE 
TOP -A12D YI SYLLABLE TEX -A12E YI SYLLABLE TE -A12F YI SYLLABLE TEP -A130 YI SYLLABLE TUT -A131 YI SYLLABLE TUX -A132 YI SYLLABLE TU -A133 YI SYLLABLE TUP -A134 YI SYLLABLE TURX -A135 YI SYLLABLE TUR -A136 YI SYLLABLE DDIT -A137 YI SYLLABLE DDIX -A138 YI SYLLABLE DDI -A139 YI SYLLABLE DDIP -A13A YI SYLLABLE DDIEX -A13B YI SYLLABLE DDIE -A13C YI SYLLABLE DDIEP -A13D YI SYLLABLE DDAT -A13E YI SYLLABLE DDAX -A13F YI SYLLABLE DDA -A140 YI SYLLABLE DDAP -A141 YI SYLLABLE DDUOX -A142 YI SYLLABLE DDUO -A143 YI SYLLABLE DDUOP -A144 YI SYLLABLE DDOT -A145 YI SYLLABLE DDOX -A146 YI SYLLABLE DDO -A147 YI SYLLABLE DDOP -A148 YI SYLLABLE DDEX -A149 YI SYLLABLE DDE -A14A YI SYLLABLE DDEP -A14B YI SYLLABLE DDUT -A14C YI SYLLABLE DDUX -A14D YI SYLLABLE DDU -A14E YI SYLLABLE DDUP -A14F YI SYLLABLE DDURX -A150 YI SYLLABLE DDUR -A151 YI SYLLABLE NDIT -A152 YI SYLLABLE NDIX -A153 YI SYLLABLE NDI -A154 YI SYLLABLE NDIP -A155 YI SYLLABLE NDIEX -A156 YI SYLLABLE NDIE -A157 YI SYLLABLE NDAT -A158 YI SYLLABLE NDAX -A159 YI SYLLABLE NDA -A15A YI SYLLABLE NDAP -A15B YI SYLLABLE NDOT -A15C YI SYLLABLE NDOX -A15D YI SYLLABLE NDO -A15E YI SYLLABLE NDOP -A15F YI SYLLABLE NDEX -A160 YI SYLLABLE NDE -A161 YI SYLLABLE NDEP -A162 YI SYLLABLE NDUT -A163 YI SYLLABLE NDUX -A164 YI SYLLABLE NDU -A165 YI SYLLABLE NDUP -A166 YI SYLLABLE NDURX -A167 YI SYLLABLE NDUR -A168 YI SYLLABLE HNIT -A169 YI SYLLABLE HNIX -A16A YI SYLLABLE HNI -A16B YI SYLLABLE HNIP -A16C YI SYLLABLE HNIET -A16D YI SYLLABLE HNIEX -A16E YI SYLLABLE HNIE -A16F YI SYLLABLE HNIEP -A170 YI SYLLABLE HNAT -A171 YI SYLLABLE HNAX -A172 YI SYLLABLE HNA -A173 YI SYLLABLE HNAP -A174 YI SYLLABLE HNUOX -A175 YI SYLLABLE HNUO -A176 YI SYLLABLE HNOT -A177 YI SYLLABLE HNOX -A178 YI SYLLABLE HNOP -A179 YI SYLLABLE HNEX -A17A YI SYLLABLE HNE -A17B YI SYLLABLE HNEP -A17C YI SYLLABLE HNUT -A17D YI SYLLABLE NIT -A17E YI SYLLABLE NIX -A17F YI SYLLABLE NI -A180 YI SYLLABLE NIP -A181 YI SYLLABLE NIEX -A182 YI SYLLABLE NIE -A183 YI SYLLABLE NIEP -A184 YI 
SYLLABLE NAX -A185 YI SYLLABLE NA -A186 YI SYLLABLE NAP -A187 YI SYLLABLE NUOX -A188 YI SYLLABLE NUO -A189 YI SYLLABLE NUOP -A18A YI SYLLABLE NOT -A18B YI SYLLABLE NOX -A18C YI SYLLABLE NO -A18D YI SYLLABLE NOP -A18E YI SYLLABLE NEX -A18F YI SYLLABLE NE -A190 YI SYLLABLE NEP -A191 YI SYLLABLE NUT -A192 YI SYLLABLE NUX -A193 YI SYLLABLE NU -A194 YI SYLLABLE NUP -A195 YI SYLLABLE NURX -A196 YI SYLLABLE NUR -A197 YI SYLLABLE HLIT -A198 YI SYLLABLE HLIX -A199 YI SYLLABLE HLI -A19A YI SYLLABLE HLIP -A19B YI SYLLABLE HLIEX -A19C YI SYLLABLE HLIE -A19D YI SYLLABLE HLIEP -A19E YI SYLLABLE HLAT -A19F YI SYLLABLE HLAX -A1A0 YI SYLLABLE HLA -A1A1 YI SYLLABLE HLAP -A1A2 YI SYLLABLE HLUOX -A1A3 YI SYLLABLE HLUO -A1A4 YI SYLLABLE HLUOP -A1A5 YI SYLLABLE HLOX -A1A6 YI SYLLABLE HLO -A1A7 YI SYLLABLE HLOP -A1A8 YI SYLLABLE HLEX -A1A9 YI SYLLABLE HLE -A1AA YI SYLLABLE HLEP -A1AB YI SYLLABLE HLUT -A1AC YI SYLLABLE HLUX -A1AD YI SYLLABLE HLU -A1AE YI SYLLABLE HLUP -A1AF YI SYLLABLE HLURX -A1B0 YI SYLLABLE HLUR -A1B1 YI SYLLABLE HLYT -A1B2 YI SYLLABLE HLYX -A1B3 YI SYLLABLE HLY -A1B4 YI SYLLABLE HLYP -A1B5 YI SYLLABLE HLYRX -A1B6 YI SYLLABLE HLYR -A1B7 YI SYLLABLE LIT -A1B8 YI SYLLABLE LIX -A1B9 YI SYLLABLE LI -A1BA YI SYLLABLE LIP -A1BB YI SYLLABLE LIET -A1BC YI SYLLABLE LIEX -A1BD YI SYLLABLE LIE -A1BE YI SYLLABLE LIEP -A1BF YI SYLLABLE LAT -A1C0 YI SYLLABLE LAX -A1C1 YI SYLLABLE LA -A1C2 YI SYLLABLE LAP -A1C3 YI SYLLABLE LUOT -A1C4 YI SYLLABLE LUOX -A1C5 YI SYLLABLE LUO -A1C6 YI SYLLABLE LUOP -A1C7 YI SYLLABLE LOT -A1C8 YI SYLLABLE LOX -A1C9 YI SYLLABLE LO -A1CA YI SYLLABLE LOP -A1CB YI SYLLABLE LEX -A1CC YI SYLLABLE LE -A1CD YI SYLLABLE LEP -A1CE YI SYLLABLE LUT -A1CF YI SYLLABLE LUX -A1D0 YI SYLLABLE LU -A1D1 YI SYLLABLE LUP -A1D2 YI SYLLABLE LURX -A1D3 YI SYLLABLE LUR -A1D4 YI SYLLABLE LYT -A1D5 YI SYLLABLE LYX -A1D6 YI SYLLABLE LY -A1D7 YI SYLLABLE LYP -A1D8 YI SYLLABLE LYRX -A1D9 YI SYLLABLE LYR -A1DA YI SYLLABLE GIT -A1DB YI SYLLABLE GIX -A1DC YI SYLLABLE GI -A1DD YI SYLLABLE 
GIP -A1DE YI SYLLABLE GIET -A1DF YI SYLLABLE GIEX -A1E0 YI SYLLABLE GIE -A1E1 YI SYLLABLE GIEP -A1E2 YI SYLLABLE GAT -A1E3 YI SYLLABLE GAX -A1E4 YI SYLLABLE GA -A1E5 YI SYLLABLE GAP -A1E6 YI SYLLABLE GUOT -A1E7 YI SYLLABLE GUOX -A1E8 YI SYLLABLE GUO -A1E9 YI SYLLABLE GUOP -A1EA YI SYLLABLE GOT -A1EB YI SYLLABLE GOX -A1EC YI SYLLABLE GO -A1ED YI SYLLABLE GOP -A1EE YI SYLLABLE GET -A1EF YI SYLLABLE GEX -A1F0 YI SYLLABLE GE -A1F1 YI SYLLABLE GEP -A1F2 YI SYLLABLE GUT -A1F3 YI SYLLABLE GUX -A1F4 YI SYLLABLE GU -A1F5 YI SYLLABLE GUP -A1F6 YI SYLLABLE GURX -A1F7 YI SYLLABLE GUR -A1F8 YI SYLLABLE KIT -A1F9 YI SYLLABLE KIX -A1FA YI SYLLABLE KI -A1FB YI SYLLABLE KIP -A1FC YI SYLLABLE KIEX -A1FD YI SYLLABLE KIE -A1FE YI SYLLABLE KIEP -A1FF YI SYLLABLE KAT -A200 YI SYLLABLE KAX -A201 YI SYLLABLE KA -A202 YI SYLLABLE KAP -A203 YI SYLLABLE KUOX -A204 YI SYLLABLE KUO -A205 YI SYLLABLE KUOP -A206 YI SYLLABLE KOT -A207 YI SYLLABLE KOX -A208 YI SYLLABLE KO -A209 YI SYLLABLE KOP -A20A YI SYLLABLE KET -A20B YI SYLLABLE KEX -A20C YI SYLLABLE KE -A20D YI SYLLABLE KEP -A20E YI SYLLABLE KUT -A20F YI SYLLABLE KUX -A210 YI SYLLABLE KU -A211 YI SYLLABLE KUP -A212 YI SYLLABLE KURX -A213 YI SYLLABLE KUR -A214 YI SYLLABLE GGIT -A215 YI SYLLABLE GGIX -A216 YI SYLLABLE GGI -A217 YI SYLLABLE GGIEX -A218 YI SYLLABLE GGIE -A219 YI SYLLABLE GGIEP -A21A YI SYLLABLE GGAT -A21B YI SYLLABLE GGAX -A21C YI SYLLABLE GGA -A21D YI SYLLABLE GGAP -A21E YI SYLLABLE GGUOT -A21F YI SYLLABLE GGUOX -A220 YI SYLLABLE GGUO -A221 YI SYLLABLE GGUOP -A222 YI SYLLABLE GGOT -A223 YI SYLLABLE GGOX -A224 YI SYLLABLE GGO -A225 YI SYLLABLE GGOP -A226 YI SYLLABLE GGET -A227 YI SYLLABLE GGEX -A228 YI SYLLABLE GGE -A229 YI SYLLABLE GGEP -A22A YI SYLLABLE GGUT -A22B YI SYLLABLE GGUX -A22C YI SYLLABLE GGU -A22D YI SYLLABLE GGUP -A22E YI SYLLABLE GGURX -A22F YI SYLLABLE GGUR -A230 YI SYLLABLE MGIEX -A231 YI SYLLABLE MGIE -A232 YI SYLLABLE MGAT -A233 YI SYLLABLE MGAX -A234 YI SYLLABLE MGA -A235 YI SYLLABLE MGAP -A236 YI SYLLABLE 
MGUOX -A237 YI SYLLABLE MGUO -A238 YI SYLLABLE MGUOP -A239 YI SYLLABLE MGOT -A23A YI SYLLABLE MGOX -A23B YI SYLLABLE MGO -A23C YI SYLLABLE MGOP -A23D YI SYLLABLE MGEX -A23E YI SYLLABLE MGE -A23F YI SYLLABLE MGEP -A240 YI SYLLABLE MGUT -A241 YI SYLLABLE MGUX -A242 YI SYLLABLE MGU -A243 YI SYLLABLE MGUP -A244 YI SYLLABLE MGURX -A245 YI SYLLABLE MGUR -A246 YI SYLLABLE HXIT -A247 YI SYLLABLE HXIX -A248 YI SYLLABLE HXI -A249 YI SYLLABLE HXIP -A24A YI SYLLABLE HXIET -A24B YI SYLLABLE HXIEX -A24C YI SYLLABLE HXIE -A24D YI SYLLABLE HXIEP -A24E YI SYLLABLE HXAT -A24F YI SYLLABLE HXAX -A250 YI SYLLABLE HXA -A251 YI SYLLABLE HXAP -A252 YI SYLLABLE HXUOT -A253 YI SYLLABLE HXUOX -A254 YI SYLLABLE HXUO -A255 YI SYLLABLE HXUOP -A256 YI SYLLABLE HXOT -A257 YI SYLLABLE HXOX -A258 YI SYLLABLE HXO -A259 YI SYLLABLE HXOP -A25A YI SYLLABLE HXEX -A25B YI SYLLABLE HXE -A25C YI SYLLABLE HXEP -A25D YI SYLLABLE NGIEX -A25E YI SYLLABLE NGIE -A25F YI SYLLABLE NGIEP -A260 YI SYLLABLE NGAT -A261 YI SYLLABLE NGAX -A262 YI SYLLABLE NGA -A263 YI SYLLABLE NGAP -A264 YI SYLLABLE NGUOT -A265 YI SYLLABLE NGUOX -A266 YI SYLLABLE NGUO -A267 YI SYLLABLE NGOT -A268 YI SYLLABLE NGOX -A269 YI SYLLABLE NGO -A26A YI SYLLABLE NGOP -A26B YI SYLLABLE NGEX -A26C YI SYLLABLE NGE -A26D YI SYLLABLE NGEP -A26E YI SYLLABLE HIT -A26F YI SYLLABLE HIEX -A270 YI SYLLABLE HIE -A271 YI SYLLABLE HAT -A272 YI SYLLABLE HAX -A273 YI SYLLABLE HA -A274 YI SYLLABLE HAP -A275 YI SYLLABLE HUOT -A276 YI SYLLABLE HUOX -A277 YI SYLLABLE HUO -A278 YI SYLLABLE HUOP -A279 YI SYLLABLE HOT -A27A YI SYLLABLE HOX -A27B YI SYLLABLE HO -A27C YI SYLLABLE HOP -A27D YI SYLLABLE HEX -A27E YI SYLLABLE HE -A27F YI SYLLABLE HEP -A280 YI SYLLABLE WAT -A281 YI SYLLABLE WAX -A282 YI SYLLABLE WA -A283 YI SYLLABLE WAP -A284 YI SYLLABLE WUOX -A285 YI SYLLABLE WUO -A286 YI SYLLABLE WUOP -A287 YI SYLLABLE WOX -A288 YI SYLLABLE WO -A289 YI SYLLABLE WOP -A28A YI SYLLABLE WEX -A28B YI SYLLABLE WE -A28C YI SYLLABLE WEP -A28D YI SYLLABLE ZIT -A28E YI SYLLABLE ZIX 
-A28F YI SYLLABLE ZI -A290 YI SYLLABLE ZIP -A291 YI SYLLABLE ZIEX -A292 YI SYLLABLE ZIE -A293 YI SYLLABLE ZIEP -A294 YI SYLLABLE ZAT -A295 YI SYLLABLE ZAX -A296 YI SYLLABLE ZA -A297 YI SYLLABLE ZAP -A298 YI SYLLABLE ZUOX -A299 YI SYLLABLE ZUO -A29A YI SYLLABLE ZUOP -A29B YI SYLLABLE ZOT -A29C YI SYLLABLE ZOX -A29D YI SYLLABLE ZO -A29E YI SYLLABLE ZOP -A29F YI SYLLABLE ZEX -A2A0 YI SYLLABLE ZE -A2A1 YI SYLLABLE ZEP -A2A2 YI SYLLABLE ZUT -A2A3 YI SYLLABLE ZUX -A2A4 YI SYLLABLE ZU -A2A5 YI SYLLABLE ZUP -A2A6 YI SYLLABLE ZURX -A2A7 YI SYLLABLE ZUR -A2A8 YI SYLLABLE ZYT -A2A9 YI SYLLABLE ZYX -A2AA YI SYLLABLE ZY -A2AB YI SYLLABLE ZYP -A2AC YI SYLLABLE ZYRX -A2AD YI SYLLABLE ZYR -A2AE YI SYLLABLE CIT -A2AF YI SYLLABLE CIX -A2B0 YI SYLLABLE CI -A2B1 YI SYLLABLE CIP -A2B2 YI SYLLABLE CIET -A2B3 YI SYLLABLE CIEX -A2B4 YI SYLLABLE CIE -A2B5 YI SYLLABLE CIEP -A2B6 YI SYLLABLE CAT -A2B7 YI SYLLABLE CAX -A2B8 YI SYLLABLE CA -A2B9 YI SYLLABLE CAP -A2BA YI SYLLABLE CUOX -A2BB YI SYLLABLE CUO -A2BC YI SYLLABLE CUOP -A2BD YI SYLLABLE COT -A2BE YI SYLLABLE COX -A2BF YI SYLLABLE CO -A2C0 YI SYLLABLE COP -A2C1 YI SYLLABLE CEX -A2C2 YI SYLLABLE CE -A2C3 YI SYLLABLE CEP -A2C4 YI SYLLABLE CUT -A2C5 YI SYLLABLE CUX -A2C6 YI SYLLABLE CU -A2C7 YI SYLLABLE CUP -A2C8 YI SYLLABLE CURX -A2C9 YI SYLLABLE CUR -A2CA YI SYLLABLE CYT -A2CB YI SYLLABLE CYX -A2CC YI SYLLABLE CY -A2CD YI SYLLABLE CYP -A2CE YI SYLLABLE CYRX -A2CF YI SYLLABLE CYR -A2D0 YI SYLLABLE ZZIT -A2D1 YI SYLLABLE ZZIX -A2D2 YI SYLLABLE ZZI -A2D3 YI SYLLABLE ZZIP -A2D4 YI SYLLABLE ZZIET -A2D5 YI SYLLABLE ZZIEX -A2D6 YI SYLLABLE ZZIE -A2D7 YI SYLLABLE ZZIEP -A2D8 YI SYLLABLE ZZAT -A2D9 YI SYLLABLE ZZAX -A2DA YI SYLLABLE ZZA -A2DB YI SYLLABLE ZZAP -A2DC YI SYLLABLE ZZOX -A2DD YI SYLLABLE ZZO -A2DE YI SYLLABLE ZZOP -A2DF YI SYLLABLE ZZEX -A2E0 YI SYLLABLE ZZE -A2E1 YI SYLLABLE ZZEP -A2E2 YI SYLLABLE ZZUX -A2E3 YI SYLLABLE ZZU -A2E4 YI SYLLABLE ZZUP -A2E5 YI SYLLABLE ZZURX -A2E6 YI SYLLABLE ZZUR -A2E7 YI SYLLABLE ZZYT -A2E8 YI SYLLABLE 
ZZYX -A2E9 YI SYLLABLE ZZY -A2EA YI SYLLABLE ZZYP -A2EB YI SYLLABLE ZZYRX -A2EC YI SYLLABLE ZZYR -A2ED YI SYLLABLE NZIT -A2EE YI SYLLABLE NZIX -A2EF YI SYLLABLE NZI -A2F0 YI SYLLABLE NZIP -A2F1 YI SYLLABLE NZIEX -A2F2 YI SYLLABLE NZIE -A2F3 YI SYLLABLE NZIEP -A2F4 YI SYLLABLE NZAT -A2F5 YI SYLLABLE NZAX -A2F6 YI SYLLABLE NZA -A2F7 YI SYLLABLE NZAP -A2F8 YI SYLLABLE NZUOX -A2F9 YI SYLLABLE NZUO -A2FA YI SYLLABLE NZOX -A2FB YI SYLLABLE NZOP -A2FC YI SYLLABLE NZEX -A2FD YI SYLLABLE NZE -A2FE YI SYLLABLE NZUX -A2FF YI SYLLABLE NZU -A300 YI SYLLABLE NZUP -A301 YI SYLLABLE NZURX -A302 YI SYLLABLE NZUR -A303 YI SYLLABLE NZYT -A304 YI SYLLABLE NZYX -A305 YI SYLLABLE NZY -A306 YI SYLLABLE NZYP -A307 YI SYLLABLE NZYRX -A308 YI SYLLABLE NZYR -A309 YI SYLLABLE SIT -A30A YI SYLLABLE SIX -A30B YI SYLLABLE SI -A30C YI SYLLABLE SIP -A30D YI SYLLABLE SIEX -A30E YI SYLLABLE SIE -A30F YI SYLLABLE SIEP -A310 YI SYLLABLE SAT -A311 YI SYLLABLE SAX -A312 YI SYLLABLE SA -A313 YI SYLLABLE SAP -A314 YI SYLLABLE SUOX -A315 YI SYLLABLE SUO -A316 YI SYLLABLE SUOP -A317 YI SYLLABLE SOT -A318 YI SYLLABLE SOX -A319 YI SYLLABLE SO -A31A YI SYLLABLE SOP -A31B YI SYLLABLE SEX -A31C YI SYLLABLE SE -A31D YI SYLLABLE SEP -A31E YI SYLLABLE SUT -A31F YI SYLLABLE SUX -A320 YI SYLLABLE SU -A321 YI SYLLABLE SUP -A322 YI SYLLABLE SURX -A323 YI SYLLABLE SUR -A324 YI SYLLABLE SYT -A325 YI SYLLABLE SYX -A326 YI SYLLABLE SY -A327 YI SYLLABLE SYP -A328 YI SYLLABLE SYRX -A329 YI SYLLABLE SYR -A32A YI SYLLABLE SSIT -A32B YI SYLLABLE SSIX -A32C YI SYLLABLE SSI -A32D YI SYLLABLE SSIP -A32E YI SYLLABLE SSIEX -A32F YI SYLLABLE SSIE -A330 YI SYLLABLE SSIEP -A331 YI SYLLABLE SSAT -A332 YI SYLLABLE SSAX -A333 YI SYLLABLE SSA -A334 YI SYLLABLE SSAP -A335 YI SYLLABLE SSOT -A336 YI SYLLABLE SSOX -A337 YI SYLLABLE SSO -A338 YI SYLLABLE SSOP -A339 YI SYLLABLE SSEX -A33A YI SYLLABLE SSE -A33B YI SYLLABLE SSEP -A33C YI SYLLABLE SSUT -A33D YI SYLLABLE SSUX -A33E YI SYLLABLE SSU -A33F YI SYLLABLE SSUP -A340 YI SYLLABLE SSYT -A341 
YI SYLLABLE SSYX -A342 YI SYLLABLE SSY -A343 YI SYLLABLE SSYP -A344 YI SYLLABLE SSYRX -A345 YI SYLLABLE SSYR -A346 YI SYLLABLE ZHAT -A347 YI SYLLABLE ZHAX -A348 YI SYLLABLE ZHA -A349 YI SYLLABLE ZHAP -A34A YI SYLLABLE ZHUOX -A34B YI SYLLABLE ZHUO -A34C YI SYLLABLE ZHUOP -A34D YI SYLLABLE ZHOT -A34E YI SYLLABLE ZHOX -A34F YI SYLLABLE ZHO -A350 YI SYLLABLE ZHOP -A351 YI SYLLABLE ZHET -A352 YI SYLLABLE ZHEX -A353 YI SYLLABLE ZHE -A354 YI SYLLABLE ZHEP -A355 YI SYLLABLE ZHUT -A356 YI SYLLABLE ZHUX -A357 YI SYLLABLE ZHU -A358 YI SYLLABLE ZHUP -A359 YI SYLLABLE ZHURX -A35A YI SYLLABLE ZHUR -A35B YI SYLLABLE ZHYT -A35C YI SYLLABLE ZHYX -A35D YI SYLLABLE ZHY -A35E YI SYLLABLE ZHYP -A35F YI SYLLABLE ZHYRX -A360 YI SYLLABLE ZHYR -A361 YI SYLLABLE CHAT -A362 YI SYLLABLE CHAX -A363 YI SYLLABLE CHA -A364 YI SYLLABLE CHAP -A365 YI SYLLABLE CHUOT -A366 YI SYLLABLE CHUOX -A367 YI SYLLABLE CHUO -A368 YI SYLLABLE CHUOP -A369 YI SYLLABLE CHOT -A36A YI SYLLABLE CHOX -A36B YI SYLLABLE CHO -A36C YI SYLLABLE CHOP -A36D YI SYLLABLE CHET -A36E YI SYLLABLE CHEX -A36F YI SYLLABLE CHE -A370 YI SYLLABLE CHEP -A371 YI SYLLABLE CHUX -A372 YI SYLLABLE CHU -A373 YI SYLLABLE CHUP -A374 YI SYLLABLE CHURX -A375 YI SYLLABLE CHUR -A376 YI SYLLABLE CHYT -A377 YI SYLLABLE CHYX -A378 YI SYLLABLE CHY -A379 YI SYLLABLE CHYP -A37A YI SYLLABLE CHYRX -A37B YI SYLLABLE CHYR -A37C YI SYLLABLE RRAX -A37D YI SYLLABLE RRA -A37E YI SYLLABLE RRUOX -A37F YI SYLLABLE RRUO -A380 YI SYLLABLE RROT -A381 YI SYLLABLE RROX -A382 YI SYLLABLE RRO -A383 YI SYLLABLE RROP -A384 YI SYLLABLE RRET -A385 YI SYLLABLE RREX -A386 YI SYLLABLE RRE -A387 YI SYLLABLE RREP -A388 YI SYLLABLE RRUT -A389 YI SYLLABLE RRUX -A38A YI SYLLABLE RRU -A38B YI SYLLABLE RRUP -A38C YI SYLLABLE RRURX -A38D YI SYLLABLE RRUR -A38E YI SYLLABLE RRYT -A38F YI SYLLABLE RRYX -A390 YI SYLLABLE RRY -A391 YI SYLLABLE RRYP -A392 YI SYLLABLE RRYRX -A393 YI SYLLABLE RRYR -A394 YI SYLLABLE NRAT -A395 YI SYLLABLE NRAX -A396 YI SYLLABLE NRA -A397 YI SYLLABLE NRAP -A398 YI 
SYLLABLE NROX -A399 YI SYLLABLE NRO -A39A YI SYLLABLE NROP -A39B YI SYLLABLE NRET -A39C YI SYLLABLE NREX -A39D YI SYLLABLE NRE -A39E YI SYLLABLE NREP -A39F YI SYLLABLE NRUT -A3A0 YI SYLLABLE NRUX -A3A1 YI SYLLABLE NRU -A3A2 YI SYLLABLE NRUP -A3A3 YI SYLLABLE NRURX -A3A4 YI SYLLABLE NRUR -A3A5 YI SYLLABLE NRYT -A3A6 YI SYLLABLE NRYX -A3A7 YI SYLLABLE NRY -A3A8 YI SYLLABLE NRYP -A3A9 YI SYLLABLE NRYRX -A3AA YI SYLLABLE NRYR -A3AB YI SYLLABLE SHAT -A3AC YI SYLLABLE SHAX -A3AD YI SYLLABLE SHA -A3AE YI SYLLABLE SHAP -A3AF YI SYLLABLE SHUOX -A3B0 YI SYLLABLE SHUO -A3B1 YI SYLLABLE SHUOP -A3B2 YI SYLLABLE SHOT -A3B3 YI SYLLABLE SHOX -A3B4 YI SYLLABLE SHO -A3B5 YI SYLLABLE SHOP -A3B6 YI SYLLABLE SHET -A3B7 YI SYLLABLE SHEX -A3B8 YI SYLLABLE SHE -A3B9 YI SYLLABLE SHEP -A3BA YI SYLLABLE SHUT -A3BB YI SYLLABLE SHUX -A3BC YI SYLLABLE SHU -A3BD YI SYLLABLE SHUP -A3BE YI SYLLABLE SHURX -A3BF YI SYLLABLE SHUR -A3C0 YI SYLLABLE SHYT -A3C1 YI SYLLABLE SHYX -A3C2 YI SYLLABLE SHY -A3C3 YI SYLLABLE SHYP -A3C4 YI SYLLABLE SHYRX -A3C5 YI SYLLABLE SHYR -A3C6 YI SYLLABLE RAT -A3C7 YI SYLLABLE RAX -A3C8 YI SYLLABLE RA -A3C9 YI SYLLABLE RAP -A3CA YI SYLLABLE RUOX -A3CB YI SYLLABLE RUO -A3CC YI SYLLABLE RUOP -A3CD YI SYLLABLE ROT -A3CE YI SYLLABLE ROX -A3CF YI SYLLABLE RO -A3D0 YI SYLLABLE ROP -A3D1 YI SYLLABLE REX -A3D2 YI SYLLABLE RE -A3D3 YI SYLLABLE REP -A3D4 YI SYLLABLE RUT -A3D5 YI SYLLABLE RUX -A3D6 YI SYLLABLE RU -A3D7 YI SYLLABLE RUP -A3D8 YI SYLLABLE RURX -A3D9 YI SYLLABLE RUR -A3DA YI SYLLABLE RYT -A3DB YI SYLLABLE RYX -A3DC YI SYLLABLE RY -A3DD YI SYLLABLE RYP -A3DE YI SYLLABLE RYRX -A3DF YI SYLLABLE RYR -A3E0 YI SYLLABLE JIT -A3E1 YI SYLLABLE JIX -A3E2 YI SYLLABLE JI -A3E3 YI SYLLABLE JIP -A3E4 YI SYLLABLE JIET -A3E5 YI SYLLABLE JIEX -A3E6 YI SYLLABLE JIE -A3E7 YI SYLLABLE JIEP -A3E8 YI SYLLABLE JUOT -A3E9 YI SYLLABLE JUOX -A3EA YI SYLLABLE JUO -A3EB YI SYLLABLE JUOP -A3EC YI SYLLABLE JOT -A3ED YI SYLLABLE JOX -A3EE YI SYLLABLE JO -A3EF YI SYLLABLE JOP -A3F0 YI SYLLABLE JUT 
-A3F1 YI SYLLABLE JUX -A3F2 YI SYLLABLE JU -A3F3 YI SYLLABLE JUP -A3F4 YI SYLLABLE JURX -A3F5 YI SYLLABLE JUR -A3F6 YI SYLLABLE JYT -A3F7 YI SYLLABLE JYX -A3F8 YI SYLLABLE JY -A3F9 YI SYLLABLE JYP -A3FA YI SYLLABLE JYRX -A3FB YI SYLLABLE JYR -A3FC YI SYLLABLE QIT -A3FD YI SYLLABLE QIX -A3FE YI SYLLABLE QI -A3FF YI SYLLABLE QIP -A400 YI SYLLABLE QIET -A401 YI SYLLABLE QIEX -A402 YI SYLLABLE QIE -A403 YI SYLLABLE QIEP -A404 YI SYLLABLE QUOT -A405 YI SYLLABLE QUOX -A406 YI SYLLABLE QUO -A407 YI SYLLABLE QUOP -A408 YI SYLLABLE QOT -A409 YI SYLLABLE QOX -A40A YI SYLLABLE QO -A40B YI SYLLABLE QOP -A40C YI SYLLABLE QUT -A40D YI SYLLABLE QUX -A40E YI SYLLABLE QU -A40F YI SYLLABLE QUP -A410 YI SYLLABLE QURX -A411 YI SYLLABLE QUR -A412 YI SYLLABLE QYT -A413 YI SYLLABLE QYX -A414 YI SYLLABLE QY -A415 YI SYLLABLE QYP -A416 YI SYLLABLE QYRX -A417 YI SYLLABLE QYR -A418 YI SYLLABLE JJIT -A419 YI SYLLABLE JJIX -A41A YI SYLLABLE JJI -A41B YI SYLLABLE JJIP -A41C YI SYLLABLE JJIET -A41D YI SYLLABLE JJIEX -A41E YI SYLLABLE JJIE -A41F YI SYLLABLE JJIEP -A420 YI SYLLABLE JJUOX -A421 YI SYLLABLE JJUO -A422 YI SYLLABLE JJUOP -A423 YI SYLLABLE JJOT -A424 YI SYLLABLE JJOX -A425 YI SYLLABLE JJO -A426 YI SYLLABLE JJOP -A427 YI SYLLABLE JJUT -A428 YI SYLLABLE JJUX -A429 YI SYLLABLE JJU -A42A YI SYLLABLE JJUP -A42B YI SYLLABLE JJURX -A42C YI SYLLABLE JJUR -A42D YI SYLLABLE JJYT -A42E YI SYLLABLE JJYX -A42F YI SYLLABLE JJY -A430 YI SYLLABLE JJYP -A431 YI SYLLABLE NJIT -A432 YI SYLLABLE NJIX -A433 YI SYLLABLE NJI -A434 YI SYLLABLE NJIP -A435 YI SYLLABLE NJIET -A436 YI SYLLABLE NJIEX -A437 YI SYLLABLE NJIE -A438 YI SYLLABLE NJIEP -A439 YI SYLLABLE NJUOX -A43A YI SYLLABLE NJUO -A43B YI SYLLABLE NJOT -A43C YI SYLLABLE NJOX -A43D YI SYLLABLE NJO -A43E YI SYLLABLE NJOP -A43F YI SYLLABLE NJUX -A440 YI SYLLABLE NJU -A441 YI SYLLABLE NJUP -A442 YI SYLLABLE NJURX -A443 YI SYLLABLE NJUR -A444 YI SYLLABLE NJYT -A445 YI SYLLABLE NJYX -A446 YI SYLLABLE NJY -A447 YI SYLLABLE NJYP -A448 YI SYLLABLE NJYRX -A449 
YI SYLLABLE NJYR -A44A YI SYLLABLE NYIT -A44B YI SYLLABLE NYIX -A44C YI SYLLABLE NYI -A44D YI SYLLABLE NYIP -A44E YI SYLLABLE NYIET -A44F YI SYLLABLE NYIEX -A450 YI SYLLABLE NYIE -A451 YI SYLLABLE NYIEP -A452 YI SYLLABLE NYUOX -A453 YI SYLLABLE NYUO -A454 YI SYLLABLE NYUOP -A455 YI SYLLABLE NYOT -A456 YI SYLLABLE NYOX -A457 YI SYLLABLE NYO -A458 YI SYLLABLE NYOP -A459 YI SYLLABLE NYUT -A45A YI SYLLABLE NYUX -A45B YI SYLLABLE NYU -A45C YI SYLLABLE NYUP -A45D YI SYLLABLE XIT -A45E YI SYLLABLE XIX -A45F YI SYLLABLE XI -A460 YI SYLLABLE XIP -A461 YI SYLLABLE XIET -A462 YI SYLLABLE XIEX -A463 YI SYLLABLE XIE -A464 YI SYLLABLE XIEP -A465 YI SYLLABLE XUOX -A466 YI SYLLABLE XUO -A467 YI SYLLABLE XOT -A468 YI SYLLABLE XOX -A469 YI SYLLABLE XO -A46A YI SYLLABLE XOP -A46B YI SYLLABLE XYT -A46C YI SYLLABLE XYX -A46D YI SYLLABLE XY -A46E YI SYLLABLE XYP -A46F YI SYLLABLE XYRX -A470 YI SYLLABLE XYR -A471 YI SYLLABLE YIT -A472 YI SYLLABLE YIX -A473 YI SYLLABLE YI -A474 YI SYLLABLE YIP -A475 YI SYLLABLE YIET -A476 YI SYLLABLE YIEX -A477 YI SYLLABLE YIE -A478 YI SYLLABLE YIEP -A479 YI SYLLABLE YUOT -A47A YI SYLLABLE YUOX -A47B YI SYLLABLE YUO -A47C YI SYLLABLE YUOP -A47D YI SYLLABLE YOT -A47E YI SYLLABLE YOX -A47F YI SYLLABLE YO -A480 YI SYLLABLE YOP -A481 YI SYLLABLE YUT -A482 YI SYLLABLE YUX -A483 YI SYLLABLE YU -A484 YI SYLLABLE YUP -A485 YI SYLLABLE YURX -A486 YI SYLLABLE YUR -A487 YI SYLLABLE YYT -A488 YI SYLLABLE YYX -A489 YI SYLLABLE YY -A48A YI SYLLABLE YYP -A48B YI SYLLABLE YYRX -A48C YI SYLLABLE YYR -A490 YI RADICAL QOT -A491 YI RADICAL LI -A492 YI RADICAL KIT -A493 YI RADICAL NYIP -A494 YI RADICAL CYP -A495 YI RADICAL SSI -A496 YI RADICAL GGOP -A497 YI RADICAL GEP -A498 YI RADICAL MI -A499 YI RADICAL HXIT -A49A YI RADICAL LYR -A49B YI RADICAL BBUT -A49C YI RADICAL MOP -A49D YI RADICAL YO -A49E YI RADICAL PUT -A49F YI RADICAL HXUO -A4A0 YI RADICAL TAT -A4A1 YI RADICAL GA -A4A2 YI RADICAL ZUP -A4A3 YI RADICAL CYT -A4A4 YI RADICAL DDUR -A4A5 YI RADICAL BUR -A4A6 YI RADICAL 
GGUO -A4A7 YI RADICAL NYOP -A4A8 YI RADICAL TU -A4A9 YI RADICAL OP -A4AA YI RADICAL JJUT -A4AB YI RADICAL ZOT -A4AC YI RADICAL PYT -A4AD YI RADICAL HMO -A4AE YI RADICAL YIT -A4AF YI RADICAL VUR -A4B0 YI RADICAL SHY -A4B1 YI RADICAL VEP -A4B2 YI RADICAL ZA -A4B3 YI RADICAL JO -A4B4 YI RADICAL NZUP -A4B5 YI RADICAL JJY -A4B6 YI RADICAL GOT -A4B7 YI RADICAL JJIE -A4B8 YI RADICAL WO -A4B9 YI RADICAL DU -A4BA YI RADICAL SHUR -A4BB YI RADICAL LIE -A4BC YI RADICAL CY -A4BD YI RADICAL CUOP -A4BE YI RADICAL CIP -A4BF YI RADICAL HXOP -A4C0 YI RADICAL SHAT -A4C1 YI RADICAL ZUR -A4C2 YI RADICAL SHOP -A4C3 YI RADICAL CHE -A4C4 YI RADICAL ZZIET -A4C5 YI RADICAL NBIE -A4C6 YI RADICAL KE -A4D0 LISU LETTER BA -A4D1 LISU LETTER PA -A4D2 LISU LETTER PHA -A4D3 LISU LETTER DA -A4D4 LISU LETTER TA -A4D5 LISU LETTER THA -A4D6 LISU LETTER GA -A4D7 LISU LETTER KA -A4D8 LISU LETTER KHA -A4D9 LISU LETTER JA -A4DA LISU LETTER CA -A4DB LISU LETTER CHA -A4DC LISU LETTER DZA -A4DD LISU LETTER TSA -A4DE LISU LETTER TSHA -A4DF LISU LETTER MA -A4E0 LISU LETTER NA -A4E1 LISU LETTER LA -A4E2 LISU LETTER SA -A4E3 LISU LETTER ZHA -A4E4 LISU LETTER ZA -A4E5 LISU LETTER NGA -A4E6 LISU LETTER HA -A4E7 LISU LETTER XA -A4E8 LISU LETTER HHA -A4E9 LISU LETTER FA -A4EA LISU LETTER WA -A4EB LISU LETTER SHA -A4EC LISU LETTER YA -A4ED LISU LETTER GHA -A4EE LISU LETTER A -A4EF LISU LETTER AE -A4F0 LISU LETTER E -A4F1 LISU LETTER EU -A4F2 LISU LETTER I -A4F3 LISU LETTER O -A4F4 LISU LETTER U -A4F5 LISU LETTER UE -A4F6 LISU LETTER UH -A4F7 LISU LETTER OE -A4F8 LISU LETTER TONE MYA TI -A4F9 LISU LETTER TONE NA PO -A4FA LISU LETTER TONE MYA CYA -A4FB LISU LETTER TONE MYA BO -A4FC LISU LETTER TONE MYA NA -A4FD LISU LETTER TONE MYA JEU -A4FE LISU PUNCTUATION COMMA -A4FF LISU PUNCTUATION FULL STOP -A500 VAI SYLLABLE EE -A501 VAI SYLLABLE EEN -A502 VAI SYLLABLE HEE -A503 VAI SYLLABLE WEE -A504 VAI SYLLABLE WEEN -A505 VAI SYLLABLE PEE -A506 VAI SYLLABLE BHEE -A507 VAI SYLLABLE BEE -A508 VAI SYLLABLE MBEE -A509 VAI SYLLABLE 
KPEE -A50A VAI SYLLABLE MGBEE -A50B VAI SYLLABLE GBEE -A50C VAI SYLLABLE FEE -A50D VAI SYLLABLE VEE -A50E VAI SYLLABLE TEE -A50F VAI SYLLABLE THEE -A510 VAI SYLLABLE DHEE -A511 VAI SYLLABLE DHHEE -A512 VAI SYLLABLE LEE -A513 VAI SYLLABLE REE -A514 VAI SYLLABLE DEE -A515 VAI SYLLABLE NDEE -A516 VAI SYLLABLE SEE -A517 VAI SYLLABLE SHEE -A518 VAI SYLLABLE ZEE -A519 VAI SYLLABLE ZHEE -A51A VAI SYLLABLE CEE -A51B VAI SYLLABLE JEE -A51C VAI SYLLABLE NJEE -A51D VAI SYLLABLE YEE -A51E VAI SYLLABLE KEE -A51F VAI SYLLABLE NGGEE -A520 VAI SYLLABLE GEE -A521 VAI SYLLABLE MEE -A522 VAI SYLLABLE NEE -A523 VAI SYLLABLE NYEE -A524 VAI SYLLABLE I -A525 VAI SYLLABLE IN -A526 VAI SYLLABLE HI -A527 VAI SYLLABLE HIN -A528 VAI SYLLABLE WI -A529 VAI SYLLABLE WIN -A52A VAI SYLLABLE PI -A52B VAI SYLLABLE BHI -A52C VAI SYLLABLE BI -A52D VAI SYLLABLE MBI -A52E VAI SYLLABLE KPI -A52F VAI SYLLABLE MGBI -A530 VAI SYLLABLE GBI -A531 VAI SYLLABLE FI -A532 VAI SYLLABLE VI -A533 VAI SYLLABLE TI -A534 VAI SYLLABLE THI -A535 VAI SYLLABLE DHI -A536 VAI SYLLABLE DHHI -A537 VAI SYLLABLE LI -A538 VAI SYLLABLE RI -A539 VAI SYLLABLE DI -A53A VAI SYLLABLE NDI -A53B VAI SYLLABLE SI -A53C VAI SYLLABLE SHI -A53D VAI SYLLABLE ZI -A53E VAI SYLLABLE ZHI -A53F VAI SYLLABLE CI -A540 VAI SYLLABLE JI -A541 VAI SYLLABLE NJI -A542 VAI SYLLABLE YI -A543 VAI SYLLABLE KI -A544 VAI SYLLABLE NGGI -A545 VAI SYLLABLE GI -A546 VAI SYLLABLE MI -A547 VAI SYLLABLE NI -A548 VAI SYLLABLE NYI -A549 VAI SYLLABLE A -A54A VAI SYLLABLE AN -A54B VAI SYLLABLE NGAN -A54C VAI SYLLABLE HA -A54D VAI SYLLABLE HAN -A54E VAI SYLLABLE WA -A54F VAI SYLLABLE WAN -A550 VAI SYLLABLE PA -A551 VAI SYLLABLE BHA -A552 VAI SYLLABLE BA -A553 VAI SYLLABLE MBA -A554 VAI SYLLABLE KPA -A555 VAI SYLLABLE KPAN -A556 VAI SYLLABLE MGBA -A557 VAI SYLLABLE GBA -A558 VAI SYLLABLE FA -A559 VAI SYLLABLE VA -A55A VAI SYLLABLE TA -A55B VAI SYLLABLE THA -A55C VAI SYLLABLE DHA -A55D VAI SYLLABLE DHHA -A55E VAI SYLLABLE LA -A55F VAI SYLLABLE RA -A560 VAI SYLLABLE DA -A561 
VAI SYLLABLE NDA -A562 VAI SYLLABLE SA -A563 VAI SYLLABLE SHA -A564 VAI SYLLABLE ZA -A565 VAI SYLLABLE ZHA -A566 VAI SYLLABLE CA -A567 VAI SYLLABLE JA -A568 VAI SYLLABLE NJA -A569 VAI SYLLABLE YA -A56A VAI SYLLABLE KA -A56B VAI SYLLABLE KAN -A56C VAI SYLLABLE NGGA -A56D VAI SYLLABLE GA -A56E VAI SYLLABLE MA -A56F VAI SYLLABLE NA -A570 VAI SYLLABLE NYA -A571 VAI SYLLABLE OO -A572 VAI SYLLABLE OON -A573 VAI SYLLABLE HOO -A574 VAI SYLLABLE WOO -A575 VAI SYLLABLE WOON -A576 VAI SYLLABLE POO -A577 VAI SYLLABLE BHOO -A578 VAI SYLLABLE BOO -A579 VAI SYLLABLE MBOO -A57A VAI SYLLABLE KPOO -A57B VAI SYLLABLE MGBOO -A57C VAI SYLLABLE GBOO -A57D VAI SYLLABLE FOO -A57E VAI SYLLABLE VOO -A57F VAI SYLLABLE TOO -A580 VAI SYLLABLE THOO -A581 VAI SYLLABLE DHOO -A582 VAI SYLLABLE DHHOO -A583 VAI SYLLABLE LOO -A584 VAI SYLLABLE ROO -A585 VAI SYLLABLE DOO -A586 VAI SYLLABLE NDOO -A587 VAI SYLLABLE SOO -A588 VAI SYLLABLE SHOO -A589 VAI SYLLABLE ZOO -A58A VAI SYLLABLE ZHOO -A58B VAI SYLLABLE COO -A58C VAI SYLLABLE JOO -A58D VAI SYLLABLE NJOO -A58E VAI SYLLABLE YOO -A58F VAI SYLLABLE KOO -A590 VAI SYLLABLE NGGOO -A591 VAI SYLLABLE GOO -A592 VAI SYLLABLE MOO -A593 VAI SYLLABLE NOO -A594 VAI SYLLABLE NYOO -A595 VAI SYLLABLE U -A596 VAI SYLLABLE UN -A597 VAI SYLLABLE HU -A598 VAI SYLLABLE HUN -A599 VAI SYLLABLE WU -A59A VAI SYLLABLE WUN -A59B VAI SYLLABLE PU -A59C VAI SYLLABLE BHU -A59D VAI SYLLABLE BU -A59E VAI SYLLABLE MBU -A59F VAI SYLLABLE KPU -A5A0 VAI SYLLABLE MGBU -A5A1 VAI SYLLABLE GBU -A5A2 VAI SYLLABLE FU -A5A3 VAI SYLLABLE VU -A5A4 VAI SYLLABLE TU -A5A5 VAI SYLLABLE THU -A5A6 VAI SYLLABLE DHU -A5A7 VAI SYLLABLE DHHU -A5A8 VAI SYLLABLE LU -A5A9 VAI SYLLABLE RU -A5AA VAI SYLLABLE DU -A5AB VAI SYLLABLE NDU -A5AC VAI SYLLABLE SU -A5AD VAI SYLLABLE SHU -A5AE VAI SYLLABLE ZU -A5AF VAI SYLLABLE ZHU -A5B0 VAI SYLLABLE CU -A5B1 VAI SYLLABLE JU -A5B2 VAI SYLLABLE NJU -A5B3 VAI SYLLABLE YU -A5B4 VAI SYLLABLE KU -A5B5 VAI SYLLABLE NGGU -A5B6 VAI SYLLABLE GU -A5B7 VAI SYLLABLE MU -A5B8 VAI 
SYLLABLE NU -A5B9 VAI SYLLABLE NYU -A5BA VAI SYLLABLE O -A5BB VAI SYLLABLE ON -A5BC VAI SYLLABLE NGON -A5BD VAI SYLLABLE HO -A5BE VAI SYLLABLE HON -A5BF VAI SYLLABLE WO -A5C0 VAI SYLLABLE WON -A5C1 VAI SYLLABLE PO -A5C2 VAI SYLLABLE BHO -A5C3 VAI SYLLABLE BO -A5C4 VAI SYLLABLE MBO -A5C5 VAI SYLLABLE KPO -A5C6 VAI SYLLABLE MGBO -A5C7 VAI SYLLABLE GBO -A5C8 VAI SYLLABLE GBON -A5C9 VAI SYLLABLE FO -A5CA VAI SYLLABLE VO -A5CB VAI SYLLABLE TO -A5CC VAI SYLLABLE THO -A5CD VAI SYLLABLE DHO -A5CE VAI SYLLABLE DHHO -A5CF VAI SYLLABLE LO -A5D0 VAI SYLLABLE RO -A5D1 VAI SYLLABLE DO -A5D2 VAI SYLLABLE NDO -A5D3 VAI SYLLABLE SO -A5D4 VAI SYLLABLE SHO -A5D5 VAI SYLLABLE ZO -A5D6 VAI SYLLABLE ZHO -A5D7 VAI SYLLABLE CO -A5D8 VAI SYLLABLE JO -A5D9 VAI SYLLABLE NJO -A5DA VAI SYLLABLE YO -A5DB VAI SYLLABLE KO -A5DC VAI SYLLABLE NGGO -A5DD VAI SYLLABLE GO -A5DE VAI SYLLABLE MO -A5DF VAI SYLLABLE NO -A5E0 VAI SYLLABLE NYO -A5E1 VAI SYLLABLE E -A5E2 VAI SYLLABLE EN -A5E3 VAI SYLLABLE NGEN -A5E4 VAI SYLLABLE HE -A5E5 VAI SYLLABLE HEN -A5E6 VAI SYLLABLE WE -A5E7 VAI SYLLABLE WEN -A5E8 VAI SYLLABLE PE -A5E9 VAI SYLLABLE BHE -A5EA VAI SYLLABLE BE -A5EB VAI SYLLABLE MBE -A5EC VAI SYLLABLE KPE -A5ED VAI SYLLABLE KPEN -A5EE VAI SYLLABLE MGBE -A5EF VAI SYLLABLE GBE -A5F0 VAI SYLLABLE GBEN -A5F1 VAI SYLLABLE FE -A5F2 VAI SYLLABLE VE -A5F3 VAI SYLLABLE TE -A5F4 VAI SYLLABLE THE -A5F5 VAI SYLLABLE DHE -A5F6 VAI SYLLABLE DHHE -A5F7 VAI SYLLABLE LE -A5F8 VAI SYLLABLE RE -A5F9 VAI SYLLABLE DE -A5FA VAI SYLLABLE NDE -A5FB VAI SYLLABLE SE -A5FC VAI SYLLABLE SHE -A5FD VAI SYLLABLE ZE -A5FE VAI SYLLABLE ZHE -A5FF VAI SYLLABLE CE -A600 VAI SYLLABLE JE -A601 VAI SYLLABLE NJE -A602 VAI SYLLABLE YE -A603 VAI SYLLABLE KE -A604 VAI SYLLABLE NGGE -A605 VAI SYLLABLE NGGEN -A606 VAI SYLLABLE GE -A607 VAI SYLLABLE GEN -A608 VAI SYLLABLE ME -A609 VAI SYLLABLE NE -A60A VAI SYLLABLE NYE -A60B VAI SYLLABLE NG -A60C VAI SYLLABLE LENGTHENER -A60D VAI COMMA -A60E VAI FULL STOP -A60F VAI QUESTION MARK -A610 VAI SYLLABLE 
NDOLE FA -A611 VAI SYLLABLE NDOLE KA -A612 VAI SYLLABLE NDOLE SOO -A613 VAI SYMBOL FEENG -A614 VAI SYMBOL KEENG -A615 VAI SYMBOL TING -A616 VAI SYMBOL NII -A617 VAI SYMBOL BANG -A618 VAI SYMBOL FAA -A619 VAI SYMBOL TAA -A61A VAI SYMBOL DANG -A61B VAI SYMBOL DOONG -A61C VAI SYMBOL KUNG -A61D VAI SYMBOL TONG -A61E VAI SYMBOL DO-O -A61F VAI SYMBOL JONG -A620 VAI DIGIT ZERO -A621 VAI DIGIT ONE -A622 VAI DIGIT TWO -A623 VAI DIGIT THREE -A624 VAI DIGIT FOUR -A625 VAI DIGIT FIVE -A626 VAI DIGIT SIX -A627 VAI DIGIT SEVEN -A628 VAI DIGIT EIGHT -A629 VAI DIGIT NINE -A62A VAI SYLLABLE NDOLE MA -A62B VAI SYLLABLE NDOLE DO -A640 CYRILLIC CAPITAL LETTER ZEMLYA -A641 CYRILLIC SMALL LETTER ZEMLYA -A642 CYRILLIC CAPITAL LETTER DZELO -A643 CYRILLIC SMALL LETTER DZELO -A644 CYRILLIC CAPITAL LETTER REVERSED DZE -A645 CYRILLIC SMALL LETTER REVERSED DZE -A646 CYRILLIC CAPITAL LETTER IOTA -A647 CYRILLIC SMALL LETTER IOTA -A648 CYRILLIC CAPITAL LETTER DJERV -A649 CYRILLIC SMALL LETTER DJERV -A64A CYRILLIC CAPITAL LETTER MONOGRAPH UK -A64B CYRILLIC SMALL LETTER MONOGRAPH UK -A64C CYRILLIC CAPITAL LETTER BROAD OMEGA -A64D CYRILLIC SMALL LETTER BROAD OMEGA -A64E CYRILLIC CAPITAL LETTER NEUTRAL YER -A64F CYRILLIC SMALL LETTER NEUTRAL YER -A650 CYRILLIC CAPITAL LETTER YERU WITH BACK YER -A651 CYRILLIC SMALL LETTER YERU WITH BACK YER -A652 CYRILLIC CAPITAL LETTER IOTIFIED YAT -A653 CYRILLIC SMALL LETTER IOTIFIED YAT -A654 CYRILLIC CAPITAL LETTER REVERSED YU -A655 CYRILLIC SMALL LETTER REVERSED YU -A656 CYRILLIC CAPITAL LETTER IOTIFIED A -A657 CYRILLIC SMALL LETTER IOTIFIED A -A658 CYRILLIC CAPITAL LETTER CLOSED LITTLE YUS -A659 CYRILLIC SMALL LETTER CLOSED LITTLE YUS -A65A CYRILLIC CAPITAL LETTER BLENDED YUS -A65B CYRILLIC SMALL LETTER BLENDED YUS -A65C CYRILLIC CAPITAL LETTER IOTIFIED CLOSED LITTLE YUS -A65D CYRILLIC SMALL LETTER IOTIFIED CLOSED LITTLE YUS -A65E CYRILLIC CAPITAL LETTER YN -A65F CYRILLIC SMALL LETTER YN -A662 CYRILLIC CAPITAL LETTER SOFT DE -A663 CYRILLIC SMALL LETTER SOFT DE 
-A664 CYRILLIC CAPITAL LETTER SOFT EL -A665 CYRILLIC SMALL LETTER SOFT EL -A666 CYRILLIC CAPITAL LETTER SOFT EM -A667 CYRILLIC SMALL LETTER SOFT EM -A668 CYRILLIC CAPITAL LETTER MONOCULAR O -A669 CYRILLIC SMALL LETTER MONOCULAR O -A66A CYRILLIC CAPITAL LETTER BINOCULAR O -A66B CYRILLIC SMALL LETTER BINOCULAR O -A66C CYRILLIC CAPITAL LETTER DOUBLE MONOCULAR O -A66D CYRILLIC SMALL LETTER DOUBLE MONOCULAR O -A66E CYRILLIC LETTER MULTIOCULAR O -A66F COMBINING CYRILLIC VZMET -A670 COMBINING CYRILLIC TEN MILLIONS SIGN -A671 COMBINING CYRILLIC HUNDRED MILLIONS SIGN -A672 COMBINING CYRILLIC THOUSAND MILLIONS SIGN -A673 SLAVONIC ASTERISK -A67C COMBINING CYRILLIC KAVYKA -A67D COMBINING CYRILLIC PAYEROK -A67E CYRILLIC KAVYKA -A67F CYRILLIC PAYEROK -A680 CYRILLIC CAPITAL LETTER DWE -A681 CYRILLIC SMALL LETTER DWE -A682 CYRILLIC CAPITAL LETTER DZWE -A683 CYRILLIC SMALL LETTER DZWE -A684 CYRILLIC CAPITAL LETTER ZHWE -A685 CYRILLIC SMALL LETTER ZHWE -A686 CYRILLIC CAPITAL LETTER CCHE -A687 CYRILLIC SMALL LETTER CCHE -A688 CYRILLIC CAPITAL LETTER DZZE -A689 CYRILLIC SMALL LETTER DZZE -A68A CYRILLIC CAPITAL LETTER TE WITH MIDDLE HOOK -A68B CYRILLIC SMALL LETTER TE WITH MIDDLE HOOK -A68C CYRILLIC CAPITAL LETTER TWE -A68D CYRILLIC SMALL LETTER TWE -A68E CYRILLIC CAPITAL LETTER TSWE -A68F CYRILLIC SMALL LETTER TSWE -A690 CYRILLIC CAPITAL LETTER TSSE -A691 CYRILLIC SMALL LETTER TSSE -A692 CYRILLIC CAPITAL LETTER TCHE -A693 CYRILLIC SMALL LETTER TCHE -A694 CYRILLIC CAPITAL LETTER HWE -A695 CYRILLIC SMALL LETTER HWE -A696 CYRILLIC CAPITAL LETTER SHWE -A697 CYRILLIC SMALL LETTER SHWE -A6A0 BAMUM LETTER A -A6A1 BAMUM LETTER KA -A6A2 BAMUM LETTER U -A6A3 BAMUM LETTER KU -A6A4 BAMUM LETTER EE -A6A5 BAMUM LETTER REE -A6A6 BAMUM LETTER TAE -A6A7 BAMUM LETTER O -A6A8 BAMUM LETTER NYI -A6A9 BAMUM LETTER I -A6AA BAMUM LETTER LA -A6AB BAMUM LETTER PA -A6AC BAMUM LETTER RII -A6AD BAMUM LETTER RIEE -A6AE BAMUM LETTER LEEEE -A6AF BAMUM LETTER MEEEE -A6B0 BAMUM LETTER TAA -A6B1 BAMUM LETTER NDAA -A6B2 
BAMUM LETTER NJAEM -A6B3 BAMUM LETTER M -A6B4 BAMUM LETTER SUU -A6B5 BAMUM LETTER MU -A6B6 BAMUM LETTER SHII -A6B7 BAMUM LETTER SI -A6B8 BAMUM LETTER SHEUX -A6B9 BAMUM LETTER SEUX -A6BA BAMUM LETTER KYEE -A6BB BAMUM LETTER KET -A6BC BAMUM LETTER NUAE -A6BD BAMUM LETTER NU -A6BE BAMUM LETTER NJUAE -A6BF BAMUM LETTER YOQ -A6C0 BAMUM LETTER SHU -A6C1 BAMUM LETTER YUQ -A6C2 BAMUM LETTER YA -A6C3 BAMUM LETTER NSHA -A6C4 BAMUM LETTER KEUX -A6C5 BAMUM LETTER PEUX -A6C6 BAMUM LETTER NJEE -A6C7 BAMUM LETTER NTEE -A6C8 BAMUM LETTER PUE -A6C9 BAMUM LETTER WUE -A6CA BAMUM LETTER PEE -A6CB BAMUM LETTER FEE -A6CC BAMUM LETTER RU -A6CD BAMUM LETTER LU -A6CE BAMUM LETTER MI -A6CF BAMUM LETTER NI -A6D0 BAMUM LETTER REUX -A6D1 BAMUM LETTER RAE -A6D2 BAMUM LETTER KEN -A6D3 BAMUM LETTER NGKWAEN -A6D4 BAMUM LETTER NGGA -A6D5 BAMUM LETTER NGA -A6D6 BAMUM LETTER SHO -A6D7 BAMUM LETTER PUAE -A6D8 BAMUM LETTER FU -A6D9 BAMUM LETTER FOM -A6DA BAMUM LETTER WA -A6DB BAMUM LETTER NA -A6DC BAMUM LETTER LI -A6DD BAMUM LETTER PI -A6DE BAMUM LETTER LOQ -A6DF BAMUM LETTER KO -A6E0 BAMUM LETTER MBEN -A6E1 BAMUM LETTER REN -A6E2 BAMUM LETTER MEN -A6E3 BAMUM LETTER MA -A6E4 BAMUM LETTER TI -A6E5 BAMUM LETTER KI -A6E6 BAMUM LETTER MO -A6E7 BAMUM LETTER MBAA -A6E8 BAMUM LETTER TET -A6E9 BAMUM LETTER KPA -A6EA BAMUM LETTER TEN -A6EB BAMUM LETTER NTUU -A6EC BAMUM LETTER SAMBA -A6ED BAMUM LETTER FAAMAE -A6EE BAMUM LETTER KOVUU -A6EF BAMUM LETTER KOGHOM -A6F0 BAMUM COMBINING MARK KOQNDON -A6F1 BAMUM COMBINING MARK TUKWENTIS -A6F2 BAMUM NJAEMLI -A6F3 BAMUM FULL STOP -A6F4 BAMUM COLON -A6F5 BAMUM COMMA -A6F6 BAMUM SEMICOLON -A6F7 BAMUM QUESTION MARK -A700 MODIFIER LETTER CHINESE TONE YIN PING -A701 MODIFIER LETTER CHINESE TONE YANG PING -A702 MODIFIER LETTER CHINESE TONE YIN SHANG -A703 MODIFIER LETTER CHINESE TONE YANG SHANG -A704 MODIFIER LETTER CHINESE TONE YIN QU -A705 MODIFIER LETTER CHINESE TONE YANG QU -A706 MODIFIER LETTER CHINESE TONE YIN RU -A707 MODIFIER LETTER CHINESE TONE YANG RU -A708 MODIFIER 
LETTER EXTRA-HIGH DOTTED TONE BAR -A709 MODIFIER LETTER HIGH DOTTED TONE BAR -A70A MODIFIER LETTER MID DOTTED TONE BAR -A70B MODIFIER LETTER LOW DOTTED TONE BAR -A70C MODIFIER LETTER EXTRA-LOW DOTTED TONE BAR -A70D MODIFIER LETTER EXTRA-HIGH DOTTED LEFT-STEM TONE BAR -A70E MODIFIER LETTER HIGH DOTTED LEFT-STEM TONE BAR -A70F MODIFIER LETTER MID DOTTED LEFT-STEM TONE BAR -A710 MODIFIER LETTER LOW DOTTED LEFT-STEM TONE BAR -A711 MODIFIER LETTER EXTRA-LOW DOTTED LEFT-STEM TONE BAR -A712 MODIFIER LETTER EXTRA-HIGH LEFT-STEM TONE BAR -A713 MODIFIER LETTER HIGH LEFT-STEM TONE BAR -A714 MODIFIER LETTER MID LEFT-STEM TONE BAR -A715 MODIFIER LETTER LOW LEFT-STEM TONE BAR -A716 MODIFIER LETTER EXTRA-LOW LEFT-STEM TONE BAR -A717 MODIFIER LETTER DOT VERTICAL BAR -A718 MODIFIER LETTER DOT SLASH -A719 MODIFIER LETTER DOT HORIZONTAL BAR -A71A MODIFIER LETTER LOWER RIGHT CORNER ANGLE -A71B MODIFIER LETTER RAISED UP ARROW -A71C MODIFIER LETTER RAISED DOWN ARROW -A71D MODIFIER LETTER RAISED EXCLAMATION MARK -A71E MODIFIER LETTER RAISED INVERTED EXCLAMATION MARK -A71F MODIFIER LETTER LOW INVERTED EXCLAMATION MARK -A720 MODIFIER LETTER STRESS AND HIGH TONE -A721 MODIFIER LETTER STRESS AND LOW TONE -A722 LATIN CAPITAL LETTER EGYPTOLOGICAL ALEF -A723 LATIN SMALL LETTER EGYPTOLOGICAL ALEF -A724 LATIN CAPITAL LETTER EGYPTOLOGICAL AIN -A725 LATIN SMALL LETTER EGYPTOLOGICAL AIN -A726 LATIN CAPITAL LETTER HENG -A727 LATIN SMALL LETTER HENG -A728 LATIN CAPITAL LETTER TZ -A729 LATIN SMALL LETTER TZ -A72A LATIN CAPITAL LETTER TRESILLO -A72B LATIN SMALL LETTER TRESILLO -A72C LATIN CAPITAL LETTER CUATRILLO -A72D LATIN SMALL LETTER CUATRILLO -A72E LATIN CAPITAL LETTER CUATRILLO WITH COMMA -A72F LATIN SMALL LETTER CUATRILLO WITH COMMA -A730 LATIN LETTER SMALL CAPITAL F -A731 LATIN LETTER SMALL CAPITAL S -A732 LATIN CAPITAL LETTER AA -A733 LATIN SMALL LETTER AA -A734 LATIN CAPITAL LETTER AO -A735 LATIN SMALL LETTER AO -A736 LATIN CAPITAL LETTER AU -A737 LATIN SMALL LETTER AU -A738 LATIN CAPITAL 
LETTER AV -A739 LATIN SMALL LETTER AV -A73A LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR -A73B LATIN SMALL LETTER AV WITH HORIZONTAL BAR -A73C LATIN CAPITAL LETTER AY -A73D LATIN SMALL LETTER AY -A73E LATIN CAPITAL LETTER REVERSED C WITH DOT -A73F LATIN SMALL LETTER REVERSED C WITH DOT -A740 LATIN CAPITAL LETTER K WITH STROKE -A741 LATIN SMALL LETTER K WITH STROKE -A742 LATIN CAPITAL LETTER K WITH DIAGONAL STROKE -A743 LATIN SMALL LETTER K WITH DIAGONAL STROKE -A744 LATIN CAPITAL LETTER K WITH STROKE AND DIAGONAL STROKE -A745 LATIN SMALL LETTER K WITH STROKE AND DIAGONAL STROKE -A746 LATIN CAPITAL LETTER BROKEN L -A747 LATIN SMALL LETTER BROKEN L -A748 LATIN CAPITAL LETTER L WITH HIGH STROKE -A749 LATIN SMALL LETTER L WITH HIGH STROKE -A74A LATIN CAPITAL LETTER O WITH LONG STROKE OVERLAY -A74B LATIN SMALL LETTER O WITH LONG STROKE OVERLAY -A74C LATIN CAPITAL LETTER O WITH LOOP -A74D LATIN SMALL LETTER O WITH LOOP -A74E LATIN CAPITAL LETTER OO -A74F LATIN SMALL LETTER OO -A750 LATIN CAPITAL LETTER P WITH STROKE THROUGH DESCENDER -A751 LATIN SMALL LETTER P WITH STROKE THROUGH DESCENDER -A752 LATIN CAPITAL LETTER P WITH FLOURISH -A753 LATIN SMALL LETTER P WITH FLOURISH -A754 LATIN CAPITAL LETTER P WITH SQUIRREL TAIL -A755 LATIN SMALL LETTER P WITH SQUIRREL TAIL -A756 LATIN CAPITAL LETTER Q WITH STROKE THROUGH DESCENDER -A757 LATIN SMALL LETTER Q WITH STROKE THROUGH DESCENDER -A758 LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE -A759 LATIN SMALL LETTER Q WITH DIAGONAL STROKE -A75A LATIN CAPITAL LETTER R ROTUNDA -A75B LATIN SMALL LETTER R ROTUNDA -A75C LATIN CAPITAL LETTER RUM ROTUNDA -A75D LATIN SMALL LETTER RUM ROTUNDA -A75E LATIN CAPITAL LETTER V WITH DIAGONAL STROKE -A75F LATIN SMALL LETTER V WITH DIAGONAL STROKE -A760 LATIN CAPITAL LETTER VY -A761 LATIN SMALL LETTER VY -A762 LATIN CAPITAL LETTER VISIGOTHIC Z -A763 LATIN SMALL LETTER VISIGOTHIC Z -A764 LATIN CAPITAL LETTER THORN WITH STROKE -A765 LATIN SMALL LETTER THORN WITH STROKE -A766 LATIN CAPITAL LETTER THORN WITH 
STROKE THROUGH DESCENDER -A767 LATIN SMALL LETTER THORN WITH STROKE THROUGH DESCENDER -A768 LATIN CAPITAL LETTER VEND -A769 LATIN SMALL LETTER VEND -A76A LATIN CAPITAL LETTER ET -A76B LATIN SMALL LETTER ET -A76C LATIN CAPITAL LETTER IS -A76D LATIN SMALL LETTER IS -A76E LATIN CAPITAL LETTER CON -A76F LATIN SMALL LETTER CON -A770 MODIFIER LETTER US -A771 LATIN SMALL LETTER DUM -A772 LATIN SMALL LETTER LUM -A773 LATIN SMALL LETTER MUM -A774 LATIN SMALL LETTER NUM -A775 LATIN SMALL LETTER RUM -A776 LATIN LETTER SMALL CAPITAL RUM -A777 LATIN SMALL LETTER TUM -A778 LATIN SMALL LETTER UM -A779 LATIN CAPITAL LETTER INSULAR D -A77A LATIN SMALL LETTER INSULAR D -A77B LATIN CAPITAL LETTER INSULAR F -A77C LATIN SMALL LETTER INSULAR F -A77D LATIN CAPITAL LETTER INSULAR G -A77E LATIN CAPITAL LETTER TURNED INSULAR G -A77F LATIN SMALL LETTER TURNED INSULAR G -A780 LATIN CAPITAL LETTER TURNED L -A781 LATIN SMALL LETTER TURNED L -A782 LATIN CAPITAL LETTER INSULAR R -A783 LATIN SMALL LETTER INSULAR R -A784 LATIN CAPITAL LETTER INSULAR S -A785 LATIN SMALL LETTER INSULAR S -A786 LATIN CAPITAL LETTER INSULAR T -A787 LATIN SMALL LETTER INSULAR T -A788 MODIFIER LETTER LOW CIRCUMFLEX ACCENT -A789 MODIFIER LETTER COLON -A78A MODIFIER LETTER SHORT EQUALS SIGN -A78B LATIN CAPITAL LETTER SALTILLO -A78C LATIN SMALL LETTER SALTILLO -A7FB LATIN EPIGRAPHIC LETTER REVERSED F -A7FC LATIN EPIGRAPHIC LETTER REVERSED P -A7FD LATIN EPIGRAPHIC LETTER INVERTED M -A7FE LATIN EPIGRAPHIC LETTER I LONGA -A7FF LATIN EPIGRAPHIC LETTER ARCHAIC M -A800 SYLOTI NAGRI LETTER A -A801 SYLOTI NAGRI LETTER I -A802 SYLOTI NAGRI SIGN DVISVARA -A803 SYLOTI NAGRI LETTER U -A804 SYLOTI NAGRI LETTER E -A805 SYLOTI NAGRI LETTER O -A806 SYLOTI NAGRI SIGN HASANTA -A807 SYLOTI NAGRI LETTER KO -A808 SYLOTI NAGRI LETTER KHO -A809 SYLOTI NAGRI LETTER GO -A80A SYLOTI NAGRI LETTER GHO -A80B SYLOTI NAGRI SIGN ANUSVARA -A80C SYLOTI NAGRI LETTER CO -A80D SYLOTI NAGRI LETTER CHO -A80E SYLOTI NAGRI LETTER JO -A80F SYLOTI NAGRI LETTER JHO 
-A810 SYLOTI NAGRI LETTER TTO -A811 SYLOTI NAGRI LETTER TTHO -A812 SYLOTI NAGRI LETTER DDO -A813 SYLOTI NAGRI LETTER DDHO -A814 SYLOTI NAGRI LETTER TO -A815 SYLOTI NAGRI LETTER THO -A816 SYLOTI NAGRI LETTER DO -A817 SYLOTI NAGRI LETTER DHO -A818 SYLOTI NAGRI LETTER NO -A819 SYLOTI NAGRI LETTER PO -A81A SYLOTI NAGRI LETTER PHO -A81B SYLOTI NAGRI LETTER BO -A81C SYLOTI NAGRI LETTER BHO -A81D SYLOTI NAGRI LETTER MO -A81E SYLOTI NAGRI LETTER RO -A81F SYLOTI NAGRI LETTER LO -A820 SYLOTI NAGRI LETTER RRO -A821 SYLOTI NAGRI LETTER SO -A822 SYLOTI NAGRI LETTER HO -A823 SYLOTI NAGRI VOWEL SIGN A -A824 SYLOTI NAGRI VOWEL SIGN I -A825 SYLOTI NAGRI VOWEL SIGN U -A826 SYLOTI NAGRI VOWEL SIGN E -A827 SYLOTI NAGRI VOWEL SIGN OO -A828 SYLOTI NAGRI POETRY MARK-1 -A829 SYLOTI NAGRI POETRY MARK-2 -A82A SYLOTI NAGRI POETRY MARK-3 -A82B SYLOTI NAGRI POETRY MARK-4 -A830 NORTH INDIC FRACTION ONE QUARTER -A831 NORTH INDIC FRACTION ONE HALF -A832 NORTH INDIC FRACTION THREE QUARTERS -A833 NORTH INDIC FRACTION ONE SIXTEENTH -A834 NORTH INDIC FRACTION ONE EIGHTH -A835 NORTH INDIC FRACTION THREE SIXTEENTHS -A836 NORTH INDIC QUARTER MARK -A837 NORTH INDIC PLACEHOLDER MARK -A838 NORTH INDIC RUPEE MARK -A839 NORTH INDIC QUANTITY MARK -A840 PHAGS-PA LETTER KA -A841 PHAGS-PA LETTER KHA -A842 PHAGS-PA LETTER GA -A843 PHAGS-PA LETTER NGA -A844 PHAGS-PA LETTER CA -A845 PHAGS-PA LETTER CHA -A846 PHAGS-PA LETTER JA -A847 PHAGS-PA LETTER NYA -A848 PHAGS-PA LETTER TA -A849 PHAGS-PA LETTER THA -A84A PHAGS-PA LETTER DA -A84B PHAGS-PA LETTER NA -A84C PHAGS-PA LETTER PA -A84D PHAGS-PA LETTER PHA -A84E PHAGS-PA LETTER BA -A84F PHAGS-PA LETTER MA -A850 PHAGS-PA LETTER TSA -A851 PHAGS-PA LETTER TSHA -A852 PHAGS-PA LETTER DZA -A853 PHAGS-PA LETTER WA -A854 PHAGS-PA LETTER ZHA -A855 PHAGS-PA LETTER ZA -A856 PHAGS-PA LETTER SMALL A -A857 PHAGS-PA LETTER YA -A858 PHAGS-PA LETTER RA -A859 PHAGS-PA LETTER LA -A85A PHAGS-PA LETTER SHA -A85B PHAGS-PA LETTER SA -A85C PHAGS-PA LETTER HA -A85D PHAGS-PA LETTER A -A85E 
PHAGS-PA LETTER I -A85F PHAGS-PA LETTER U -A860 PHAGS-PA LETTER E -A861 PHAGS-PA LETTER O -A862 PHAGS-PA LETTER QA -A863 PHAGS-PA LETTER XA -A864 PHAGS-PA LETTER FA -A865 PHAGS-PA LETTER GGA -A866 PHAGS-PA LETTER EE -A867 PHAGS-PA SUBJOINED LETTER WA -A868 PHAGS-PA SUBJOINED LETTER YA -A869 PHAGS-PA LETTER TTA -A86A PHAGS-PA LETTER TTHA -A86B PHAGS-PA LETTER DDA -A86C PHAGS-PA LETTER NNA -A86D PHAGS-PA LETTER ALTERNATE YA -A86E PHAGS-PA LETTER VOICELESS SHA -A86F PHAGS-PA LETTER VOICED HA -A870 PHAGS-PA LETTER ASPIRATED FA -A871 PHAGS-PA SUBJOINED LETTER RA -A872 PHAGS-PA SUPERFIXED LETTER RA -A873 PHAGS-PA LETTER CANDRABINDU -A874 PHAGS-PA SINGLE HEAD MARK -A875 PHAGS-PA DOUBLE HEAD MARK -A876 PHAGS-PA MARK SHAD -A877 PHAGS-PA MARK DOUBLE SHAD -A880 SAURASHTRA SIGN ANUSVARA -A881 SAURASHTRA SIGN VISARGA -A882 SAURASHTRA LETTER A -A883 SAURASHTRA LETTER AA -A884 SAURASHTRA LETTER I -A885 SAURASHTRA LETTER II -A886 SAURASHTRA LETTER U -A887 SAURASHTRA LETTER UU -A888 SAURASHTRA LETTER VOCALIC R -A889 SAURASHTRA LETTER VOCALIC RR -A88A SAURASHTRA LETTER VOCALIC L -A88B SAURASHTRA LETTER VOCALIC LL -A88C SAURASHTRA LETTER E -A88D SAURASHTRA LETTER EE -A88E SAURASHTRA LETTER AI -A88F SAURASHTRA LETTER O -A890 SAURASHTRA LETTER OO -A891 SAURASHTRA LETTER AU -A892 SAURASHTRA LETTER KA -A893 SAURASHTRA LETTER KHA -A894 SAURASHTRA LETTER GA -A895 SAURASHTRA LETTER GHA -A896 SAURASHTRA LETTER NGA -A897 SAURASHTRA LETTER CA -A898 SAURASHTRA LETTER CHA -A899 SAURASHTRA LETTER JA -A89A SAURASHTRA LETTER JHA -A89B SAURASHTRA LETTER NYA -A89C SAURASHTRA LETTER TTA -A89D SAURASHTRA LETTER TTHA -A89E SAURASHTRA LETTER DDA -A89F SAURASHTRA LETTER DDHA -A8A0 SAURASHTRA LETTER NNA -A8A1 SAURASHTRA LETTER TA -A8A2 SAURASHTRA LETTER THA -A8A3 SAURASHTRA LETTER DA -A8A4 SAURASHTRA LETTER DHA -A8A5 SAURASHTRA LETTER NA -A8A6 SAURASHTRA LETTER PA -A8A7 SAURASHTRA LETTER PHA -A8A8 SAURASHTRA LETTER BA -A8A9 SAURASHTRA LETTER BHA -A8AA SAURASHTRA LETTER MA -A8AB SAURASHTRA LETTER YA -A8AC 
SAURASHTRA LETTER RA -A8AD SAURASHTRA LETTER LA -A8AE SAURASHTRA LETTER VA -A8AF SAURASHTRA LETTER SHA -A8B0 SAURASHTRA LETTER SSA -A8B1 SAURASHTRA LETTER SA -A8B2 SAURASHTRA LETTER HA -A8B3 SAURASHTRA LETTER LLA -A8B4 SAURASHTRA CONSONANT SIGN HAARU -A8B5 SAURASHTRA VOWEL SIGN AA -A8B6 SAURASHTRA VOWEL SIGN I -A8B7 SAURASHTRA VOWEL SIGN II -A8B8 SAURASHTRA VOWEL SIGN U -A8B9 SAURASHTRA VOWEL SIGN UU -A8BA SAURASHTRA VOWEL SIGN VOCALIC R -A8BB SAURASHTRA VOWEL SIGN VOCALIC RR -A8BC SAURASHTRA VOWEL SIGN VOCALIC L -A8BD SAURASHTRA VOWEL SIGN VOCALIC LL -A8BE SAURASHTRA VOWEL SIGN E -A8BF SAURASHTRA VOWEL SIGN EE -A8C0 SAURASHTRA VOWEL SIGN AI -A8C1 SAURASHTRA VOWEL SIGN O -A8C2 SAURASHTRA VOWEL SIGN OO -A8C3 SAURASHTRA VOWEL SIGN AU -A8C4 SAURASHTRA SIGN VIRAMA -A8CE SAURASHTRA DANDA -A8CF SAURASHTRA DOUBLE DANDA -A8D0 SAURASHTRA DIGIT ZERO -A8D1 SAURASHTRA DIGIT ONE -A8D2 SAURASHTRA DIGIT TWO -A8D3 SAURASHTRA DIGIT THREE -A8D4 SAURASHTRA DIGIT FOUR -A8D5 SAURASHTRA DIGIT FIVE -A8D6 SAURASHTRA DIGIT SIX -A8D7 SAURASHTRA DIGIT SEVEN -A8D8 SAURASHTRA DIGIT EIGHT -A8D9 SAURASHTRA DIGIT NINE -A8E0 COMBINING DEVANAGARI DIGIT ZERO -A8E1 COMBINING DEVANAGARI DIGIT ONE -A8E2 COMBINING DEVANAGARI DIGIT TWO -A8E3 COMBINING DEVANAGARI DIGIT THREE -A8E4 COMBINING DEVANAGARI DIGIT FOUR -A8E5 COMBINING DEVANAGARI DIGIT FIVE -A8E6 COMBINING DEVANAGARI DIGIT SIX -A8E7 COMBINING DEVANAGARI DIGIT SEVEN -A8E8 COMBINING DEVANAGARI DIGIT EIGHT -A8E9 COMBINING DEVANAGARI DIGIT NINE -A8EA COMBINING DEVANAGARI LETTER A -A8EB COMBINING DEVANAGARI LETTER U -A8EC COMBINING DEVANAGARI LETTER KA -A8ED COMBINING DEVANAGARI LETTER NA -A8EE COMBINING DEVANAGARI LETTER PA -A8EF COMBINING DEVANAGARI LETTER RA -A8F0 COMBINING DEVANAGARI LETTER VI -A8F1 COMBINING DEVANAGARI SIGN AVAGRAHA -A8F2 DEVANAGARI SIGN SPACING CANDRABINDU -A8F3 DEVANAGARI SIGN CANDRABINDU VIRAMA -A8F4 DEVANAGARI SIGN DOUBLE CANDRABINDU VIRAMA -A8F5 DEVANAGARI SIGN CANDRABINDU TWO -A8F6 DEVANAGARI SIGN CANDRABINDU THREE -A8F7 
DEVANAGARI SIGN CANDRABINDU AVAGRAHA -A8F8 DEVANAGARI SIGN PUSHPIKA -A8F9 DEVANAGARI GAP FILLER -A8FA DEVANAGARI CARET -A8FB DEVANAGARI HEADSTROKE -A900 KAYAH LI DIGIT ZERO -A901 KAYAH LI DIGIT ONE -A902 KAYAH LI DIGIT TWO -A903 KAYAH LI DIGIT THREE -A904 KAYAH LI DIGIT FOUR -A905 KAYAH LI DIGIT FIVE -A906 KAYAH LI DIGIT SIX -A907 KAYAH LI DIGIT SEVEN -A908 KAYAH LI DIGIT EIGHT -A909 KAYAH LI DIGIT NINE -A90A KAYAH LI LETTER KA -A90B KAYAH LI LETTER KHA -A90C KAYAH LI LETTER GA -A90D KAYAH LI LETTER NGA -A90E KAYAH LI LETTER SA -A90F KAYAH LI LETTER SHA -A910 KAYAH LI LETTER ZA -A911 KAYAH LI LETTER NYA -A912 KAYAH LI LETTER TA -A913 KAYAH LI LETTER HTA -A914 KAYAH LI LETTER NA -A915 KAYAH LI LETTER PA -A916 KAYAH LI LETTER PHA -A917 KAYAH LI LETTER MA -A918 KAYAH LI LETTER DA -A919 KAYAH LI LETTER BA -A91A KAYAH LI LETTER RA -A91B KAYAH LI LETTER YA -A91C KAYAH LI LETTER LA -A91D KAYAH LI LETTER WA -A91E KAYAH LI LETTER THA -A91F KAYAH LI LETTER HA -A920 KAYAH LI LETTER VA -A921 KAYAH LI LETTER CA -A922 KAYAH LI LETTER A -A923 KAYAH LI LETTER OE -A924 KAYAH LI LETTER I -A925 KAYAH LI LETTER OO -A926 KAYAH LI VOWEL UE -A927 KAYAH LI VOWEL E -A928 KAYAH LI VOWEL U -A929 KAYAH LI VOWEL EE -A92A KAYAH LI VOWEL O -A92B KAYAH LI TONE PLOPHU -A92C KAYAH LI TONE CALYA -A92D KAYAH LI TONE CALYA PLOPHU -A92E KAYAH LI SIGN CWI -A92F KAYAH LI SIGN SHYA -A930 REJANG LETTER KA -A931 REJANG LETTER GA -A932 REJANG LETTER NGA -A933 REJANG LETTER TA -A934 REJANG LETTER DA -A935 REJANG LETTER NA -A936 REJANG LETTER PA -A937 REJANG LETTER BA -A938 REJANG LETTER MA -A939 REJANG LETTER CA -A93A REJANG LETTER JA -A93B REJANG LETTER NYA -A93C REJANG LETTER SA -A93D REJANG LETTER RA -A93E REJANG LETTER LA -A93F REJANG LETTER YA -A940 REJANG LETTER WA -A941 REJANG LETTER HA -A942 REJANG LETTER MBA -A943 REJANG LETTER NGGA -A944 REJANG LETTER NDA -A945 REJANG LETTER NYJA -A946 REJANG LETTER A -A947 REJANG VOWEL SIGN I -A948 REJANG VOWEL SIGN U -A949 REJANG VOWEL SIGN E -A94A REJANG VOWEL 
SIGN AI -A94B REJANG VOWEL SIGN O -A94C REJANG VOWEL SIGN AU -A94D REJANG VOWEL SIGN EU -A94E REJANG VOWEL SIGN EA -A94F REJANG CONSONANT SIGN NG -A950 REJANG CONSONANT SIGN N -A951 REJANG CONSONANT SIGN R -A952 REJANG CONSONANT SIGN H -A953 REJANG VIRAMA -A95F REJANG SECTION MARK -A960 HANGUL CHOSEONG TIKEUT-MIEUM -A961 HANGUL CHOSEONG TIKEUT-PIEUP -A962 HANGUL CHOSEONG TIKEUT-SIOS -A963 HANGUL CHOSEONG TIKEUT-CIEUC -A964 HANGUL CHOSEONG RIEUL-KIYEOK -A965 HANGUL CHOSEONG RIEUL-SSANGKIYEOK -A966 HANGUL CHOSEONG RIEUL-TIKEUT -A967 HANGUL CHOSEONG RIEUL-SSANGTIKEUT -A968 HANGUL CHOSEONG RIEUL-MIEUM -A969 HANGUL CHOSEONG RIEUL-PIEUP -A96A HANGUL CHOSEONG RIEUL-SSANGPIEUP -A96B HANGUL CHOSEONG RIEUL-KAPYEOUNPIEUP -A96C HANGUL CHOSEONG RIEUL-SIOS -A96D HANGUL CHOSEONG RIEUL-CIEUC -A96E HANGUL CHOSEONG RIEUL-KHIEUKH -A96F HANGUL CHOSEONG MIEUM-KIYEOK -A970 HANGUL CHOSEONG MIEUM-TIKEUT -A971 HANGUL CHOSEONG MIEUM-SIOS -A972 HANGUL CHOSEONG PIEUP-SIOS-THIEUTH -A973 HANGUL CHOSEONG PIEUP-KHIEUKH -A974 HANGUL CHOSEONG PIEUP-HIEUH -A975 HANGUL CHOSEONG SSANGSIOS-PIEUP -A976 HANGUL CHOSEONG IEUNG-RIEUL -A977 HANGUL CHOSEONG IEUNG-HIEUH -A978 HANGUL CHOSEONG SSANGCIEUC-HIEUH -A979 HANGUL CHOSEONG SSANGTHIEUTH -A97A HANGUL CHOSEONG PHIEUPH-HIEUH -A97B HANGUL CHOSEONG HIEUH-SIOS -A97C HANGUL CHOSEONG SSANGYEORINHIEUH -A980 JAVANESE SIGN PANYANGGA -A981 JAVANESE SIGN CECAK -A982 JAVANESE SIGN LAYAR -A983 JAVANESE SIGN WIGNYAN -A984 JAVANESE LETTER A -A985 JAVANESE LETTER I KAWI -A986 JAVANESE LETTER I -A987 JAVANESE LETTER II -A988 JAVANESE LETTER U -A989 JAVANESE LETTER PA CEREK -A98A JAVANESE LETTER NGA LELET -A98B JAVANESE LETTER NGA LELET RASWADI -A98C JAVANESE LETTER E -A98D JAVANESE LETTER AI -A98E JAVANESE LETTER O -A98F JAVANESE LETTER KA -A990 JAVANESE LETTER KA SASAK -A991 JAVANESE LETTER KA MURDA -A992 JAVANESE LETTER GA -A993 JAVANESE LETTER GA MURDA -A994 JAVANESE LETTER NGA -A995 JAVANESE LETTER CA -A996 JAVANESE LETTER CA MURDA -A997 JAVANESE LETTER JA -A998 
JAVANESE LETTER NYA MURDA -A999 JAVANESE LETTER JA MAHAPRANA -A99A JAVANESE LETTER NYA -A99B JAVANESE LETTER TTA -A99C JAVANESE LETTER TTA MAHAPRANA -A99D JAVANESE LETTER DDA -A99E JAVANESE LETTER DDA MAHAPRANA -A99F JAVANESE LETTER NA MURDA -A9A0 JAVANESE LETTER TA -A9A1 JAVANESE LETTER TA MURDA -A9A2 JAVANESE LETTER DA -A9A3 JAVANESE LETTER DA MAHAPRANA -A9A4 JAVANESE LETTER NA -A9A5 JAVANESE LETTER PA -A9A6 JAVANESE LETTER PA MURDA -A9A7 JAVANESE LETTER BA -A9A8 JAVANESE LETTER BA MURDA -A9A9 JAVANESE LETTER MA -A9AA JAVANESE LETTER YA -A9AB JAVANESE LETTER RA -A9AC JAVANESE LETTER RA AGUNG -A9AD JAVANESE LETTER LA -A9AE JAVANESE LETTER WA -A9AF JAVANESE LETTER SA MURDA -A9B0 JAVANESE LETTER SA MAHAPRANA -A9B1 JAVANESE LETTER SA -A9B2 JAVANESE LETTER HA -A9B3 JAVANESE SIGN CECAK TELU -A9B4 JAVANESE VOWEL SIGN TARUNG -A9B5 JAVANESE VOWEL SIGN TOLONG -A9B6 JAVANESE VOWEL SIGN WULU -A9B7 JAVANESE VOWEL SIGN WULU MELIK -A9B8 JAVANESE VOWEL SIGN SUKU -A9B9 JAVANESE VOWEL SIGN SUKU MENDUT -A9BA JAVANESE VOWEL SIGN TALING -A9BB JAVANESE VOWEL SIGN DIRGA MURE -A9BC JAVANESE VOWEL SIGN PEPET -A9BD JAVANESE CONSONANT SIGN KERET -A9BE JAVANESE CONSONANT SIGN PENGKAL -A9BF JAVANESE CONSONANT SIGN CAKRA -A9C0 JAVANESE PANGKON -A9C1 JAVANESE LEFT RERENGGAN -A9C2 JAVANESE RIGHT RERENGGAN -A9C3 JAVANESE PADA ANDAP -A9C4 JAVANESE PADA MADYA -A9C5 JAVANESE PADA LUHUR -A9C6 JAVANESE PADA WINDU -A9C7 JAVANESE PADA PANGKAT -A9C8 JAVANESE PADA LINGSA -A9C9 JAVANESE PADA LUNGSI -A9CA JAVANESE PADA ADEG -A9CB JAVANESE PADA ADEG ADEG -A9CC JAVANESE PADA PISELEH -A9CD JAVANESE TURNED PADA PISELEH -A9CF JAVANESE PANGRANGKEP -A9D0 JAVANESE DIGIT ZERO -A9D1 JAVANESE DIGIT ONE -A9D2 JAVANESE DIGIT TWO -A9D3 JAVANESE DIGIT THREE -A9D4 JAVANESE DIGIT FOUR -A9D5 JAVANESE DIGIT FIVE -A9D6 JAVANESE DIGIT SIX -A9D7 JAVANESE DIGIT SEVEN -A9D8 JAVANESE DIGIT EIGHT -A9D9 JAVANESE DIGIT NINE -A9DE JAVANESE PADA TIRTA TUMETES -A9DF JAVANESE PADA ISEN-ISEN -AA00 CHAM LETTER A -AA01 CHAM LETTER I -AA02 
CHAM LETTER U -AA03 CHAM LETTER E -AA04 CHAM LETTER AI -AA05 CHAM LETTER O -AA06 CHAM LETTER KA -AA07 CHAM LETTER KHA -AA08 CHAM LETTER GA -AA09 CHAM LETTER GHA -AA0A CHAM LETTER NGUE -AA0B CHAM LETTER NGA -AA0C CHAM LETTER CHA -AA0D CHAM LETTER CHHA -AA0E CHAM LETTER JA -AA0F CHAM LETTER JHA -AA10 CHAM LETTER NHUE -AA11 CHAM LETTER NHA -AA12 CHAM LETTER NHJA -AA13 CHAM LETTER TA -AA14 CHAM LETTER THA -AA15 CHAM LETTER DA -AA16 CHAM LETTER DHA -AA17 CHAM LETTER NUE -AA18 CHAM LETTER NA -AA19 CHAM LETTER DDA -AA1A CHAM LETTER PA -AA1B CHAM LETTER PPA -AA1C CHAM LETTER PHA -AA1D CHAM LETTER BA -AA1E CHAM LETTER BHA -AA1F CHAM LETTER MUE -AA20 CHAM LETTER MA -AA21 CHAM LETTER BBA -AA22 CHAM LETTER YA -AA23 CHAM LETTER RA -AA24 CHAM LETTER LA -AA25 CHAM LETTER VA -AA26 CHAM LETTER SSA -AA27 CHAM LETTER SA -AA28 CHAM LETTER HA -AA29 CHAM VOWEL SIGN AA -AA2A CHAM VOWEL SIGN I -AA2B CHAM VOWEL SIGN II -AA2C CHAM VOWEL SIGN EI -AA2D CHAM VOWEL SIGN U -AA2E CHAM VOWEL SIGN OE -AA2F CHAM VOWEL SIGN O -AA30 CHAM VOWEL SIGN AI -AA31 CHAM VOWEL SIGN AU -AA32 CHAM VOWEL SIGN UE -AA33 CHAM CONSONANT SIGN YA -AA34 CHAM CONSONANT SIGN RA -AA35 CHAM CONSONANT SIGN LA -AA36 CHAM CONSONANT SIGN WA -AA40 CHAM LETTER FINAL K -AA41 CHAM LETTER FINAL G -AA42 CHAM LETTER FINAL NG -AA43 CHAM CONSONANT SIGN FINAL NG -AA44 CHAM LETTER FINAL CH -AA45 CHAM LETTER FINAL T -AA46 CHAM LETTER FINAL N -AA47 CHAM LETTER FINAL P -AA48 CHAM LETTER FINAL Y -AA49 CHAM LETTER FINAL R -AA4A CHAM LETTER FINAL L -AA4B CHAM LETTER FINAL SS -AA4C CHAM CONSONANT SIGN FINAL M -AA4D CHAM CONSONANT SIGN FINAL H -AA50 CHAM DIGIT ZERO -AA51 CHAM DIGIT ONE -AA52 CHAM DIGIT TWO -AA53 CHAM DIGIT THREE -AA54 CHAM DIGIT FOUR -AA55 CHAM DIGIT FIVE -AA56 CHAM DIGIT SIX -AA57 CHAM DIGIT SEVEN -AA58 CHAM DIGIT EIGHT -AA59 CHAM DIGIT NINE -AA5C CHAM PUNCTUATION SPIRAL -AA5D CHAM PUNCTUATION DANDA -AA5E CHAM PUNCTUATION DOUBLE DANDA -AA5F CHAM PUNCTUATION TRIPLE DANDA -AA60 MYANMAR LETTER KHAMTI GA -AA61 MYANMAR LETTER KHAMTI 
CA -AA62 MYANMAR LETTER KHAMTI CHA -AA63 MYANMAR LETTER KHAMTI JA -AA64 MYANMAR LETTER KHAMTI JHA -AA65 MYANMAR LETTER KHAMTI NYA -AA66 MYANMAR LETTER KHAMTI TTA -AA67 MYANMAR LETTER KHAMTI TTHA -AA68 MYANMAR LETTER KHAMTI DDA -AA69 MYANMAR LETTER KHAMTI DDHA -AA6A MYANMAR LETTER KHAMTI DHA -AA6B MYANMAR LETTER KHAMTI NA -AA6C MYANMAR LETTER KHAMTI SA -AA6D MYANMAR LETTER KHAMTI HA -AA6E MYANMAR LETTER KHAMTI HHA -AA6F MYANMAR LETTER KHAMTI FA -AA70 MYANMAR MODIFIER LETTER KHAMTI REDUPLICATION -AA71 MYANMAR LETTER KHAMTI XA -AA72 MYANMAR LETTER KHAMTI ZA -AA73 MYANMAR LETTER KHAMTI RA -AA74 MYANMAR LOGOGRAM KHAMTI OAY -AA75 MYANMAR LOGOGRAM KHAMTI QN -AA76 MYANMAR LOGOGRAM KHAMTI HM -AA77 MYANMAR SYMBOL AITON EXCLAMATION -AA78 MYANMAR SYMBOL AITON ONE -AA79 MYANMAR SYMBOL AITON TWO -AA7A MYANMAR LETTER AITON RA -AA7B MYANMAR SIGN PAO KAREN TONE -AA80 TAI VIET LETTER LOW KO -AA81 TAI VIET LETTER HIGH KO -AA82 TAI VIET LETTER LOW KHO -AA83 TAI VIET LETTER HIGH KHO -AA84 TAI VIET LETTER LOW KHHO -AA85 TAI VIET LETTER HIGH KHHO -AA86 TAI VIET LETTER LOW GO -AA87 TAI VIET LETTER HIGH GO -AA88 TAI VIET LETTER LOW NGO -AA89 TAI VIET LETTER HIGH NGO -AA8A TAI VIET LETTER LOW CO -AA8B TAI VIET LETTER HIGH CO -AA8C TAI VIET LETTER LOW CHO -AA8D TAI VIET LETTER HIGH CHO -AA8E TAI VIET LETTER LOW SO -AA8F TAI VIET LETTER HIGH SO -AA90 TAI VIET LETTER LOW NYO -AA91 TAI VIET LETTER HIGH NYO -AA92 TAI VIET LETTER LOW DO -AA93 TAI VIET LETTER HIGH DO -AA94 TAI VIET LETTER LOW TO -AA95 TAI VIET LETTER HIGH TO -AA96 TAI VIET LETTER LOW THO -AA97 TAI VIET LETTER HIGH THO -AA98 TAI VIET LETTER LOW NO -AA99 TAI VIET LETTER HIGH NO -AA9A TAI VIET LETTER LOW BO -AA9B TAI VIET LETTER HIGH BO -AA9C TAI VIET LETTER LOW PO -AA9D TAI VIET LETTER HIGH PO -AA9E TAI VIET LETTER LOW PHO -AA9F TAI VIET LETTER HIGH PHO -AAA0 TAI VIET LETTER LOW FO -AAA1 TAI VIET LETTER HIGH FO -AAA2 TAI VIET LETTER LOW MO -AAA3 TAI VIET LETTER HIGH MO -AAA4 TAI VIET LETTER LOW YO -AAA5 TAI VIET LETTER HIGH YO -AAA6 
TAI VIET LETTER LOW RO -AAA7 TAI VIET LETTER HIGH RO -AAA8 TAI VIET LETTER LOW LO -AAA9 TAI VIET LETTER HIGH LO -AAAA TAI VIET LETTER LOW VO -AAAB TAI VIET LETTER HIGH VO -AAAC TAI VIET LETTER LOW HO -AAAD TAI VIET LETTER HIGH HO -AAAE TAI VIET LETTER LOW O -AAAF TAI VIET LETTER HIGH O -AAB0 TAI VIET MAI KANG -AAB1 TAI VIET VOWEL AA -AAB2 TAI VIET VOWEL I -AAB3 TAI VIET VOWEL UE -AAB4 TAI VIET VOWEL U -AAB5 TAI VIET VOWEL E -AAB6 TAI VIET VOWEL O -AAB7 TAI VIET MAI KHIT -AAB8 TAI VIET VOWEL IA -AAB9 TAI VIET VOWEL UEA -AABA TAI VIET VOWEL UA -AABB TAI VIET VOWEL AUE -AABC TAI VIET VOWEL AY -AABD TAI VIET VOWEL AN -AABE TAI VIET VOWEL AM -AABF TAI VIET TONE MAI EK -AAC0 TAI VIET TONE MAI NUENG -AAC1 TAI VIET TONE MAI THO -AAC2 TAI VIET TONE MAI SONG -AADB TAI VIET SYMBOL KON -AADC TAI VIET SYMBOL NUENG -AADD TAI VIET SYMBOL SAM -AADE TAI VIET SYMBOL HO HOI -AADF TAI VIET SYMBOL KOI KOI -ABC0 MEETEI MAYEK LETTER KOK -ABC1 MEETEI MAYEK LETTER SAM -ABC2 MEETEI MAYEK LETTER LAI -ABC3 MEETEI MAYEK LETTER MIT -ABC4 MEETEI MAYEK LETTER PA -ABC5 MEETEI MAYEK LETTER NA -ABC6 MEETEI MAYEK LETTER CHIL -ABC7 MEETEI MAYEK LETTER TIL -ABC8 MEETEI MAYEK LETTER KHOU -ABC9 MEETEI MAYEK LETTER NGOU -ABCA MEETEI MAYEK LETTER THOU -ABCB MEETEI MAYEK LETTER WAI -ABCC MEETEI MAYEK LETTER YANG -ABCD MEETEI MAYEK LETTER HUK -ABCE MEETEI MAYEK LETTER UN -ABCF MEETEI MAYEK LETTER I -ABD0 MEETEI MAYEK LETTER PHAM -ABD1 MEETEI MAYEK LETTER ATIYA -ABD2 MEETEI MAYEK LETTER GOK -ABD3 MEETEI MAYEK LETTER JHAM -ABD4 MEETEI MAYEK LETTER RAI -ABD5 MEETEI MAYEK LETTER BA -ABD6 MEETEI MAYEK LETTER JIL -ABD7 MEETEI MAYEK LETTER DIL -ABD8 MEETEI MAYEK LETTER GHOU -ABD9 MEETEI MAYEK LETTER DHOU -ABDA MEETEI MAYEK LETTER BHAM -ABDB MEETEI MAYEK LETTER KOK LONSUM -ABDC MEETEI MAYEK LETTER LAI LONSUM -ABDD MEETEI MAYEK LETTER MIT LONSUM -ABDE MEETEI MAYEK LETTER PA LONSUM -ABDF MEETEI MAYEK LETTER NA LONSUM -ABE0 MEETEI MAYEK LETTER TIL LONSUM -ABE1 MEETEI MAYEK LETTER NGOU LONSUM -ABE2 MEETEI MAYEK LETTER I 
LONSUM -ABE3 MEETEI MAYEK VOWEL SIGN ONAP -ABE4 MEETEI MAYEK VOWEL SIGN INAP -ABE5 MEETEI MAYEK VOWEL SIGN ANAP -ABE6 MEETEI MAYEK VOWEL SIGN YENAP -ABE7 MEETEI MAYEK VOWEL SIGN SOUNAP -ABE8 MEETEI MAYEK VOWEL SIGN UNAP -ABE9 MEETEI MAYEK VOWEL SIGN CHEINAP -ABEA MEETEI MAYEK VOWEL SIGN NUNG -ABEB MEETEI MAYEK CHEIKHEI -ABEC MEETEI MAYEK LUM IYEK -ABED MEETEI MAYEK APUN IYEK -ABF0 MEETEI MAYEK DIGIT ZERO -ABF1 MEETEI MAYEK DIGIT ONE -ABF2 MEETEI MAYEK DIGIT TWO -ABF3 MEETEI MAYEK DIGIT THREE -ABF4 MEETEI MAYEK DIGIT FOUR -ABF5 MEETEI MAYEK DIGIT FIVE -ABF6 MEETEI MAYEK DIGIT SIX -ABF7 MEETEI MAYEK DIGIT SEVEN -ABF8 MEETEI MAYEK DIGIT EIGHT -ABF9 MEETEI MAYEK DIGIT NINE -AC00 <Hangul Syllable, First> -D7A3 <Hangul Syllable, Last> -D7B0 HANGUL JUNGSEONG O-YEO -D7B1 HANGUL JUNGSEONG O-O-I -D7B2 HANGUL JUNGSEONG YO-A -D7B3 HANGUL JUNGSEONG YO-AE -D7B4 HANGUL JUNGSEONG YO-EO -D7B5 HANGUL JUNGSEONG U-YEO -D7B6 HANGUL JUNGSEONG U-I-I -D7B7 HANGUL JUNGSEONG YU-AE -D7B8 HANGUL JUNGSEONG YU-O -D7B9 HANGUL JUNGSEONG EU-A -D7BA HANGUL JUNGSEONG EU-EO -D7BB HANGUL JUNGSEONG EU-E -D7BC HANGUL JUNGSEONG EU-O -D7BD HANGUL JUNGSEONG I-YA-O -D7BE HANGUL JUNGSEONG I-YAE -D7BF HANGUL JUNGSEONG I-YEO -D7C0 HANGUL JUNGSEONG I-YE -D7C1 HANGUL JUNGSEONG I-O-I -D7C2 HANGUL JUNGSEONG I-YO -D7C3 HANGUL JUNGSEONG I-YU -D7C4 HANGUL JUNGSEONG I-I -D7C5 HANGUL JUNGSEONG ARAEA-A -D7C6 HANGUL JUNGSEONG ARAEA-E -D7CB HANGUL JONGSEONG NIEUN-RIEUL -D7CC HANGUL JONGSEONG NIEUN-CHIEUCH -D7CD HANGUL JONGSEONG SSANGTIKEUT -D7CE HANGUL JONGSEONG SSANGTIKEUT-PIEUP -D7CF HANGUL JONGSEONG TIKEUT-PIEUP -D7D0 HANGUL JONGSEONG TIKEUT-SIOS -D7D1 HANGUL JONGSEONG TIKEUT-SIOS-KIYEOK -D7D2 HANGUL JONGSEONG TIKEUT-CIEUC -D7D3 HANGUL JONGSEONG TIKEUT-CHIEUCH -D7D4 HANGUL JONGSEONG TIKEUT-THIEUTH -D7D5 HANGUL JONGSEONG RIEUL-SSANGKIYEOK -D7D6 HANGUL JONGSEONG RIEUL-KIYEOK-HIEUH -D7D7 HANGUL JONGSEONG SSANGRIEUL-KHIEUKH -D7D8 HANGUL JONGSEONG RIEUL-MIEUM-HIEUH -D7D9 HANGUL JONGSEONG RIEUL-PIEUP-TIKEUT -D7DA HANGUL 
JONGSEONG RIEUL-PIEUP-PHIEUPH -D7DB HANGUL JONGSEONG RIEUL-YESIEUNG -D7DC HANGUL JONGSEONG RIEUL-YEORINHIEUH-HIEUH -D7DD HANGUL JONGSEONG KAPYEOUNRIEUL -D7DE HANGUL JONGSEONG MIEUM-NIEUN -D7DF HANGUL JONGSEONG MIEUM-SSANGNIEUN -D7E0 HANGUL JONGSEONG SSANGMIEUM -D7E1 HANGUL JONGSEONG MIEUM-PIEUP-SIOS -D7E2 HANGUL JONGSEONG MIEUM-CIEUC -D7E3 HANGUL JONGSEONG PIEUP-TIKEUT -D7E4 HANGUL JONGSEONG PIEUP-RIEUL-PHIEUPH -D7E5 HANGUL JONGSEONG PIEUP-MIEUM -D7E6 HANGUL JONGSEONG SSANGPIEUP -D7E7 HANGUL JONGSEONG PIEUP-SIOS-TIKEUT -D7E8 HANGUL JONGSEONG PIEUP-CIEUC -D7E9 HANGUL JONGSEONG PIEUP-CHIEUCH -D7EA HANGUL JONGSEONG SIOS-MIEUM -D7EB HANGUL JONGSEONG SIOS-KAPYEOUNPIEUP -D7EC HANGUL JONGSEONG SSANGSIOS-KIYEOK -D7ED HANGUL JONGSEONG SSANGSIOS-TIKEUT -D7EE HANGUL JONGSEONG SIOS-PANSIOS -D7EF HANGUL JONGSEONG SIOS-CIEUC -D7F0 HANGUL JONGSEONG SIOS-CHIEUCH -D7F1 HANGUL JONGSEONG SIOS-THIEUTH -D7F2 HANGUL JONGSEONG SIOS-HIEUH -D7F3 HANGUL JONGSEONG PANSIOS-PIEUP -D7F4 HANGUL JONGSEONG PANSIOS-KAPYEOUNPIEUP -D7F5 HANGUL JONGSEONG YESIEUNG-MIEUM -D7F6 HANGUL JONGSEONG YESIEUNG-HIEUH -D7F7 HANGUL JONGSEONG CIEUC-PIEUP -D7F8 HANGUL JONGSEONG CIEUC-SSANGPIEUP -D7F9 HANGUL JONGSEONG SSANGCIEUC -D7FA HANGUL JONGSEONG PHIEUPH-SIOS -D7FB HANGUL JONGSEONG PHIEUPH-THIEUTH -D800 <Non Private Use High Surrogate, First> -DB7F <Non Private Use High Surrogate, Last> -DB80 <Private Use High Surrogate, First> -DBFF <Private Use High Surrogate, Last> -DC00 <Low Surrogate, First> -DFFF <Low Surrogate, Last> -E000 <Private Use, First> -F8FF <Private Use, Last> -F900 CJK COMPATIBILITY IDEOGRAPH-F900 -F901 CJK COMPATIBILITY IDEOGRAPH-F901 -F902 CJK COMPATIBILITY IDEOGRAPH-F902 -F903 CJK COMPATIBILITY IDEOGRAPH-F903 -F904 CJK COMPATIBILITY IDEOGRAPH-F904 -F905 CJK COMPATIBILITY IDEOGRAPH-F905 -F906 CJK COMPATIBILITY IDEOGRAPH-F906 -F907 CJK COMPATIBILITY IDEOGRAPH-F907 -F908 CJK COMPATIBILITY IDEOGRAPH-F908 -F909 CJK COMPATIBILITY IDEOGRAPH-F909 -F90A CJK COMPATIBILITY IDEOGRAPH-F90A -F90B CJK 
COMPATIBILITY IDEOGRAPH-F90B -F90C CJK COMPATIBILITY IDEOGRAPH-F90C -F90D CJK COMPATIBILITY IDEOGRAPH-F90D -F90E CJK COMPATIBILITY IDEOGRAPH-F90E -F90F CJK COMPATIBILITY IDEOGRAPH-F90F -F910 CJK COMPATIBILITY IDEOGRAPH-F910 -F911 CJK COMPATIBILITY IDEOGRAPH-F911 -F912 CJK COMPATIBILITY IDEOGRAPH-F912 -F913 CJK COMPATIBILITY IDEOGRAPH-F913 -F914 CJK COMPATIBILITY IDEOGRAPH-F914 -F915 CJK COMPATIBILITY IDEOGRAPH-F915 -F916 CJK COMPATIBILITY IDEOGRAPH-F916 -F917 CJK COMPATIBILITY IDEOGRAPH-F917 -F918 CJK COMPATIBILITY IDEOGRAPH-F918 -F919 CJK COMPATIBILITY IDEOGRAPH-F919 -F91A CJK COMPATIBILITY IDEOGRAPH-F91A -F91B CJK COMPATIBILITY IDEOGRAPH-F91B -F91C CJK COMPATIBILITY IDEOGRAPH-F91C -F91D CJK COMPATIBILITY IDEOGRAPH-F91D -F91E CJK COMPATIBILITY IDEOGRAPH-F91E -F91F CJK COMPATIBILITY IDEOGRAPH-F91F -F920 CJK COMPATIBILITY IDEOGRAPH-F920 -F921 CJK COMPATIBILITY IDEOGRAPH-F921 -F922 CJK COMPATIBILITY IDEOGRAPH-F922 -F923 CJK COMPATIBILITY IDEOGRAPH-F923 -F924 CJK COMPATIBILITY IDEOGRAPH-F924 -F925 CJK COMPATIBILITY IDEOGRAPH-F925 -F926 CJK COMPATIBILITY IDEOGRAPH-F926 -F927 CJK COMPATIBILITY IDEOGRAPH-F927 -F928 CJK COMPATIBILITY IDEOGRAPH-F928 -F929 CJK COMPATIBILITY IDEOGRAPH-F929 -F92A CJK COMPATIBILITY IDEOGRAPH-F92A -F92B CJK COMPATIBILITY IDEOGRAPH-F92B -F92C CJK COMPATIBILITY IDEOGRAPH-F92C -F92D CJK COMPATIBILITY IDEOGRAPH-F92D -F92E CJK COMPATIBILITY IDEOGRAPH-F92E -F92F CJK COMPATIBILITY IDEOGRAPH-F92F -F930 CJK COMPATIBILITY IDEOGRAPH-F930 -F931 CJK COMPATIBILITY IDEOGRAPH-F931 -F932 CJK COMPATIBILITY IDEOGRAPH-F932 -F933 CJK COMPATIBILITY IDEOGRAPH-F933 -F934 CJK COMPATIBILITY IDEOGRAPH-F934 -F935 CJK COMPATIBILITY IDEOGRAPH-F935 -F936 CJK COMPATIBILITY IDEOGRAPH-F936 -F937 CJK COMPATIBILITY IDEOGRAPH-F937 -F938 CJK COMPATIBILITY IDEOGRAPH-F938 -F939 CJK COMPATIBILITY IDEOGRAPH-F939 -F93A CJK COMPATIBILITY IDEOGRAPH-F93A -F93B CJK COMPATIBILITY IDEOGRAPH-F93B -F93C CJK COMPATIBILITY IDEOGRAPH-F93C -F93D CJK COMPATIBILITY IDEOGRAPH-F93D -F93E CJK 
COMPATIBILITY IDEOGRAPH-F93E -F93F CJK COMPATIBILITY IDEOGRAPH-F93F -F940 CJK COMPATIBILITY IDEOGRAPH-F940 -F941 CJK COMPATIBILITY IDEOGRAPH-F941 -F942 CJK COMPATIBILITY IDEOGRAPH-F942 -F943 CJK COMPATIBILITY IDEOGRAPH-F943 -F944 CJK COMPATIBILITY IDEOGRAPH-F944 -F945 CJK COMPATIBILITY IDEOGRAPH-F945 -F946 CJK COMPATIBILITY IDEOGRAPH-F946 -F947 CJK COMPATIBILITY IDEOGRAPH-F947 -F948 CJK COMPATIBILITY IDEOGRAPH-F948 -F949 CJK COMPATIBILITY IDEOGRAPH-F949 -F94A CJK COMPATIBILITY IDEOGRAPH-F94A -F94B CJK COMPATIBILITY IDEOGRAPH-F94B -F94C CJK COMPATIBILITY IDEOGRAPH-F94C -F94D CJK COMPATIBILITY IDEOGRAPH-F94D -F94E CJK COMPATIBILITY IDEOGRAPH-F94E -F94F CJK COMPATIBILITY IDEOGRAPH-F94F -F950 CJK COMPATIBILITY IDEOGRAPH-F950 -F951 CJK COMPATIBILITY IDEOGRAPH-F951 -F952 CJK COMPATIBILITY IDEOGRAPH-F952 -F953 CJK COMPATIBILITY IDEOGRAPH-F953 -F954 CJK COMPATIBILITY IDEOGRAPH-F954 -F955 CJK COMPATIBILITY IDEOGRAPH-F955 -F956 CJK COMPATIBILITY IDEOGRAPH-F956 -F957 CJK COMPATIBILITY IDEOGRAPH-F957 -F958 CJK COMPATIBILITY IDEOGRAPH-F958 -F959 CJK COMPATIBILITY IDEOGRAPH-F959 -F95A CJK COMPATIBILITY IDEOGRAPH-F95A -F95B CJK COMPATIBILITY IDEOGRAPH-F95B -F95C CJK COMPATIBILITY IDEOGRAPH-F95C -F95D CJK COMPATIBILITY IDEOGRAPH-F95D -F95E CJK COMPATIBILITY IDEOGRAPH-F95E -F95F CJK COMPATIBILITY IDEOGRAPH-F95F -F960 CJK COMPATIBILITY IDEOGRAPH-F960 -F961 CJK COMPATIBILITY IDEOGRAPH-F961 -F962 CJK COMPATIBILITY IDEOGRAPH-F962 -F963 CJK COMPATIBILITY IDEOGRAPH-F963 -F964 CJK COMPATIBILITY IDEOGRAPH-F964 -F965 CJK COMPATIBILITY IDEOGRAPH-F965 -F966 CJK COMPATIBILITY IDEOGRAPH-F966 -F967 CJK COMPATIBILITY IDEOGRAPH-F967 -F968 CJK COMPATIBILITY IDEOGRAPH-F968 -F969 CJK COMPATIBILITY IDEOGRAPH-F969 -F96A CJK COMPATIBILITY IDEOGRAPH-F96A -F96B CJK COMPATIBILITY IDEOGRAPH-F96B -F96C CJK COMPATIBILITY IDEOGRAPH-F96C -F96D CJK COMPATIBILITY IDEOGRAPH-F96D -F96E CJK COMPATIBILITY IDEOGRAPH-F96E -F96F CJK COMPATIBILITY IDEOGRAPH-F96F -F970 CJK COMPATIBILITY IDEOGRAPH-F970 -F971 CJK 
COMPATIBILITY IDEOGRAPH-F971 -F972 CJK COMPATIBILITY IDEOGRAPH-F972 -F973 CJK COMPATIBILITY IDEOGRAPH-F973 -F974 CJK COMPATIBILITY IDEOGRAPH-F974 -F975 CJK COMPATIBILITY IDEOGRAPH-F975 -F976 CJK COMPATIBILITY IDEOGRAPH-F976 -F977 CJK COMPATIBILITY IDEOGRAPH-F977 -F978 CJK COMPATIBILITY IDEOGRAPH-F978 -F979 CJK COMPATIBILITY IDEOGRAPH-F979 -F97A CJK COMPATIBILITY IDEOGRAPH-F97A -F97B CJK COMPATIBILITY IDEOGRAPH-F97B -F97C CJK COMPATIBILITY IDEOGRAPH-F97C -F97D CJK COMPATIBILITY IDEOGRAPH-F97D -F97E CJK COMPATIBILITY IDEOGRAPH-F97E -F97F CJK COMPATIBILITY IDEOGRAPH-F97F -F980 CJK COMPATIBILITY IDEOGRAPH-F980 -F981 CJK COMPATIBILITY IDEOGRAPH-F981 -F982 CJK COMPATIBILITY IDEOGRAPH-F982 -F983 CJK COMPATIBILITY IDEOGRAPH-F983 -F984 CJK COMPATIBILITY IDEOGRAPH-F984 -F985 CJK COMPATIBILITY IDEOGRAPH-F985 -F986 CJK COMPATIBILITY IDEOGRAPH-F986 -F987 CJK COMPATIBILITY IDEOGRAPH-F987 -F988 CJK COMPATIBILITY IDEOGRAPH-F988 -F989 CJK COMPATIBILITY IDEOGRAPH-F989 -F98A CJK COMPATIBILITY IDEOGRAPH-F98A -F98B CJK COMPATIBILITY IDEOGRAPH-F98B -F98C CJK COMPATIBILITY IDEOGRAPH-F98C -F98D CJK COMPATIBILITY IDEOGRAPH-F98D -F98E CJK COMPATIBILITY IDEOGRAPH-F98E -F98F CJK COMPATIBILITY IDEOGRAPH-F98F -F990 CJK COMPATIBILITY IDEOGRAPH-F990 -F991 CJK COMPATIBILITY IDEOGRAPH-F991 -F992 CJK COMPATIBILITY IDEOGRAPH-F992 -F993 CJK COMPATIBILITY IDEOGRAPH-F993 -F994 CJK COMPATIBILITY IDEOGRAPH-F994 -F995 CJK COMPATIBILITY IDEOGRAPH-F995 -F996 CJK COMPATIBILITY IDEOGRAPH-F996 -F997 CJK COMPATIBILITY IDEOGRAPH-F997 -F998 CJK COMPATIBILITY IDEOGRAPH-F998 -F999 CJK COMPATIBILITY IDEOGRAPH-F999 -F99A CJK COMPATIBILITY IDEOGRAPH-F99A -F99B CJK COMPATIBILITY IDEOGRAPH-F99B -F99C CJK COMPATIBILITY IDEOGRAPH-F99C -F99D CJK COMPATIBILITY IDEOGRAPH-F99D -F99E CJK COMPATIBILITY IDEOGRAPH-F99E -F99F CJK COMPATIBILITY IDEOGRAPH-F99F -F9A0 CJK COMPATIBILITY IDEOGRAPH-F9A0 -F9A1 CJK COMPATIBILITY IDEOGRAPH-F9A1 -F9A2 CJK COMPATIBILITY IDEOGRAPH-F9A2 -F9A3 CJK COMPATIBILITY IDEOGRAPH-F9A3 -F9A4 CJK 
COMPATIBILITY IDEOGRAPH-F9A4 -F9A5 CJK COMPATIBILITY IDEOGRAPH-F9A5 -F9A6 CJK COMPATIBILITY IDEOGRAPH-F9A6 -F9A7 CJK COMPATIBILITY IDEOGRAPH-F9A7 -F9A8 CJK COMPATIBILITY IDEOGRAPH-F9A8 -F9A9 CJK COMPATIBILITY IDEOGRAPH-F9A9 -F9AA CJK COMPATIBILITY IDEOGRAPH-F9AA -F9AB CJK COMPATIBILITY IDEOGRAPH-F9AB -F9AC CJK COMPATIBILITY IDEOGRAPH-F9AC -F9AD CJK COMPATIBILITY IDEOGRAPH-F9AD -F9AE CJK COMPATIBILITY IDEOGRAPH-F9AE -F9AF CJK COMPATIBILITY IDEOGRAPH-F9AF -F9B0 CJK COMPATIBILITY IDEOGRAPH-F9B0 -F9B1 CJK COMPATIBILITY IDEOGRAPH-F9B1 -F9B2 CJK COMPATIBILITY IDEOGRAPH-F9B2 -F9B3 CJK COMPATIBILITY IDEOGRAPH-F9B3 -F9B4 CJK COMPATIBILITY IDEOGRAPH-F9B4 -F9B5 CJK COMPATIBILITY IDEOGRAPH-F9B5 -F9B6 CJK COMPATIBILITY IDEOGRAPH-F9B6 -F9B7 CJK COMPATIBILITY IDEOGRAPH-F9B7 -F9B8 CJK COMPATIBILITY IDEOGRAPH-F9B8 -F9B9 CJK COMPATIBILITY IDEOGRAPH-F9B9 -F9BA CJK COMPATIBILITY IDEOGRAPH-F9BA -F9BB CJK COMPATIBILITY IDEOGRAPH-F9BB -F9BC CJK COMPATIBILITY IDEOGRAPH-F9BC -F9BD CJK COMPATIBILITY IDEOGRAPH-F9BD -F9BE CJK COMPATIBILITY IDEOGRAPH-F9BE -F9BF CJK COMPATIBILITY IDEOGRAPH-F9BF -F9C0 CJK COMPATIBILITY IDEOGRAPH-F9C0 -F9C1 CJK COMPATIBILITY IDEOGRAPH-F9C1 -F9C2 CJK COMPATIBILITY IDEOGRAPH-F9C2 -F9C3 CJK COMPATIBILITY IDEOGRAPH-F9C3 -F9C4 CJK COMPATIBILITY IDEOGRAPH-F9C4 -F9C5 CJK COMPATIBILITY IDEOGRAPH-F9C5 -F9C6 CJK COMPATIBILITY IDEOGRAPH-F9C6 -F9C7 CJK COMPATIBILITY IDEOGRAPH-F9C7 -F9C8 CJK COMPATIBILITY IDEOGRAPH-F9C8 -F9C9 CJK COMPATIBILITY IDEOGRAPH-F9C9 -F9CA CJK COMPATIBILITY IDEOGRAPH-F9CA -F9CB CJK COMPATIBILITY IDEOGRAPH-F9CB -F9CC CJK COMPATIBILITY IDEOGRAPH-F9CC -F9CD CJK COMPATIBILITY IDEOGRAPH-F9CD -F9CE CJK COMPATIBILITY IDEOGRAPH-F9CE -F9CF CJK COMPATIBILITY IDEOGRAPH-F9CF -F9D0 CJK COMPATIBILITY IDEOGRAPH-F9D0 -F9D1 CJK COMPATIBILITY IDEOGRAPH-F9D1 -F9D2 CJK COMPATIBILITY IDEOGRAPH-F9D2 -F9D3 CJK COMPATIBILITY IDEOGRAPH-F9D3 -F9D4 CJK COMPATIBILITY IDEOGRAPH-F9D4 -F9D5 CJK COMPATIBILITY IDEOGRAPH-F9D5 -F9D6 CJK COMPATIBILITY IDEOGRAPH-F9D6 -F9D7 CJK 
COMPATIBILITY IDEOGRAPH-F9D7 -F9D8 CJK COMPATIBILITY IDEOGRAPH-F9D8 -F9D9 CJK COMPATIBILITY IDEOGRAPH-F9D9 -F9DA CJK COMPATIBILITY IDEOGRAPH-F9DA -F9DB CJK COMPATIBILITY IDEOGRAPH-F9DB -F9DC CJK COMPATIBILITY IDEOGRAPH-F9DC -F9DD CJK COMPATIBILITY IDEOGRAPH-F9DD -F9DE CJK COMPATIBILITY IDEOGRAPH-F9DE -F9DF CJK COMPATIBILITY IDEOGRAPH-F9DF -F9E0 CJK COMPATIBILITY IDEOGRAPH-F9E0 -F9E1 CJK COMPATIBILITY IDEOGRAPH-F9E1 -F9E2 CJK COMPATIBILITY IDEOGRAPH-F9E2 -F9E3 CJK COMPATIBILITY IDEOGRAPH-F9E3 -F9E4 CJK COMPATIBILITY IDEOGRAPH-F9E4 -F9E5 CJK COMPATIBILITY IDEOGRAPH-F9E5 -F9E6 CJK COMPATIBILITY IDEOGRAPH-F9E6 -F9E7 CJK COMPATIBILITY IDEOGRAPH-F9E7 -F9E8 CJK COMPATIBILITY IDEOGRAPH-F9E8 -F9E9 CJK COMPATIBILITY IDEOGRAPH-F9E9 -F9EA CJK COMPATIBILITY IDEOGRAPH-F9EA -F9EB CJK COMPATIBILITY IDEOGRAPH-F9EB -F9EC CJK COMPATIBILITY IDEOGRAPH-F9EC -F9ED CJK COMPATIBILITY IDEOGRAPH-F9ED -F9EE CJK COMPATIBILITY IDEOGRAPH-F9EE -F9EF CJK COMPATIBILITY IDEOGRAPH-F9EF -F9F0 CJK COMPATIBILITY IDEOGRAPH-F9F0 -F9F1 CJK COMPATIBILITY IDEOGRAPH-F9F1 -F9F2 CJK COMPATIBILITY IDEOGRAPH-F9F2 -F9F3 CJK COMPATIBILITY IDEOGRAPH-F9F3 -F9F4 CJK COMPATIBILITY IDEOGRAPH-F9F4 -F9F5 CJK COMPATIBILITY IDEOGRAPH-F9F5 -F9F6 CJK COMPATIBILITY IDEOGRAPH-F9F6 -F9F7 CJK COMPATIBILITY IDEOGRAPH-F9F7 -F9F8 CJK COMPATIBILITY IDEOGRAPH-F9F8 -F9F9 CJK COMPATIBILITY IDEOGRAPH-F9F9 -F9FA CJK COMPATIBILITY IDEOGRAPH-F9FA -F9FB CJK COMPATIBILITY IDEOGRAPH-F9FB -F9FC CJK COMPATIBILITY IDEOGRAPH-F9FC -F9FD CJK COMPATIBILITY IDEOGRAPH-F9FD -F9FE CJK COMPATIBILITY IDEOGRAPH-F9FE -F9FF CJK COMPATIBILITY IDEOGRAPH-F9FF -FA00 CJK COMPATIBILITY IDEOGRAPH-FA00 -FA01 CJK COMPATIBILITY IDEOGRAPH-FA01 -FA02 CJK COMPATIBILITY IDEOGRAPH-FA02 -FA03 CJK COMPATIBILITY IDEOGRAPH-FA03 -FA04 CJK COMPATIBILITY IDEOGRAPH-FA04 -FA05 CJK COMPATIBILITY IDEOGRAPH-FA05 -FA06 CJK COMPATIBILITY IDEOGRAPH-FA06 -FA07 CJK COMPATIBILITY IDEOGRAPH-FA07 -FA08 CJK COMPATIBILITY IDEOGRAPH-FA08 -FA09 CJK COMPATIBILITY IDEOGRAPH-FA09 -FA0A CJK 
COMPATIBILITY IDEOGRAPH-FA0A -FA0B CJK COMPATIBILITY IDEOGRAPH-FA0B -FA0C CJK COMPATIBILITY IDEOGRAPH-FA0C -FA0D CJK COMPATIBILITY IDEOGRAPH-FA0D -FA0E CJK COMPATIBILITY IDEOGRAPH-FA0E -FA0F CJK COMPATIBILITY IDEOGRAPH-FA0F -FA10 CJK COMPATIBILITY IDEOGRAPH-FA10 -FA11 CJK COMPATIBILITY IDEOGRAPH-FA11 -FA12 CJK COMPATIBILITY IDEOGRAPH-FA12 -FA13 CJK COMPATIBILITY IDEOGRAPH-FA13 -FA14 CJK COMPATIBILITY IDEOGRAPH-FA14 -FA15 CJK COMPATIBILITY IDEOGRAPH-FA15 -FA16 CJK COMPATIBILITY IDEOGRAPH-FA16 -FA17 CJK COMPATIBILITY IDEOGRAPH-FA17 -FA18 CJK COMPATIBILITY IDEOGRAPH-FA18 -FA19 CJK COMPATIBILITY IDEOGRAPH-FA19 -FA1A CJK COMPATIBILITY IDEOGRAPH-FA1A -FA1B CJK COMPATIBILITY IDEOGRAPH-FA1B -FA1C CJK COMPATIBILITY IDEOGRAPH-FA1C -FA1D CJK COMPATIBILITY IDEOGRAPH-FA1D -FA1E CJK COMPATIBILITY IDEOGRAPH-FA1E -FA1F CJK COMPATIBILITY IDEOGRAPH-FA1F -FA20 CJK COMPATIBILITY IDEOGRAPH-FA20 -FA21 CJK COMPATIBILITY IDEOGRAPH-FA21 -FA22 CJK COMPATIBILITY IDEOGRAPH-FA22 -FA23 CJK COMPATIBILITY IDEOGRAPH-FA23 -FA24 CJK COMPATIBILITY IDEOGRAPH-FA24 -FA25 CJK COMPATIBILITY IDEOGRAPH-FA25 -FA26 CJK COMPATIBILITY IDEOGRAPH-FA26 -FA27 CJK COMPATIBILITY IDEOGRAPH-FA27 -FA28 CJK COMPATIBILITY IDEOGRAPH-FA28 -FA29 CJK COMPATIBILITY IDEOGRAPH-FA29 -FA2A CJK COMPATIBILITY IDEOGRAPH-FA2A -FA2B CJK COMPATIBILITY IDEOGRAPH-FA2B -FA2C CJK COMPATIBILITY IDEOGRAPH-FA2C -FA2D CJK COMPATIBILITY IDEOGRAPH-FA2D -FA30 CJK COMPATIBILITY IDEOGRAPH-FA30 -FA31 CJK COMPATIBILITY IDEOGRAPH-FA31 -FA32 CJK COMPATIBILITY IDEOGRAPH-FA32 -FA33 CJK COMPATIBILITY IDEOGRAPH-FA33 -FA34 CJK COMPATIBILITY IDEOGRAPH-FA34 -FA35 CJK COMPATIBILITY IDEOGRAPH-FA35 -FA36 CJK COMPATIBILITY IDEOGRAPH-FA36 -FA37 CJK COMPATIBILITY IDEOGRAPH-FA37 -FA38 CJK COMPATIBILITY IDEOGRAPH-FA38 -FA39 CJK COMPATIBILITY IDEOGRAPH-FA39 -FA3A CJK COMPATIBILITY IDEOGRAPH-FA3A -FA3B CJK COMPATIBILITY IDEOGRAPH-FA3B -FA3C CJK COMPATIBILITY IDEOGRAPH-FA3C -FA3D CJK COMPATIBILITY IDEOGRAPH-FA3D -FA3E CJK COMPATIBILITY IDEOGRAPH-FA3E -FA3F CJK 
COMPATIBILITY IDEOGRAPH-FA3F -FA40 CJK COMPATIBILITY IDEOGRAPH-FA40 -FA41 CJK COMPATIBILITY IDEOGRAPH-FA41 -FA42 CJK COMPATIBILITY IDEOGRAPH-FA42 -FA43 CJK COMPATIBILITY IDEOGRAPH-FA43 -FA44 CJK COMPATIBILITY IDEOGRAPH-FA44 -FA45 CJK COMPATIBILITY IDEOGRAPH-FA45 -FA46 CJK COMPATIBILITY IDEOGRAPH-FA46 -FA47 CJK COMPATIBILITY IDEOGRAPH-FA47 -FA48 CJK COMPATIBILITY IDEOGRAPH-FA48 -FA49 CJK COMPATIBILITY IDEOGRAPH-FA49 -FA4A CJK COMPATIBILITY IDEOGRAPH-FA4A -FA4B CJK COMPATIBILITY IDEOGRAPH-FA4B -FA4C CJK COMPATIBILITY IDEOGRAPH-FA4C -FA4D CJK COMPATIBILITY IDEOGRAPH-FA4D -FA4E CJK COMPATIBILITY IDEOGRAPH-FA4E -FA4F CJK COMPATIBILITY IDEOGRAPH-FA4F -FA50 CJK COMPATIBILITY IDEOGRAPH-FA50 -FA51 CJK COMPATIBILITY IDEOGRAPH-FA51 -FA52 CJK COMPATIBILITY IDEOGRAPH-FA52 -FA53 CJK COMPATIBILITY IDEOGRAPH-FA53 -FA54 CJK COMPATIBILITY IDEOGRAPH-FA54 -FA55 CJK COMPATIBILITY IDEOGRAPH-FA55 -FA56 CJK COMPATIBILITY IDEOGRAPH-FA56 -FA57 CJK COMPATIBILITY IDEOGRAPH-FA57 -FA58 CJK COMPATIBILITY IDEOGRAPH-FA58 -FA59 CJK COMPATIBILITY IDEOGRAPH-FA59 -FA5A CJK COMPATIBILITY IDEOGRAPH-FA5A -FA5B CJK COMPATIBILITY IDEOGRAPH-FA5B -FA5C CJK COMPATIBILITY IDEOGRAPH-FA5C -FA5D CJK COMPATIBILITY IDEOGRAPH-FA5D -FA5E CJK COMPATIBILITY IDEOGRAPH-FA5E -FA5F CJK COMPATIBILITY IDEOGRAPH-FA5F -FA60 CJK COMPATIBILITY IDEOGRAPH-FA60 -FA61 CJK COMPATIBILITY IDEOGRAPH-FA61 -FA62 CJK COMPATIBILITY IDEOGRAPH-FA62 -FA63 CJK COMPATIBILITY IDEOGRAPH-FA63 -FA64 CJK COMPATIBILITY IDEOGRAPH-FA64 -FA65 CJK COMPATIBILITY IDEOGRAPH-FA65 -FA66 CJK COMPATIBILITY IDEOGRAPH-FA66 -FA67 CJK COMPATIBILITY IDEOGRAPH-FA67 -FA68 CJK COMPATIBILITY IDEOGRAPH-FA68 -FA69 CJK COMPATIBILITY IDEOGRAPH-FA69 -FA6A CJK COMPATIBILITY IDEOGRAPH-FA6A -FA6B CJK COMPATIBILITY IDEOGRAPH-FA6B -FA6C CJK COMPATIBILITY IDEOGRAPH-FA6C -FA6D CJK COMPATIBILITY IDEOGRAPH-FA6D -FA70 CJK COMPATIBILITY IDEOGRAPH-FA70 -FA71 CJK COMPATIBILITY IDEOGRAPH-FA71 -FA72 CJK COMPATIBILITY IDEOGRAPH-FA72 -FA73 CJK COMPATIBILITY IDEOGRAPH-FA73 -FA74 CJK 
COMPATIBILITY IDEOGRAPH-FA74 -FA75 CJK COMPATIBILITY IDEOGRAPH-FA75 -FA76 CJK COMPATIBILITY IDEOGRAPH-FA76 -FA77 CJK COMPATIBILITY IDEOGRAPH-FA77 -FA78 CJK COMPATIBILITY IDEOGRAPH-FA78 -FA79 CJK COMPATIBILITY IDEOGRAPH-FA79 -FA7A CJK COMPATIBILITY IDEOGRAPH-FA7A -FA7B CJK COMPATIBILITY IDEOGRAPH-FA7B -FA7C CJK COMPATIBILITY IDEOGRAPH-FA7C -FA7D CJK COMPATIBILITY IDEOGRAPH-FA7D -FA7E CJK COMPATIBILITY IDEOGRAPH-FA7E -FA7F CJK COMPATIBILITY IDEOGRAPH-FA7F -FA80 CJK COMPATIBILITY IDEOGRAPH-FA80 -FA81 CJK COMPATIBILITY IDEOGRAPH-FA81 -FA82 CJK COMPATIBILITY IDEOGRAPH-FA82 -FA83 CJK COMPATIBILITY IDEOGRAPH-FA83 -FA84 CJK COMPATIBILITY IDEOGRAPH-FA84 -FA85 CJK COMPATIBILITY IDEOGRAPH-FA85 -FA86 CJK COMPATIBILITY IDEOGRAPH-FA86 -FA87 CJK COMPATIBILITY IDEOGRAPH-FA87 -FA88 CJK COMPATIBILITY IDEOGRAPH-FA88 -FA89 CJK COMPATIBILITY IDEOGRAPH-FA89 -FA8A CJK COMPATIBILITY IDEOGRAPH-FA8A -FA8B CJK COMPATIBILITY IDEOGRAPH-FA8B -FA8C CJK COMPATIBILITY IDEOGRAPH-FA8C -FA8D CJK COMPATIBILITY IDEOGRAPH-FA8D -FA8E CJK COMPATIBILITY IDEOGRAPH-FA8E -FA8F CJK COMPATIBILITY IDEOGRAPH-FA8F -FA90 CJK COMPATIBILITY IDEOGRAPH-FA90 -FA91 CJK COMPATIBILITY IDEOGRAPH-FA91 -FA92 CJK COMPATIBILITY IDEOGRAPH-FA92 -FA93 CJK COMPATIBILITY IDEOGRAPH-FA93 -FA94 CJK COMPATIBILITY IDEOGRAPH-FA94 -FA95 CJK COMPATIBILITY IDEOGRAPH-FA95 -FA96 CJK COMPATIBILITY IDEOGRAPH-FA96 -FA97 CJK COMPATIBILITY IDEOGRAPH-FA97 -FA98 CJK COMPATIBILITY IDEOGRAPH-FA98 -FA99 CJK COMPATIBILITY IDEOGRAPH-FA99 -FA9A CJK COMPATIBILITY IDEOGRAPH-FA9A -FA9B CJK COMPATIBILITY IDEOGRAPH-FA9B -FA9C CJK COMPATIBILITY IDEOGRAPH-FA9C -FA9D CJK COMPATIBILITY IDEOGRAPH-FA9D -FA9E CJK COMPATIBILITY IDEOGRAPH-FA9E -FA9F CJK COMPATIBILITY IDEOGRAPH-FA9F -FAA0 CJK COMPATIBILITY IDEOGRAPH-FAA0 -FAA1 CJK COMPATIBILITY IDEOGRAPH-FAA1 -FAA2 CJK COMPATIBILITY IDEOGRAPH-FAA2 -FAA3 CJK COMPATIBILITY IDEOGRAPH-FAA3 -FAA4 CJK COMPATIBILITY IDEOGRAPH-FAA4 -FAA5 CJK COMPATIBILITY IDEOGRAPH-FAA5 -FAA6 CJK COMPATIBILITY IDEOGRAPH-FAA6 -FAA7 CJK 
COMPATIBILITY IDEOGRAPH-FAA7 -FAA8 CJK COMPATIBILITY IDEOGRAPH-FAA8 -FAA9 CJK COMPATIBILITY IDEOGRAPH-FAA9 -FAAA CJK COMPATIBILITY IDEOGRAPH-FAAA -FAAB CJK COMPATIBILITY IDEOGRAPH-FAAB -FAAC CJK COMPATIBILITY IDEOGRAPH-FAAC -FAAD CJK COMPATIBILITY IDEOGRAPH-FAAD -FAAE CJK COMPATIBILITY IDEOGRAPH-FAAE -FAAF CJK COMPATIBILITY IDEOGRAPH-FAAF -FAB0 CJK COMPATIBILITY IDEOGRAPH-FAB0 -FAB1 CJK COMPATIBILITY IDEOGRAPH-FAB1 -FAB2 CJK COMPATIBILITY IDEOGRAPH-FAB2 -FAB3 CJK COMPATIBILITY IDEOGRAPH-FAB3 -FAB4 CJK COMPATIBILITY IDEOGRAPH-FAB4 -FAB5 CJK COMPATIBILITY IDEOGRAPH-FAB5 -FAB6 CJK COMPATIBILITY IDEOGRAPH-FAB6 -FAB7 CJK COMPATIBILITY IDEOGRAPH-FAB7 -FAB8 CJK COMPATIBILITY IDEOGRAPH-FAB8 -FAB9 CJK COMPATIBILITY IDEOGRAPH-FAB9 -FABA CJK COMPATIBILITY IDEOGRAPH-FABA -FABB CJK COMPATIBILITY IDEOGRAPH-FABB -FABC CJK COMPATIBILITY IDEOGRAPH-FABC -FABD CJK COMPATIBILITY IDEOGRAPH-FABD -FABE CJK COMPATIBILITY IDEOGRAPH-FABE -FABF CJK COMPATIBILITY IDEOGRAPH-FABF -FAC0 CJK COMPATIBILITY IDEOGRAPH-FAC0 -FAC1 CJK COMPATIBILITY IDEOGRAPH-FAC1 -FAC2 CJK COMPATIBILITY IDEOGRAPH-FAC2 -FAC3 CJK COMPATIBILITY IDEOGRAPH-FAC3 -FAC4 CJK COMPATIBILITY IDEOGRAPH-FAC4 -FAC5 CJK COMPATIBILITY IDEOGRAPH-FAC5 -FAC6 CJK COMPATIBILITY IDEOGRAPH-FAC6 -FAC7 CJK COMPATIBILITY IDEOGRAPH-FAC7 -FAC8 CJK COMPATIBILITY IDEOGRAPH-FAC8 -FAC9 CJK COMPATIBILITY IDEOGRAPH-FAC9 -FACA CJK COMPATIBILITY IDEOGRAPH-FACA -FACB CJK COMPATIBILITY IDEOGRAPH-FACB -FACC CJK COMPATIBILITY IDEOGRAPH-FACC -FACD CJK COMPATIBILITY IDEOGRAPH-FACD -FACE CJK COMPATIBILITY IDEOGRAPH-FACE -FACF CJK COMPATIBILITY IDEOGRAPH-FACF -FAD0 CJK COMPATIBILITY IDEOGRAPH-FAD0 -FAD1 CJK COMPATIBILITY IDEOGRAPH-FAD1 -FAD2 CJK COMPATIBILITY IDEOGRAPH-FAD2 -FAD3 CJK COMPATIBILITY IDEOGRAPH-FAD3 -FAD4 CJK COMPATIBILITY IDEOGRAPH-FAD4 -FAD5 CJK COMPATIBILITY IDEOGRAPH-FAD5 -FAD6 CJK COMPATIBILITY IDEOGRAPH-FAD6 -FAD7 CJK COMPATIBILITY IDEOGRAPH-FAD7 -FAD8 CJK COMPATIBILITY IDEOGRAPH-FAD8 -FAD9 CJK COMPATIBILITY IDEOGRAPH-FAD9 -FB00 LATIN SMALL 
LIGATURE FF -FB01 LATIN SMALL LIGATURE FI -FB02 LATIN SMALL LIGATURE FL -FB03 LATIN SMALL LIGATURE FFI -FB04 LATIN SMALL LIGATURE FFL -FB05 LATIN SMALL LIGATURE LONG S T -FB06 LATIN SMALL LIGATURE ST -FB13 ARMENIAN SMALL LIGATURE MEN NOW -FB14 ARMENIAN SMALL LIGATURE MEN ECH -FB15 ARMENIAN SMALL LIGATURE MEN INI -FB16 ARMENIAN SMALL LIGATURE VEW NOW -FB17 ARMENIAN SMALL LIGATURE MEN XEH -FB1D HEBREW LETTER YOD WITH HIRIQ -FB1E HEBREW POINT JUDEO-SPANISH VARIKA -FB1F HEBREW LIGATURE YIDDISH YOD YOD PATAH -FB20 HEBREW LETTER ALTERNATIVE AYIN -FB21 HEBREW LETTER WIDE ALEF -FB22 HEBREW LETTER WIDE DALET -FB23 HEBREW LETTER WIDE HE -FB24 HEBREW LETTER WIDE KAF -FB25 HEBREW LETTER WIDE LAMED -FB26 HEBREW LETTER WIDE FINAL MEM -FB27 HEBREW LETTER WIDE RESH -FB28 HEBREW LETTER WIDE TAV -FB29 HEBREW LETTER ALTERNATIVE PLUS SIGN -FB2A HEBREW LETTER SHIN WITH SHIN DOT -FB2B HEBREW LETTER SHIN WITH SIN DOT -FB2C HEBREW LETTER SHIN WITH DAGESH AND SHIN DOT -FB2D HEBREW LETTER SHIN WITH DAGESH AND SIN DOT -FB2E HEBREW LETTER ALEF WITH PATAH -FB2F HEBREW LETTER ALEF WITH QAMATS -FB30 HEBREW LETTER ALEF WITH MAPIQ -FB31 HEBREW LETTER BET WITH DAGESH -FB32 HEBREW LETTER GIMEL WITH DAGESH -FB33 HEBREW LETTER DALET WITH DAGESH -FB34 HEBREW LETTER HE WITH MAPIQ -FB35 HEBREW LETTER VAV WITH DAGESH -FB36 HEBREW LETTER ZAYIN WITH DAGESH -FB38 HEBREW LETTER TET WITH DAGESH -FB39 HEBREW LETTER YOD WITH DAGESH -FB3A HEBREW LETTER FINAL KAF WITH DAGESH -FB3B HEBREW LETTER KAF WITH DAGESH -FB3C HEBREW LETTER LAMED WITH DAGESH -FB3E HEBREW LETTER MEM WITH DAGESH -FB40 HEBREW LETTER NUN WITH DAGESH -FB41 HEBREW LETTER SAMEKH WITH DAGESH -FB43 HEBREW LETTER FINAL PE WITH DAGESH -FB44 HEBREW LETTER PE WITH DAGESH -FB46 HEBREW LETTER TSADI WITH DAGESH -FB47 HEBREW LETTER QOF WITH DAGESH -FB48 HEBREW LETTER RESH WITH DAGESH -FB49 HEBREW LETTER SHIN WITH DAGESH -FB4A HEBREW LETTER TAV WITH DAGESH -FB4B HEBREW LETTER VAV WITH HOLAM -FB4C HEBREW LETTER BET WITH RAFE -FB4D HEBREW LETTER KAF WITH RAFE 
-FB4E HEBREW LETTER PE WITH RAFE -FB4F HEBREW LIGATURE ALEF LAMED -FB50 ARABIC LETTER ALEF WASLA ISOLATED FORM -FB51 ARABIC LETTER ALEF WASLA FINAL FORM -FB52 ARABIC LETTER BEEH ISOLATED FORM -FB53 ARABIC LETTER BEEH FINAL FORM -FB54 ARABIC LETTER BEEH INITIAL FORM -FB55 ARABIC LETTER BEEH MEDIAL FORM -FB56 ARABIC LETTER PEH ISOLATED FORM -FB57 ARABIC LETTER PEH FINAL FORM -FB58 ARABIC LETTER PEH INITIAL FORM -FB59 ARABIC LETTER PEH MEDIAL FORM -FB5A ARABIC LETTER BEHEH ISOLATED FORM -FB5B ARABIC LETTER BEHEH FINAL FORM -FB5C ARABIC LETTER BEHEH INITIAL FORM -FB5D ARABIC LETTER BEHEH MEDIAL FORM -FB5E ARABIC LETTER TTEHEH ISOLATED FORM -FB5F ARABIC LETTER TTEHEH FINAL FORM -FB60 ARABIC LETTER TTEHEH INITIAL FORM -FB61 ARABIC LETTER TTEHEH MEDIAL FORM -FB62 ARABIC LETTER TEHEH ISOLATED FORM -FB63 ARABIC LETTER TEHEH FINAL FORM -FB64 ARABIC LETTER TEHEH INITIAL FORM -FB65 ARABIC LETTER TEHEH MEDIAL FORM -FB66 ARABIC LETTER TTEH ISOLATED FORM -FB67 ARABIC LETTER TTEH FINAL FORM -FB68 ARABIC LETTER TTEH INITIAL FORM -FB69 ARABIC LETTER TTEH MEDIAL FORM -FB6A ARABIC LETTER VEH ISOLATED FORM -FB6B ARABIC LETTER VEH FINAL FORM -FB6C ARABIC LETTER VEH INITIAL FORM -FB6D ARABIC LETTER VEH MEDIAL FORM -FB6E ARABIC LETTER PEHEH ISOLATED FORM -FB6F ARABIC LETTER PEHEH FINAL FORM -FB70 ARABIC LETTER PEHEH INITIAL FORM -FB71 ARABIC LETTER PEHEH MEDIAL FORM -FB72 ARABIC LETTER DYEH ISOLATED FORM -FB73 ARABIC LETTER DYEH FINAL FORM -FB74 ARABIC LETTER DYEH INITIAL FORM -FB75 ARABIC LETTER DYEH MEDIAL FORM -FB76 ARABIC LETTER NYEH ISOLATED FORM -FB77 ARABIC LETTER NYEH FINAL FORM -FB78 ARABIC LETTER NYEH INITIAL FORM -FB79 ARABIC LETTER NYEH MEDIAL FORM -FB7A ARABIC LETTER TCHEH ISOLATED FORM -FB7B ARABIC LETTER TCHEH FINAL FORM -FB7C ARABIC LETTER TCHEH INITIAL FORM -FB7D ARABIC LETTER TCHEH MEDIAL FORM -FB7E ARABIC LETTER TCHEHEH ISOLATED FORM -FB7F ARABIC LETTER TCHEHEH FINAL FORM -FB80 ARABIC LETTER TCHEHEH INITIAL FORM -FB81 ARABIC LETTER TCHEHEH MEDIAL FORM -FB82 ARABIC 
LETTER DDAHAL ISOLATED FORM -FB83 ARABIC LETTER DDAHAL FINAL FORM -FB84 ARABIC LETTER DAHAL ISOLATED FORM -FB85 ARABIC LETTER DAHAL FINAL FORM -FB86 ARABIC LETTER DUL ISOLATED FORM -FB87 ARABIC LETTER DUL FINAL FORM -FB88 ARABIC LETTER DDAL ISOLATED FORM -FB89 ARABIC LETTER DDAL FINAL FORM -FB8A ARABIC LETTER JEH ISOLATED FORM -FB8B ARABIC LETTER JEH FINAL FORM -FB8C ARABIC LETTER RREH ISOLATED FORM -FB8D ARABIC LETTER RREH FINAL FORM -FB8E ARABIC LETTER KEHEH ISOLATED FORM -FB8F ARABIC LETTER KEHEH FINAL FORM -FB90 ARABIC LETTER KEHEH INITIAL FORM -FB91 ARABIC LETTER KEHEH MEDIAL FORM -FB92 ARABIC LETTER GAF ISOLATED FORM -FB93 ARABIC LETTER GAF FINAL FORM -FB94 ARABIC LETTER GAF INITIAL FORM -FB95 ARABIC LETTER GAF MEDIAL FORM -FB96 ARABIC LETTER GUEH ISOLATED FORM -FB97 ARABIC LETTER GUEH FINAL FORM -FB98 ARABIC LETTER GUEH INITIAL FORM -FB99 ARABIC LETTER GUEH MEDIAL FORM -FB9A ARABIC LETTER NGOEH ISOLATED FORM -FB9B ARABIC LETTER NGOEH FINAL FORM -FB9C ARABIC LETTER NGOEH INITIAL FORM -FB9D ARABIC LETTER NGOEH MEDIAL FORM -FB9E ARABIC LETTER NOON GHUNNA ISOLATED FORM -FB9F ARABIC LETTER NOON GHUNNA FINAL FORM -FBA0 ARABIC LETTER RNOON ISOLATED FORM -FBA1 ARABIC LETTER RNOON FINAL FORM -FBA2 ARABIC LETTER RNOON INITIAL FORM -FBA3 ARABIC LETTER RNOON MEDIAL FORM -FBA4 ARABIC LETTER HEH WITH YEH ABOVE ISOLATED FORM -FBA5 ARABIC LETTER HEH WITH YEH ABOVE FINAL FORM -FBA6 ARABIC LETTER HEH GOAL ISOLATED FORM -FBA7 ARABIC LETTER HEH GOAL FINAL FORM -FBA8 ARABIC LETTER HEH GOAL INITIAL FORM -FBA9 ARABIC LETTER HEH GOAL MEDIAL FORM -FBAA ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM -FBAB ARABIC LETTER HEH DOACHASHMEE FINAL FORM -FBAC ARABIC LETTER HEH DOACHASHMEE INITIAL FORM -FBAD ARABIC LETTER HEH DOACHASHMEE MEDIAL FORM -FBAE ARABIC LETTER YEH BARREE ISOLATED FORM -FBAF ARABIC LETTER YEH BARREE FINAL FORM -FBB0 ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM -FBB1 ARABIC LETTER YEH BARREE WITH HAMZA ABOVE FINAL FORM -FBD3 ARABIC LETTER NG ISOLATED FORM -FBD4 
ARABIC LETTER NG FINAL FORM -FBD5 ARABIC LETTER NG INITIAL FORM -FBD6 ARABIC LETTER NG MEDIAL FORM -FBD7 ARABIC LETTER U ISOLATED FORM -FBD8 ARABIC LETTER U FINAL FORM -FBD9 ARABIC LETTER OE ISOLATED FORM -FBDA ARABIC LETTER OE FINAL FORM -FBDB ARABIC LETTER YU ISOLATED FORM -FBDC ARABIC LETTER YU FINAL FORM -FBDD ARABIC LETTER U WITH HAMZA ABOVE ISOLATED FORM -FBDE ARABIC LETTER VE ISOLATED FORM -FBDF ARABIC LETTER VE FINAL FORM -FBE0 ARABIC LETTER KIRGHIZ OE ISOLATED FORM -FBE1 ARABIC LETTER KIRGHIZ OE FINAL FORM -FBE2 ARABIC LETTER KIRGHIZ YU ISOLATED FORM -FBE3 ARABIC LETTER KIRGHIZ YU FINAL FORM -FBE4 ARABIC LETTER E ISOLATED FORM -FBE5 ARABIC LETTER E FINAL FORM -FBE6 ARABIC LETTER E INITIAL FORM -FBE7 ARABIC LETTER E MEDIAL FORM -FBE8 ARABIC LETTER UIGHUR KAZAKH KIRGHIZ ALEF MAKSURA INITIAL FORM -FBE9 ARABIC LETTER UIGHUR KAZAKH KIRGHIZ ALEF MAKSURA MEDIAL FORM -FBEA ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH ALEF ISOLATED FORM -FBEB ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH ALEF FINAL FORM -FBEC ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH AE ISOLATED FORM -FBED ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH AE FINAL FORM -FBEE ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH WAW ISOLATED FORM -FBEF ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH WAW FINAL FORM -FBF0 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH U ISOLATED FORM -FBF1 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH U FINAL FORM -FBF2 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH OE ISOLATED FORM -FBF3 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH OE FINAL FORM -FBF4 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH YU ISOLATED FORM -FBF5 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH YU FINAL FORM -FBF6 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH E ISOLATED FORM -FBF7 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH E FINAL FORM -FBF8 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH E INITIAL FORM -FBF9 ARABIC LIGATURE UIGHUR KIRGHIZ YEH WITH HAMZA ABOVE WITH ALEF MAKSURA ISOLATED FORM -FBFA ARABIC LIGATURE UIGHUR KIRGHIZ YEH WITH HAMZA ABOVE WITH ALEF 
MAKSURA FINAL FORM -FBFB ARABIC LIGATURE UIGHUR KIRGHIZ YEH WITH HAMZA ABOVE WITH ALEF MAKSURA INITIAL FORM -FBFC ARABIC LETTER FARSI YEH ISOLATED FORM -FBFD ARABIC LETTER FARSI YEH FINAL FORM -FBFE ARABIC LETTER FARSI YEH INITIAL FORM -FBFF ARABIC LETTER FARSI YEH MEDIAL FORM -FC00 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH JEEM ISOLATED FORM -FC01 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH HAH ISOLATED FORM -FC02 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH MEEM ISOLATED FORM -FC03 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH ALEF MAKSURA ISOLATED FORM -FC04 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH YEH ISOLATED FORM -FC05 ARABIC LIGATURE BEH WITH JEEM ISOLATED FORM -FC06 ARABIC LIGATURE BEH WITH HAH ISOLATED FORM -FC07 ARABIC LIGATURE BEH WITH KHAH ISOLATED FORM -FC08 ARABIC LIGATURE BEH WITH MEEM ISOLATED FORM -FC09 ARABIC LIGATURE BEH WITH ALEF MAKSURA ISOLATED FORM -FC0A ARABIC LIGATURE BEH WITH YEH ISOLATED FORM -FC0B ARABIC LIGATURE TEH WITH JEEM ISOLATED FORM -FC0C ARABIC LIGATURE TEH WITH HAH ISOLATED FORM -FC0D ARABIC LIGATURE TEH WITH KHAH ISOLATED FORM -FC0E ARABIC LIGATURE TEH WITH MEEM ISOLATED FORM -FC0F ARABIC LIGATURE TEH WITH ALEF MAKSURA ISOLATED FORM -FC10 ARABIC LIGATURE TEH WITH YEH ISOLATED FORM -FC11 ARABIC LIGATURE THEH WITH JEEM ISOLATED FORM -FC12 ARABIC LIGATURE THEH WITH MEEM ISOLATED FORM -FC13 ARABIC LIGATURE THEH WITH ALEF MAKSURA ISOLATED FORM -FC14 ARABIC LIGATURE THEH WITH YEH ISOLATED FORM -FC15 ARABIC LIGATURE JEEM WITH HAH ISOLATED FORM -FC16 ARABIC LIGATURE JEEM WITH MEEM ISOLATED FORM -FC17 ARABIC LIGATURE HAH WITH JEEM ISOLATED FORM -FC18 ARABIC LIGATURE HAH WITH MEEM ISOLATED FORM -FC19 ARABIC LIGATURE KHAH WITH JEEM ISOLATED FORM -FC1A ARABIC LIGATURE KHAH WITH HAH ISOLATED FORM -FC1B ARABIC LIGATURE KHAH WITH MEEM ISOLATED FORM -FC1C ARABIC LIGATURE SEEN WITH JEEM ISOLATED FORM -FC1D ARABIC LIGATURE SEEN WITH HAH ISOLATED FORM -FC1E ARABIC LIGATURE SEEN WITH KHAH ISOLATED FORM -FC1F ARABIC LIGATURE SEEN WITH MEEM ISOLATED FORM 
-FC20 ARABIC LIGATURE SAD WITH HAH ISOLATED FORM -FC21 ARABIC LIGATURE SAD WITH MEEM ISOLATED FORM -FC22 ARABIC LIGATURE DAD WITH JEEM ISOLATED FORM -FC23 ARABIC LIGATURE DAD WITH HAH ISOLATED FORM -FC24 ARABIC LIGATURE DAD WITH KHAH ISOLATED FORM -FC25 ARABIC LIGATURE DAD WITH MEEM ISOLATED FORM -FC26 ARABIC LIGATURE TAH WITH HAH ISOLATED FORM -FC27 ARABIC LIGATURE TAH WITH MEEM ISOLATED FORM -FC28 ARABIC LIGATURE ZAH WITH MEEM ISOLATED FORM -FC29 ARABIC LIGATURE AIN WITH JEEM ISOLATED FORM -FC2A ARABIC LIGATURE AIN WITH MEEM ISOLATED FORM -FC2B ARABIC LIGATURE GHAIN WITH JEEM ISOLATED FORM -FC2C ARABIC LIGATURE GHAIN WITH MEEM ISOLATED FORM -FC2D ARABIC LIGATURE FEH WITH JEEM ISOLATED FORM -FC2E ARABIC LIGATURE FEH WITH HAH ISOLATED FORM -FC2F ARABIC LIGATURE FEH WITH KHAH ISOLATED FORM -FC30 ARABIC LIGATURE FEH WITH MEEM ISOLATED FORM -FC31 ARABIC LIGATURE FEH WITH ALEF MAKSURA ISOLATED FORM -FC32 ARABIC LIGATURE FEH WITH YEH ISOLATED FORM -FC33 ARABIC LIGATURE QAF WITH HAH ISOLATED FORM -FC34 ARABIC LIGATURE QAF WITH MEEM ISOLATED FORM -FC35 ARABIC LIGATURE QAF WITH ALEF MAKSURA ISOLATED FORM -FC36 ARABIC LIGATURE QAF WITH YEH ISOLATED FORM -FC37 ARABIC LIGATURE KAF WITH ALEF ISOLATED FORM -FC38 ARABIC LIGATURE KAF WITH JEEM ISOLATED FORM -FC39 ARABIC LIGATURE KAF WITH HAH ISOLATED FORM -FC3A ARABIC LIGATURE KAF WITH KHAH ISOLATED FORM -FC3B ARABIC LIGATURE KAF WITH LAM ISOLATED FORM -FC3C ARABIC LIGATURE KAF WITH MEEM ISOLATED FORM -FC3D ARABIC LIGATURE KAF WITH ALEF MAKSURA ISOLATED FORM -FC3E ARABIC LIGATURE KAF WITH YEH ISOLATED FORM -FC3F ARABIC LIGATURE LAM WITH JEEM ISOLATED FORM -FC40 ARABIC LIGATURE LAM WITH HAH ISOLATED FORM -FC41 ARABIC LIGATURE LAM WITH KHAH ISOLATED FORM -FC42 ARABIC LIGATURE LAM WITH MEEM ISOLATED FORM -FC43 ARABIC LIGATURE LAM WITH ALEF MAKSURA ISOLATED FORM -FC44 ARABIC LIGATURE LAM WITH YEH ISOLATED FORM -FC45 ARABIC LIGATURE MEEM WITH JEEM ISOLATED FORM -FC46 ARABIC LIGATURE MEEM WITH HAH ISOLATED FORM -FC47 ARABIC LIGATURE 
MEEM WITH KHAH ISOLATED FORM -FC48 ARABIC LIGATURE MEEM WITH MEEM ISOLATED FORM -FC49 ARABIC LIGATURE MEEM WITH ALEF MAKSURA ISOLATED FORM -FC4A ARABIC LIGATURE MEEM WITH YEH ISOLATED FORM -FC4B ARABIC LIGATURE NOON WITH JEEM ISOLATED FORM -FC4C ARABIC LIGATURE NOON WITH HAH ISOLATED FORM -FC4D ARABIC LIGATURE NOON WITH KHAH ISOLATED FORM -FC4E ARABIC LIGATURE NOON WITH MEEM ISOLATED FORM -FC4F ARABIC LIGATURE NOON WITH ALEF MAKSURA ISOLATED FORM -FC50 ARABIC LIGATURE NOON WITH YEH ISOLATED FORM -FC51 ARABIC LIGATURE HEH WITH JEEM ISOLATED FORM -FC52 ARABIC LIGATURE HEH WITH MEEM ISOLATED FORM -FC53 ARABIC LIGATURE HEH WITH ALEF MAKSURA ISOLATED FORM -FC54 ARABIC LIGATURE HEH WITH YEH ISOLATED FORM -FC55 ARABIC LIGATURE YEH WITH JEEM ISOLATED FORM -FC56 ARABIC LIGATURE YEH WITH HAH ISOLATED FORM -FC57 ARABIC LIGATURE YEH WITH KHAH ISOLATED FORM -FC58 ARABIC LIGATURE YEH WITH MEEM ISOLATED FORM -FC59 ARABIC LIGATURE YEH WITH ALEF MAKSURA ISOLATED FORM -FC5A ARABIC LIGATURE YEH WITH YEH ISOLATED FORM -FC5B ARABIC LIGATURE THAL WITH SUPERSCRIPT ALEF ISOLATED FORM -FC5C ARABIC LIGATURE REH WITH SUPERSCRIPT ALEF ISOLATED FORM -FC5D ARABIC LIGATURE ALEF MAKSURA WITH SUPERSCRIPT ALEF ISOLATED FORM -FC5E ARABIC LIGATURE SHADDA WITH DAMMATAN ISOLATED FORM -FC5F ARABIC LIGATURE SHADDA WITH KASRATAN ISOLATED FORM -FC60 ARABIC LIGATURE SHADDA WITH FATHA ISOLATED FORM -FC61 ARABIC LIGATURE SHADDA WITH DAMMA ISOLATED FORM -FC62 ARABIC LIGATURE SHADDA WITH KASRA ISOLATED FORM -FC63 ARABIC LIGATURE SHADDA WITH SUPERSCRIPT ALEF ISOLATED FORM -FC64 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH REH FINAL FORM -FC65 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH ZAIN FINAL FORM -FC66 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH MEEM FINAL FORM -FC67 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH NOON FINAL FORM -FC68 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH ALEF MAKSURA FINAL FORM -FC69 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH YEH FINAL FORM -FC6A ARABIC LIGATURE BEH WITH REH FINAL FORM -FC6B ARABIC 
LIGATURE BEH WITH ZAIN FINAL FORM -FC6C ARABIC LIGATURE BEH WITH MEEM FINAL FORM -FC6D ARABIC LIGATURE BEH WITH NOON FINAL FORM -FC6E ARABIC LIGATURE BEH WITH ALEF MAKSURA FINAL FORM -FC6F ARABIC LIGATURE BEH WITH YEH FINAL FORM -FC70 ARABIC LIGATURE TEH WITH REH FINAL FORM -FC71 ARABIC LIGATURE TEH WITH ZAIN FINAL FORM -FC72 ARABIC LIGATURE TEH WITH MEEM FINAL FORM -FC73 ARABIC LIGATURE TEH WITH NOON FINAL FORM -FC74 ARABIC LIGATURE TEH WITH ALEF MAKSURA FINAL FORM -FC75 ARABIC LIGATURE TEH WITH YEH FINAL FORM -FC76 ARABIC LIGATURE THEH WITH REH FINAL FORM -FC77 ARABIC LIGATURE THEH WITH ZAIN FINAL FORM -FC78 ARABIC LIGATURE THEH WITH MEEM FINAL FORM -FC79 ARABIC LIGATURE THEH WITH NOON FINAL FORM -FC7A ARABIC LIGATURE THEH WITH ALEF MAKSURA FINAL FORM -FC7B ARABIC LIGATURE THEH WITH YEH FINAL FORM -FC7C ARABIC LIGATURE FEH WITH ALEF MAKSURA FINAL FORM -FC7D ARABIC LIGATURE FEH WITH YEH FINAL FORM -FC7E ARABIC LIGATURE QAF WITH ALEF MAKSURA FINAL FORM -FC7F ARABIC LIGATURE QAF WITH YEH FINAL FORM -FC80 ARABIC LIGATURE KAF WITH ALEF FINAL FORM -FC81 ARABIC LIGATURE KAF WITH LAM FINAL FORM -FC82 ARABIC LIGATURE KAF WITH MEEM FINAL FORM -FC83 ARABIC LIGATURE KAF WITH ALEF MAKSURA FINAL FORM -FC84 ARABIC LIGATURE KAF WITH YEH FINAL FORM -FC85 ARABIC LIGATURE LAM WITH MEEM FINAL FORM -FC86 ARABIC LIGATURE LAM WITH ALEF MAKSURA FINAL FORM -FC87 ARABIC LIGATURE LAM WITH YEH FINAL FORM -FC88 ARABIC LIGATURE MEEM WITH ALEF FINAL FORM -FC89 ARABIC LIGATURE MEEM WITH MEEM FINAL FORM -FC8A ARABIC LIGATURE NOON WITH REH FINAL FORM -FC8B ARABIC LIGATURE NOON WITH ZAIN FINAL FORM -FC8C ARABIC LIGATURE NOON WITH MEEM FINAL FORM -FC8D ARABIC LIGATURE NOON WITH NOON FINAL FORM -FC8E ARABIC LIGATURE NOON WITH ALEF MAKSURA FINAL FORM -FC8F ARABIC LIGATURE NOON WITH YEH FINAL FORM -FC90 ARABIC LIGATURE ALEF MAKSURA WITH SUPERSCRIPT ALEF FINAL FORM -FC91 ARABIC LIGATURE YEH WITH REH FINAL FORM -FC92 ARABIC LIGATURE YEH WITH ZAIN FINAL FORM -FC93 ARABIC LIGATURE YEH WITH MEEM FINAL FORM 
-FC94 ARABIC LIGATURE YEH WITH NOON FINAL FORM -FC95 ARABIC LIGATURE YEH WITH ALEF MAKSURA FINAL FORM -FC96 ARABIC LIGATURE YEH WITH YEH FINAL FORM -FC97 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH JEEM INITIAL FORM -FC98 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH HAH INITIAL FORM -FC99 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH KHAH INITIAL FORM -FC9A ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH MEEM INITIAL FORM -FC9B ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH HEH INITIAL FORM -FC9C ARABIC LIGATURE BEH WITH JEEM INITIAL FORM -FC9D ARABIC LIGATURE BEH WITH HAH INITIAL FORM -FC9E ARABIC LIGATURE BEH WITH KHAH INITIAL FORM -FC9F ARABIC LIGATURE BEH WITH MEEM INITIAL FORM -FCA0 ARABIC LIGATURE BEH WITH HEH INITIAL FORM -FCA1 ARABIC LIGATURE TEH WITH JEEM INITIAL FORM -FCA2 ARABIC LIGATURE TEH WITH HAH INITIAL FORM -FCA3 ARABIC LIGATURE TEH WITH KHAH INITIAL FORM -FCA4 ARABIC LIGATURE TEH WITH MEEM INITIAL FORM -FCA5 ARABIC LIGATURE TEH WITH HEH INITIAL FORM -FCA6 ARABIC LIGATURE THEH WITH MEEM INITIAL FORM -FCA7 ARABIC LIGATURE JEEM WITH HAH INITIAL FORM -FCA8 ARABIC LIGATURE JEEM WITH MEEM INITIAL FORM -FCA9 ARABIC LIGATURE HAH WITH JEEM INITIAL FORM -FCAA ARABIC LIGATURE HAH WITH MEEM INITIAL FORM -FCAB ARABIC LIGATURE KHAH WITH JEEM INITIAL FORM -FCAC ARABIC LIGATURE KHAH WITH MEEM INITIAL FORM -FCAD ARABIC LIGATURE SEEN WITH JEEM INITIAL FORM -FCAE ARABIC LIGATURE SEEN WITH HAH INITIAL FORM -FCAF ARABIC LIGATURE SEEN WITH KHAH INITIAL FORM -FCB0 ARABIC LIGATURE SEEN WITH MEEM INITIAL FORM -FCB1 ARABIC LIGATURE SAD WITH HAH INITIAL FORM -FCB2 ARABIC LIGATURE SAD WITH KHAH INITIAL FORM -FCB3 ARABIC LIGATURE SAD WITH MEEM INITIAL FORM -FCB4 ARABIC LIGATURE DAD WITH JEEM INITIAL FORM -FCB5 ARABIC LIGATURE DAD WITH HAH INITIAL FORM -FCB6 ARABIC LIGATURE DAD WITH KHAH INITIAL FORM -FCB7 ARABIC LIGATURE DAD WITH MEEM INITIAL FORM -FCB8 ARABIC LIGATURE TAH WITH HAH INITIAL FORM -FCB9 ARABIC LIGATURE ZAH WITH MEEM INITIAL FORM -FCBA ARABIC LIGATURE AIN WITH JEEM INITIAL FORM 
-FCBB ARABIC LIGATURE AIN WITH MEEM INITIAL FORM -FCBC ARABIC LIGATURE GHAIN WITH JEEM INITIAL FORM -FCBD ARABIC LIGATURE GHAIN WITH MEEM INITIAL FORM -FCBE ARABIC LIGATURE FEH WITH JEEM INITIAL FORM -FCBF ARABIC LIGATURE FEH WITH HAH INITIAL FORM -FCC0 ARABIC LIGATURE FEH WITH KHAH INITIAL FORM -FCC1 ARABIC LIGATURE FEH WITH MEEM INITIAL FORM -FCC2 ARABIC LIGATURE QAF WITH HAH INITIAL FORM -FCC3 ARABIC LIGATURE QAF WITH MEEM INITIAL FORM -FCC4 ARABIC LIGATURE KAF WITH JEEM INITIAL FORM -FCC5 ARABIC LIGATURE KAF WITH HAH INITIAL FORM -FCC6 ARABIC LIGATURE KAF WITH KHAH INITIAL FORM -FCC7 ARABIC LIGATURE KAF WITH LAM INITIAL FORM -FCC8 ARABIC LIGATURE KAF WITH MEEM INITIAL FORM -FCC9 ARABIC LIGATURE LAM WITH JEEM INITIAL FORM -FCCA ARABIC LIGATURE LAM WITH HAH INITIAL FORM -FCCB ARABIC LIGATURE LAM WITH KHAH INITIAL FORM -FCCC ARABIC LIGATURE LAM WITH MEEM INITIAL FORM -FCCD ARABIC LIGATURE LAM WITH HEH INITIAL FORM -FCCE ARABIC LIGATURE MEEM WITH JEEM INITIAL FORM -FCCF ARABIC LIGATURE MEEM WITH HAH INITIAL FORM -FCD0 ARABIC LIGATURE MEEM WITH KHAH INITIAL FORM -FCD1 ARABIC LIGATURE MEEM WITH MEEM INITIAL FORM -FCD2 ARABIC LIGATURE NOON WITH JEEM INITIAL FORM -FCD3 ARABIC LIGATURE NOON WITH HAH INITIAL FORM -FCD4 ARABIC LIGATURE NOON WITH KHAH INITIAL FORM -FCD5 ARABIC LIGATURE NOON WITH MEEM INITIAL FORM -FCD6 ARABIC LIGATURE NOON WITH HEH INITIAL FORM -FCD7 ARABIC LIGATURE HEH WITH JEEM INITIAL FORM -FCD8 ARABIC LIGATURE HEH WITH MEEM INITIAL FORM -FCD9 ARABIC LIGATURE HEH WITH SUPERSCRIPT ALEF INITIAL FORM -FCDA ARABIC LIGATURE YEH WITH JEEM INITIAL FORM -FCDB ARABIC LIGATURE YEH WITH HAH INITIAL FORM -FCDC ARABIC LIGATURE YEH WITH KHAH INITIAL FORM -FCDD ARABIC LIGATURE YEH WITH MEEM INITIAL FORM -FCDE ARABIC LIGATURE YEH WITH HEH INITIAL FORM -FCDF ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH MEEM MEDIAL FORM -FCE0 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH HEH MEDIAL FORM -FCE1 ARABIC LIGATURE BEH WITH MEEM MEDIAL FORM -FCE2 ARABIC LIGATURE BEH WITH HEH MEDIAL 
FORM -FCE3 ARABIC LIGATURE TEH WITH MEEM MEDIAL FORM -FCE4 ARABIC LIGATURE TEH WITH HEH MEDIAL FORM -FCE5 ARABIC LIGATURE THEH WITH MEEM MEDIAL FORM -FCE6 ARABIC LIGATURE THEH WITH HEH MEDIAL FORM -FCE7 ARABIC LIGATURE SEEN WITH MEEM MEDIAL FORM -FCE8 ARABIC LIGATURE SEEN WITH HEH MEDIAL FORM -FCE9 ARABIC LIGATURE SHEEN WITH MEEM MEDIAL FORM -FCEA ARABIC LIGATURE SHEEN WITH HEH MEDIAL FORM -FCEB ARABIC LIGATURE KAF WITH LAM MEDIAL FORM -FCEC ARABIC LIGATURE KAF WITH MEEM MEDIAL FORM -FCED ARABIC LIGATURE LAM WITH MEEM MEDIAL FORM -FCEE ARABIC LIGATURE NOON WITH MEEM MEDIAL FORM -FCEF ARABIC LIGATURE NOON WITH HEH MEDIAL FORM -FCF0 ARABIC LIGATURE YEH WITH MEEM MEDIAL FORM -FCF1 ARABIC LIGATURE YEH WITH HEH MEDIAL FORM -FCF2 ARABIC LIGATURE SHADDA WITH FATHA MEDIAL FORM -FCF3 ARABIC LIGATURE SHADDA WITH DAMMA MEDIAL FORM -FCF4 ARABIC LIGATURE SHADDA WITH KASRA MEDIAL FORM -FCF5 ARABIC LIGATURE TAH WITH ALEF MAKSURA ISOLATED FORM -FCF6 ARABIC LIGATURE TAH WITH YEH ISOLATED FORM -FCF7 ARABIC LIGATURE AIN WITH ALEF MAKSURA ISOLATED FORM -FCF8 ARABIC LIGATURE AIN WITH YEH ISOLATED FORM -FCF9 ARABIC LIGATURE GHAIN WITH ALEF MAKSURA ISOLATED FORM -FCFA ARABIC LIGATURE GHAIN WITH YEH ISOLATED FORM -FCFB ARABIC LIGATURE SEEN WITH ALEF MAKSURA ISOLATED FORM -FCFC ARABIC LIGATURE SEEN WITH YEH ISOLATED FORM -FCFD ARABIC LIGATURE SHEEN WITH ALEF MAKSURA ISOLATED FORM -FCFE ARABIC LIGATURE SHEEN WITH YEH ISOLATED FORM -FCFF ARABIC LIGATURE HAH WITH ALEF MAKSURA ISOLATED FORM -FD00 ARABIC LIGATURE HAH WITH YEH ISOLATED FORM -FD01 ARABIC LIGATURE JEEM WITH ALEF MAKSURA ISOLATED FORM -FD02 ARABIC LIGATURE JEEM WITH YEH ISOLATED FORM -FD03 ARABIC LIGATURE KHAH WITH ALEF MAKSURA ISOLATED FORM -FD04 ARABIC LIGATURE KHAH WITH YEH ISOLATED FORM -FD05 ARABIC LIGATURE SAD WITH ALEF MAKSURA ISOLATED FORM -FD06 ARABIC LIGATURE SAD WITH YEH ISOLATED FORM -FD07 ARABIC LIGATURE DAD WITH ALEF MAKSURA ISOLATED FORM -FD08 ARABIC LIGATURE DAD WITH YEH ISOLATED FORM -FD09 ARABIC LIGATURE SHEEN 
WITH JEEM ISOLATED FORM -FD0A ARABIC LIGATURE SHEEN WITH HAH ISOLATED FORM -FD0B ARABIC LIGATURE SHEEN WITH KHAH ISOLATED FORM -FD0C ARABIC LIGATURE SHEEN WITH MEEM ISOLATED FORM -FD0D ARABIC LIGATURE SHEEN WITH REH ISOLATED FORM -FD0E ARABIC LIGATURE SEEN WITH REH ISOLATED FORM -FD0F ARABIC LIGATURE SAD WITH REH ISOLATED FORM -FD10 ARABIC LIGATURE DAD WITH REH ISOLATED FORM -FD11 ARABIC LIGATURE TAH WITH ALEF MAKSURA FINAL FORM -FD12 ARABIC LIGATURE TAH WITH YEH FINAL FORM -FD13 ARABIC LIGATURE AIN WITH ALEF MAKSURA FINAL FORM -FD14 ARABIC LIGATURE AIN WITH YEH FINAL FORM -FD15 ARABIC LIGATURE GHAIN WITH ALEF MAKSURA FINAL FORM -FD16 ARABIC LIGATURE GHAIN WITH YEH FINAL FORM -FD17 ARABIC LIGATURE SEEN WITH ALEF MAKSURA FINAL FORM -FD18 ARABIC LIGATURE SEEN WITH YEH FINAL FORM -FD19 ARABIC LIGATURE SHEEN WITH ALEF MAKSURA FINAL FORM -FD1A ARABIC LIGATURE SHEEN WITH YEH FINAL FORM -FD1B ARABIC LIGATURE HAH WITH ALEF MAKSURA FINAL FORM -FD1C ARABIC LIGATURE HAH WITH YEH FINAL FORM -FD1D ARABIC LIGATURE JEEM WITH ALEF MAKSURA FINAL FORM -FD1E ARABIC LIGATURE JEEM WITH YEH FINAL FORM -FD1F ARABIC LIGATURE KHAH WITH ALEF MAKSURA FINAL FORM -FD20 ARABIC LIGATURE KHAH WITH YEH FINAL FORM -FD21 ARABIC LIGATURE SAD WITH ALEF MAKSURA FINAL FORM -FD22 ARABIC LIGATURE SAD WITH YEH FINAL FORM -FD23 ARABIC LIGATURE DAD WITH ALEF MAKSURA FINAL FORM -FD24 ARABIC LIGATURE DAD WITH YEH FINAL FORM -FD25 ARABIC LIGATURE SHEEN WITH JEEM FINAL FORM -FD26 ARABIC LIGATURE SHEEN WITH HAH FINAL FORM -FD27 ARABIC LIGATURE SHEEN WITH KHAH FINAL FORM -FD28 ARABIC LIGATURE SHEEN WITH MEEM FINAL FORM -FD29 ARABIC LIGATURE SHEEN WITH REH FINAL FORM -FD2A ARABIC LIGATURE SEEN WITH REH FINAL FORM -FD2B ARABIC LIGATURE SAD WITH REH FINAL FORM -FD2C ARABIC LIGATURE DAD WITH REH FINAL FORM -FD2D ARABIC LIGATURE SHEEN WITH JEEM INITIAL FORM -FD2E ARABIC LIGATURE SHEEN WITH HAH INITIAL FORM -FD2F ARABIC LIGATURE SHEEN WITH KHAH INITIAL FORM -FD30 ARABIC LIGATURE SHEEN WITH MEEM INITIAL FORM -FD31 ARABIC 
LIGATURE SEEN WITH HEH INITIAL FORM -FD32 ARABIC LIGATURE SHEEN WITH HEH INITIAL FORM -FD33 ARABIC LIGATURE TAH WITH MEEM INITIAL FORM -FD34 ARABIC LIGATURE SEEN WITH JEEM MEDIAL FORM -FD35 ARABIC LIGATURE SEEN WITH HAH MEDIAL FORM -FD36 ARABIC LIGATURE SEEN WITH KHAH MEDIAL FORM -FD37 ARABIC LIGATURE SHEEN WITH JEEM MEDIAL FORM -FD38 ARABIC LIGATURE SHEEN WITH HAH MEDIAL FORM -FD39 ARABIC LIGATURE SHEEN WITH KHAH MEDIAL FORM -FD3A ARABIC LIGATURE TAH WITH MEEM MEDIAL FORM -FD3B ARABIC LIGATURE ZAH WITH MEEM MEDIAL FORM -FD3C ARABIC LIGATURE ALEF WITH FATHATAN FINAL FORM -FD3D ARABIC LIGATURE ALEF WITH FATHATAN ISOLATED FORM -FD3E ORNATE LEFT PARENTHESIS -FD3F ORNATE RIGHT PARENTHESIS -FD50 ARABIC LIGATURE TEH WITH JEEM WITH MEEM INITIAL FORM -FD51 ARABIC LIGATURE TEH WITH HAH WITH JEEM FINAL FORM -FD52 ARABIC LIGATURE TEH WITH HAH WITH JEEM INITIAL FORM -FD53 ARABIC LIGATURE TEH WITH HAH WITH MEEM INITIAL FORM -FD54 ARABIC LIGATURE TEH WITH KHAH WITH MEEM INITIAL FORM -FD55 ARABIC LIGATURE TEH WITH MEEM WITH JEEM INITIAL FORM -FD56 ARABIC LIGATURE TEH WITH MEEM WITH HAH INITIAL FORM -FD57 ARABIC LIGATURE TEH WITH MEEM WITH KHAH INITIAL FORM -FD58 ARABIC LIGATURE JEEM WITH MEEM WITH HAH FINAL FORM -FD59 ARABIC LIGATURE JEEM WITH MEEM WITH HAH INITIAL FORM -FD5A ARABIC LIGATURE HAH WITH MEEM WITH YEH FINAL FORM -FD5B ARABIC LIGATURE HAH WITH MEEM WITH ALEF MAKSURA FINAL FORM -FD5C ARABIC LIGATURE SEEN WITH HAH WITH JEEM INITIAL FORM -FD5D ARABIC LIGATURE SEEN WITH JEEM WITH HAH INITIAL FORM -FD5E ARABIC LIGATURE SEEN WITH JEEM WITH ALEF MAKSURA FINAL FORM -FD5F ARABIC LIGATURE SEEN WITH MEEM WITH HAH FINAL FORM -FD60 ARABIC LIGATURE SEEN WITH MEEM WITH HAH INITIAL FORM -FD61 ARABIC LIGATURE SEEN WITH MEEM WITH JEEM INITIAL FORM -FD62 ARABIC LIGATURE SEEN WITH MEEM WITH MEEM FINAL FORM -FD63 ARABIC LIGATURE SEEN WITH MEEM WITH MEEM INITIAL FORM -FD64 ARABIC LIGATURE SAD WITH HAH WITH HAH FINAL FORM -FD65 ARABIC LIGATURE SAD WITH HAH WITH HAH INITIAL FORM -FD66 ARABIC 
LIGATURE SAD WITH MEEM WITH MEEM FINAL FORM -FD67 ARABIC LIGATURE SHEEN WITH HAH WITH MEEM FINAL FORM -FD68 ARABIC LIGATURE SHEEN WITH HAH WITH MEEM INITIAL FORM -FD69 ARABIC LIGATURE SHEEN WITH JEEM WITH YEH FINAL FORM -FD6A ARABIC LIGATURE SHEEN WITH MEEM WITH KHAH FINAL FORM -FD6B ARABIC LIGATURE SHEEN WITH MEEM WITH KHAH INITIAL FORM -FD6C ARABIC LIGATURE SHEEN WITH MEEM WITH MEEM FINAL FORM -FD6D ARABIC LIGATURE SHEEN WITH MEEM WITH MEEM INITIAL FORM -FD6E ARABIC LIGATURE DAD WITH HAH WITH ALEF MAKSURA FINAL FORM -FD6F ARABIC LIGATURE DAD WITH KHAH WITH MEEM FINAL FORM -FD70 ARABIC LIGATURE DAD WITH KHAH WITH MEEM INITIAL FORM -FD71 ARABIC LIGATURE TAH WITH MEEM WITH HAH FINAL FORM -FD72 ARABIC LIGATURE TAH WITH MEEM WITH HAH INITIAL FORM -FD73 ARABIC LIGATURE TAH WITH MEEM WITH MEEM INITIAL FORM -FD74 ARABIC LIGATURE TAH WITH MEEM WITH YEH FINAL FORM -FD75 ARABIC LIGATURE AIN WITH JEEM WITH MEEM FINAL FORM -FD76 ARABIC LIGATURE AIN WITH MEEM WITH MEEM FINAL FORM -FD77 ARABIC LIGATURE AIN WITH MEEM WITH MEEM INITIAL FORM -FD78 ARABIC LIGATURE AIN WITH MEEM WITH ALEF MAKSURA FINAL FORM -FD79 ARABIC LIGATURE GHAIN WITH MEEM WITH MEEM FINAL FORM -FD7A ARABIC LIGATURE GHAIN WITH MEEM WITH YEH FINAL FORM -FD7B ARABIC LIGATURE GHAIN WITH MEEM WITH ALEF MAKSURA FINAL FORM -FD7C ARABIC LIGATURE FEH WITH KHAH WITH MEEM FINAL FORM -FD7D ARABIC LIGATURE FEH WITH KHAH WITH MEEM INITIAL FORM -FD7E ARABIC LIGATURE QAF WITH MEEM WITH HAH FINAL FORM -FD7F ARABIC LIGATURE QAF WITH MEEM WITH MEEM FINAL FORM -FD80 ARABIC LIGATURE LAM WITH HAH WITH MEEM FINAL FORM -FD81 ARABIC LIGATURE LAM WITH HAH WITH YEH FINAL FORM -FD82 ARABIC LIGATURE LAM WITH HAH WITH ALEF MAKSURA FINAL FORM -FD83 ARABIC LIGATURE LAM WITH JEEM WITH JEEM INITIAL FORM -FD84 ARABIC LIGATURE LAM WITH JEEM WITH JEEM FINAL FORM -FD85 ARABIC LIGATURE LAM WITH KHAH WITH MEEM FINAL FORM -FD86 ARABIC LIGATURE LAM WITH KHAH WITH MEEM INITIAL FORM -FD87 ARABIC LIGATURE LAM WITH MEEM WITH HAH FINAL FORM -FD88 ARABIC 
LIGATURE LAM WITH MEEM WITH HAH INITIAL FORM -FD89 ARABIC LIGATURE MEEM WITH HAH WITH JEEM INITIAL FORM -FD8A ARABIC LIGATURE MEEM WITH HAH WITH MEEM INITIAL FORM -FD8B ARABIC LIGATURE MEEM WITH HAH WITH YEH FINAL FORM -FD8C ARABIC LIGATURE MEEM WITH JEEM WITH HAH INITIAL FORM -FD8D ARABIC LIGATURE MEEM WITH JEEM WITH MEEM INITIAL FORM -FD8E ARABIC LIGATURE MEEM WITH KHAH WITH JEEM INITIAL FORM -FD8F ARABIC LIGATURE MEEM WITH KHAH WITH MEEM INITIAL FORM -FD92 ARABIC LIGATURE MEEM WITH JEEM WITH KHAH INITIAL FORM -FD93 ARABIC LIGATURE HEH WITH MEEM WITH JEEM INITIAL FORM -FD94 ARABIC LIGATURE HEH WITH MEEM WITH MEEM INITIAL FORM -FD95 ARABIC LIGATURE NOON WITH HAH WITH MEEM INITIAL FORM -FD96 ARABIC LIGATURE NOON WITH HAH WITH ALEF MAKSURA FINAL FORM -FD97 ARABIC LIGATURE NOON WITH JEEM WITH MEEM FINAL FORM -FD98 ARABIC LIGATURE NOON WITH JEEM WITH MEEM INITIAL FORM -FD99 ARABIC LIGATURE NOON WITH JEEM WITH ALEF MAKSURA FINAL FORM -FD9A ARABIC LIGATURE NOON WITH MEEM WITH YEH FINAL FORM -FD9B ARABIC LIGATURE NOON WITH MEEM WITH ALEF MAKSURA FINAL FORM -FD9C ARABIC LIGATURE YEH WITH MEEM WITH MEEM FINAL FORM -FD9D ARABIC LIGATURE YEH WITH MEEM WITH MEEM INITIAL FORM -FD9E ARABIC LIGATURE BEH WITH KHAH WITH YEH FINAL FORM -FD9F ARABIC LIGATURE TEH WITH JEEM WITH YEH FINAL FORM -FDA0 ARABIC LIGATURE TEH WITH JEEM WITH ALEF MAKSURA FINAL FORM -FDA1 ARABIC LIGATURE TEH WITH KHAH WITH YEH FINAL FORM -FDA2 ARABIC LIGATURE TEH WITH KHAH WITH ALEF MAKSURA FINAL FORM -FDA3 ARABIC LIGATURE TEH WITH MEEM WITH YEH FINAL FORM -FDA4 ARABIC LIGATURE TEH WITH MEEM WITH ALEF MAKSURA FINAL FORM -FDA5 ARABIC LIGATURE JEEM WITH MEEM WITH YEH FINAL FORM -FDA6 ARABIC LIGATURE JEEM WITH HAH WITH ALEF MAKSURA FINAL FORM -FDA7 ARABIC LIGATURE JEEM WITH MEEM WITH ALEF MAKSURA FINAL FORM -FDA8 ARABIC LIGATURE SEEN WITH KHAH WITH ALEF MAKSURA FINAL FORM -FDA9 ARABIC LIGATURE SAD WITH HAH WITH YEH FINAL FORM -FDAA ARABIC LIGATURE SHEEN WITH HAH WITH YEH FINAL FORM -FDAB ARABIC LIGATURE DAD WITH 
HAH WITH YEH FINAL FORM -FDAC ARABIC LIGATURE LAM WITH JEEM WITH YEH FINAL FORM -FDAD ARABIC LIGATURE LAM WITH MEEM WITH YEH FINAL FORM -FDAE ARABIC LIGATURE YEH WITH HAH WITH YEH FINAL FORM -FDAF ARABIC LIGATURE YEH WITH JEEM WITH YEH FINAL FORM -FDB0 ARABIC LIGATURE YEH WITH MEEM WITH YEH FINAL FORM -FDB1 ARABIC LIGATURE MEEM WITH MEEM WITH YEH FINAL FORM -FDB2 ARABIC LIGATURE QAF WITH MEEM WITH YEH FINAL FORM -FDB3 ARABIC LIGATURE NOON WITH HAH WITH YEH FINAL FORM -FDB4 ARABIC LIGATURE QAF WITH MEEM WITH HAH INITIAL FORM -FDB5 ARABIC LIGATURE LAM WITH HAH WITH MEEM INITIAL FORM -FDB6 ARABIC LIGATURE AIN WITH MEEM WITH YEH FINAL FORM -FDB7 ARABIC LIGATURE KAF WITH MEEM WITH YEH FINAL FORM -FDB8 ARABIC LIGATURE NOON WITH JEEM WITH HAH INITIAL FORM -FDB9 ARABIC LIGATURE MEEM WITH KHAH WITH YEH FINAL FORM -FDBA ARABIC LIGATURE LAM WITH JEEM WITH MEEM INITIAL FORM -FDBB ARABIC LIGATURE KAF WITH MEEM WITH MEEM FINAL FORM -FDBC ARABIC LIGATURE LAM WITH JEEM WITH MEEM FINAL FORM -FDBD ARABIC LIGATURE NOON WITH JEEM WITH HAH FINAL FORM -FDBE ARABIC LIGATURE JEEM WITH HAH WITH YEH FINAL FORM -FDBF ARABIC LIGATURE HAH WITH JEEM WITH YEH FINAL FORM -FDC0 ARABIC LIGATURE MEEM WITH JEEM WITH YEH FINAL FORM -FDC1 ARABIC LIGATURE FEH WITH MEEM WITH YEH FINAL FORM -FDC2 ARABIC LIGATURE BEH WITH HAH WITH YEH FINAL FORM -FDC3 ARABIC LIGATURE KAF WITH MEEM WITH MEEM INITIAL FORM -FDC4 ARABIC LIGATURE AIN WITH JEEM WITH MEEM INITIAL FORM -FDC5 ARABIC LIGATURE SAD WITH MEEM WITH MEEM INITIAL FORM -FDC6 ARABIC LIGATURE SEEN WITH KHAH WITH YEH FINAL FORM -FDC7 ARABIC LIGATURE NOON WITH JEEM WITH YEH FINAL FORM -FDF0 ARABIC LIGATURE SALLA USED AS KORANIC STOP SIGN ISOLATED FORM -FDF1 ARABIC LIGATURE QALA USED AS KORANIC STOP SIGN ISOLATED FORM -FDF2 ARABIC LIGATURE ALLAH ISOLATED FORM -FDF3 ARABIC LIGATURE AKBAR ISOLATED FORM -FDF4 ARABIC LIGATURE MOHAMMAD ISOLATED FORM -FDF5 ARABIC LIGATURE SALAM ISOLATED FORM -FDF6 ARABIC LIGATURE RASOUL ISOLATED FORM -FDF7 ARABIC LIGATURE ALAYHE 
ISOLATED FORM -FDF8 ARABIC LIGATURE WASALLAM ISOLATED FORM -FDF9 ARABIC LIGATURE SALLA ISOLATED FORM -FDFA ARABIC LIGATURE SALLALLAHOU ALAYHE WASALLAM -FDFB ARABIC LIGATURE JALLAJALALOUHOU -FDFC RIAL SIGN -FDFD ARABIC LIGATURE BISMILLAH AR-RAHMAN AR-RAHEEM -FE00 VARIATION SELECTOR-1 -FE01 VARIATION SELECTOR-2 -FE02 VARIATION SELECTOR-3 -FE03 VARIATION SELECTOR-4 -FE04 VARIATION SELECTOR-5 -FE05 VARIATION SELECTOR-6 -FE06 VARIATION SELECTOR-7 -FE07 VARIATION SELECTOR-8 -FE08 VARIATION SELECTOR-9 -FE09 VARIATION SELECTOR-10 -FE0A VARIATION SELECTOR-11 -FE0B VARIATION SELECTOR-12 -FE0C VARIATION SELECTOR-13 -FE0D VARIATION SELECTOR-14 -FE0E VARIATION SELECTOR-15 -FE0F VARIATION SELECTOR-16 -FE10 PRESENTATION FORM FOR VERTICAL COMMA -FE11 PRESENTATION FORM FOR VERTICAL IDEOGRAPHIC COMMA -FE12 PRESENTATION FORM FOR VERTICAL IDEOGRAPHIC FULL STOP -FE13 PRESENTATION FORM FOR VERTICAL COLON -FE14 PRESENTATION FORM FOR VERTICAL SEMICOLON -FE15 PRESENTATION FORM FOR VERTICAL EXCLAMATION MARK -FE16 PRESENTATION FORM FOR VERTICAL QUESTION MARK -FE17 PRESENTATION FORM FOR VERTICAL LEFT WHITE LENTICULAR BRACKET -FE18 PRESENTATION FORM FOR VERTICAL RIGHT WHITE LENTICULAR BRAKCET -FE19 PRESENTATION FORM FOR VERTICAL HORIZONTAL ELLIPSIS -FE20 COMBINING LIGATURE LEFT HALF -FE21 COMBINING LIGATURE RIGHT HALF -FE22 COMBINING DOUBLE TILDE LEFT HALF -FE23 COMBINING DOUBLE TILDE RIGHT HALF -FE24 COMBINING MACRON LEFT HALF -FE25 COMBINING MACRON RIGHT HALF -FE26 COMBINING CONJOINING MACRON -FE30 PRESENTATION FORM FOR VERTICAL TWO DOT LEADER -FE31 PRESENTATION FORM FOR VERTICAL EM DASH -FE32 PRESENTATION FORM FOR VERTICAL EN DASH -FE33 PRESENTATION FORM FOR VERTICAL LOW LINE -FE34 PRESENTATION FORM FOR VERTICAL WAVY LOW LINE -FE35 PRESENTATION FORM FOR VERTICAL LEFT PARENTHESIS -FE36 PRESENTATION FORM FOR VERTICAL RIGHT PARENTHESIS -FE37 PRESENTATION FORM FOR VERTICAL LEFT CURLY BRACKET -FE38 PRESENTATION FORM FOR VERTICAL RIGHT CURLY BRACKET -FE39 PRESENTATION FORM FOR VERTICAL LEFT 
TORTOISE SHELL BRACKET -FE3A PRESENTATION FORM FOR VERTICAL RIGHT TORTOISE SHELL BRACKET -FE3B PRESENTATION FORM FOR VERTICAL LEFT BLACK LENTICULAR BRACKET -FE3C PRESENTATION FORM FOR VERTICAL RIGHT BLACK LENTICULAR BRACKET -FE3D PRESENTATION FORM FOR VERTICAL LEFT DOUBLE ANGLE BRACKET -FE3E PRESENTATION FORM FOR VERTICAL RIGHT DOUBLE ANGLE BRACKET -FE3F PRESENTATION FORM FOR VERTICAL LEFT ANGLE BRACKET -FE40 PRESENTATION FORM FOR VERTICAL RIGHT ANGLE BRACKET -FE41 PRESENTATION FORM FOR VERTICAL LEFT CORNER BRACKET -FE42 PRESENTATION FORM FOR VERTICAL RIGHT CORNER BRACKET -FE43 PRESENTATION FORM FOR VERTICAL LEFT WHITE CORNER BRACKET -FE44 PRESENTATION FORM FOR VERTICAL RIGHT WHITE CORNER BRACKET -FE45 SESAME DOT -FE46 WHITE SESAME DOT -FE47 PRESENTATION FORM FOR VERTICAL LEFT SQUARE BRACKET -FE48 PRESENTATION FORM FOR VERTICAL RIGHT SQUARE BRACKET -FE49 DASHED OVERLINE -FE4A CENTRELINE OVERLINE -FE4B WAVY OVERLINE -FE4C DOUBLE WAVY OVERLINE -FE4D DASHED LOW LINE -FE4E CENTRELINE LOW LINE -FE4F WAVY LOW LINE -FE50 SMALL COMMA -FE51 SMALL IDEOGRAPHIC COMMA -FE52 SMALL FULL STOP -FE54 SMALL SEMICOLON -FE55 SMALL COLON -FE56 SMALL QUESTION MARK -FE57 SMALL EXCLAMATION MARK -FE58 SMALL EM DASH -FE59 SMALL LEFT PARENTHESIS -FE5A SMALL RIGHT PARENTHESIS -FE5B SMALL LEFT CURLY BRACKET -FE5C SMALL RIGHT CURLY BRACKET -FE5D SMALL LEFT TORTOISE SHELL BRACKET -FE5E SMALL RIGHT TORTOISE SHELL BRACKET -FE5F SMALL NUMBER SIGN -FE60 SMALL AMPERSAND -FE61 SMALL ASTERISK -FE62 SMALL PLUS SIGN -FE63 SMALL HYPHEN-MINUS -FE64 SMALL LESS-THAN SIGN -FE65 SMALL GREATER-THAN SIGN -FE66 SMALL EQUALS SIGN -FE68 SMALL REVERSE SOLIDUS -FE69 SMALL DOLLAR SIGN -FE6A SMALL PERCENT SIGN -FE6B SMALL COMMERCIAL AT -FE70 ARABIC FATHATAN ISOLATED FORM -FE71 ARABIC TATWEEL WITH FATHATAN ABOVE -FE72 ARABIC DAMMATAN ISOLATED FORM -FE73 ARABIC TAIL FRAGMENT -FE74 ARABIC KASRATAN ISOLATED FORM -FE76 ARABIC FATHA ISOLATED FORM -FE77 ARABIC FATHA MEDIAL FORM -FE78 ARABIC DAMMA ISOLATED FORM -FE79 ARABIC 
DAMMA MEDIAL FORM -FE7A ARABIC KASRA ISOLATED FORM -FE7B ARABIC KASRA MEDIAL FORM -FE7C ARABIC SHADDA ISOLATED FORM -FE7D ARABIC SHADDA MEDIAL FORM -FE7E ARABIC SUKUN ISOLATED FORM -FE7F ARABIC SUKUN MEDIAL FORM -FE80 ARABIC LETTER HAMZA ISOLATED FORM -FE81 ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM -FE82 ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM -FE83 ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM -FE84 ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM -FE85 ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM -FE86 ARABIC LETTER WAW WITH HAMZA ABOVE FINAL FORM -FE87 ARABIC LETTER ALEF WITH HAMZA BELOW ISOLATED FORM -FE88 ARABIC LETTER ALEF WITH HAMZA BELOW FINAL FORM -FE89 ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM -FE8A ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM -FE8B ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM -FE8C ARABIC LETTER YEH WITH HAMZA ABOVE MEDIAL FORM -FE8D ARABIC LETTER ALEF ISOLATED FORM -FE8E ARABIC LETTER ALEF FINAL FORM -FE8F ARABIC LETTER BEH ISOLATED FORM -FE90 ARABIC LETTER BEH FINAL FORM -FE91 ARABIC LETTER BEH INITIAL FORM -FE92 ARABIC LETTER BEH MEDIAL FORM -FE93 ARABIC LETTER TEH MARBUTA ISOLATED FORM -FE94 ARABIC LETTER TEH MARBUTA FINAL FORM -FE95 ARABIC LETTER TEH ISOLATED FORM -FE96 ARABIC LETTER TEH FINAL FORM -FE97 ARABIC LETTER TEH INITIAL FORM -FE98 ARABIC LETTER TEH MEDIAL FORM -FE99 ARABIC LETTER THEH ISOLATED FORM -FE9A ARABIC LETTER THEH FINAL FORM -FE9B ARABIC LETTER THEH INITIAL FORM -FE9C ARABIC LETTER THEH MEDIAL FORM -FE9D ARABIC LETTER JEEM ISOLATED FORM -FE9E ARABIC LETTER JEEM FINAL FORM -FE9F ARABIC LETTER JEEM INITIAL FORM -FEA0 ARABIC LETTER JEEM MEDIAL FORM -FEA1 ARABIC LETTER HAH ISOLATED FORM -FEA2 ARABIC LETTER HAH FINAL FORM -FEA3 ARABIC LETTER HAH INITIAL FORM -FEA4 ARABIC LETTER HAH MEDIAL FORM -FEA5 ARABIC LETTER KHAH ISOLATED FORM -FEA6 ARABIC LETTER KHAH FINAL FORM -FEA7 ARABIC LETTER KHAH INITIAL FORM -FEA8 ARABIC LETTER KHAH MEDIAL FORM -FEA9 ARABIC LETTER DAL ISOLATED FORM -FEAA 
ARABIC LETTER DAL FINAL FORM -FEAB ARABIC LETTER THAL ISOLATED FORM -FEAC ARABIC LETTER THAL FINAL FORM -FEAD ARABIC LETTER REH ISOLATED FORM -FEAE ARABIC LETTER REH FINAL FORM -FEAF ARABIC LETTER ZAIN ISOLATED FORM -FEB0 ARABIC LETTER ZAIN FINAL FORM -FEB1 ARABIC LETTER SEEN ISOLATED FORM -FEB2 ARABIC LETTER SEEN FINAL FORM -FEB3 ARABIC LETTER SEEN INITIAL FORM -FEB4 ARABIC LETTER SEEN MEDIAL FORM -FEB5 ARABIC LETTER SHEEN ISOLATED FORM -FEB6 ARABIC LETTER SHEEN FINAL FORM -FEB7 ARABIC LETTER SHEEN INITIAL FORM -FEB8 ARABIC LETTER SHEEN MEDIAL FORM -FEB9 ARABIC LETTER SAD ISOLATED FORM -FEBA ARABIC LETTER SAD FINAL FORM -FEBB ARABIC LETTER SAD INITIAL FORM -FEBC ARABIC LETTER SAD MEDIAL FORM -FEBD ARABIC LETTER DAD ISOLATED FORM -FEBE ARABIC LETTER DAD FINAL FORM -FEBF ARABIC LETTER DAD INITIAL FORM -FEC0 ARABIC LETTER DAD MEDIAL FORM -FEC1 ARABIC LETTER TAH ISOLATED FORM -FEC2 ARABIC LETTER TAH FINAL FORM -FEC3 ARABIC LETTER TAH INITIAL FORM -FEC4 ARABIC LETTER TAH MEDIAL FORM -FEC5 ARABIC LETTER ZAH ISOLATED FORM -FEC6 ARABIC LETTER ZAH FINAL FORM -FEC7 ARABIC LETTER ZAH INITIAL FORM -FEC8 ARABIC LETTER ZAH MEDIAL FORM -FEC9 ARABIC LETTER AIN ISOLATED FORM -FECA ARABIC LETTER AIN FINAL FORM -FECB ARABIC LETTER AIN INITIAL FORM -FECC ARABIC LETTER AIN MEDIAL FORM -FECD ARABIC LETTER GHAIN ISOLATED FORM -FECE ARABIC LETTER GHAIN FINAL FORM -FECF ARABIC LETTER GHAIN INITIAL FORM -FED0 ARABIC LETTER GHAIN MEDIAL FORM -FED1 ARABIC LETTER FEH ISOLATED FORM -FED2 ARABIC LETTER FEH FINAL FORM -FED3 ARABIC LETTER FEH INITIAL FORM -FED4 ARABIC LETTER FEH MEDIAL FORM -FED5 ARABIC LETTER QAF ISOLATED FORM -FED6 ARABIC LETTER QAF FINAL FORM -FED7 ARABIC LETTER QAF INITIAL FORM -FED8 ARABIC LETTER QAF MEDIAL FORM -FED9 ARABIC LETTER KAF ISOLATED FORM -FEDA ARABIC LETTER KAF FINAL FORM -FEDB ARABIC LETTER KAF INITIAL FORM -FEDC ARABIC LETTER KAF MEDIAL FORM -FEDD ARABIC LETTER LAM ISOLATED FORM -FEDE ARABIC LETTER LAM FINAL FORM -FEDF ARABIC LETTER LAM INITIAL FORM -FEE0 
ARABIC LETTER LAM MEDIAL FORM -FEE1 ARABIC LETTER MEEM ISOLATED FORM -FEE2 ARABIC LETTER MEEM FINAL FORM -FEE3 ARABIC LETTER MEEM INITIAL FORM -FEE4 ARABIC LETTER MEEM MEDIAL FORM -FEE5 ARABIC LETTER NOON ISOLATED FORM -FEE6 ARABIC LETTER NOON FINAL FORM -FEE7 ARABIC LETTER NOON INITIAL FORM -FEE8 ARABIC LETTER NOON MEDIAL FORM -FEE9 ARABIC LETTER HEH ISOLATED FORM -FEEA ARABIC LETTER HEH FINAL FORM -FEEB ARABIC LETTER HEH INITIAL FORM -FEEC ARABIC LETTER HEH MEDIAL FORM -FEED ARABIC LETTER WAW ISOLATED FORM -FEEE ARABIC LETTER WAW FINAL FORM -FEEF ARABIC LETTER ALEF MAKSURA ISOLATED FORM -FEF0 ARABIC LETTER ALEF MAKSURA FINAL FORM -FEF1 ARABIC LETTER YEH ISOLATED FORM -FEF2 ARABIC LETTER YEH FINAL FORM -FEF3 ARABIC LETTER YEH INITIAL FORM -FEF4 ARABIC LETTER YEH MEDIAL FORM -FEF5 ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM -FEF6 ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM -FEF7 ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM -FEF8 ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM -FEF9 ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW ISOLATED FORM -FEFA ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW FINAL FORM -FEFB ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM -FEFC ARABIC LIGATURE LAM WITH ALEF FINAL FORM -FEFF ZERO WIDTH NO-BREAK SPACE -FF01 FULLWIDTH EXCLAMATION MARK -FF02 FULLWIDTH QUOTATION MARK -FF03 FULLWIDTH NUMBER SIGN -FF04 FULLWIDTH DOLLAR SIGN -FF05 FULLWIDTH PERCENT SIGN -FF06 FULLWIDTH AMPERSAND -FF07 FULLWIDTH APOSTROPHE -FF08 FULLWIDTH LEFT PARENTHESIS -FF09 FULLWIDTH RIGHT PARENTHESIS -FF0A FULLWIDTH ASTERISK -FF0B FULLWIDTH PLUS SIGN -FF0C FULLWIDTH COMMA -FF0D FULLWIDTH HYPHEN-MINUS -FF0E FULLWIDTH FULL STOP -FF0F FULLWIDTH SOLIDUS -FF10 FULLWIDTH DIGIT ZERO -FF11 FULLWIDTH DIGIT ONE -FF12 FULLWIDTH DIGIT TWO -FF13 FULLWIDTH DIGIT THREE -FF14 FULLWIDTH DIGIT FOUR -FF15 FULLWIDTH DIGIT FIVE -FF16 FULLWIDTH DIGIT SIX -FF17 FULLWIDTH DIGIT SEVEN -FF18 FULLWIDTH DIGIT EIGHT -FF19 FULLWIDTH DIGIT NINE 
-FF1A FULLWIDTH COLON -FF1B FULLWIDTH SEMICOLON -FF1C FULLWIDTH LESS-THAN SIGN -FF1D FULLWIDTH EQUALS SIGN -FF1E FULLWIDTH GREATER-THAN SIGN -FF1F FULLWIDTH QUESTION MARK -FF20 FULLWIDTH COMMERCIAL AT -FF21 FULLWIDTH LATIN CAPITAL LETTER A -FF22 FULLWIDTH LATIN CAPITAL LETTER B -FF23 FULLWIDTH LATIN CAPITAL LETTER C -FF24 FULLWIDTH LATIN CAPITAL LETTER D -FF25 FULLWIDTH LATIN CAPITAL LETTER E -FF26 FULLWIDTH LATIN CAPITAL LETTER F -FF27 FULLWIDTH LATIN CAPITAL LETTER G -FF28 FULLWIDTH LATIN CAPITAL LETTER H -FF29 FULLWIDTH LATIN CAPITAL LETTER I -FF2A FULLWIDTH LATIN CAPITAL LETTER J -FF2B FULLWIDTH LATIN CAPITAL LETTER K -FF2C FULLWIDTH LATIN CAPITAL LETTER L -FF2D FULLWIDTH LATIN CAPITAL LETTER M -FF2E FULLWIDTH LATIN CAPITAL LETTER N -FF2F FULLWIDTH LATIN CAPITAL LETTER O -FF30 FULLWIDTH LATIN CAPITAL LETTER P -FF31 FULLWIDTH LATIN CAPITAL LETTER Q -FF32 FULLWIDTH LATIN CAPITAL LETTER R -FF33 FULLWIDTH LATIN CAPITAL LETTER S -FF34 FULLWIDTH LATIN CAPITAL LETTER T -FF35 FULLWIDTH LATIN CAPITAL LETTER U -FF36 FULLWIDTH LATIN CAPITAL LETTER V -FF37 FULLWIDTH LATIN CAPITAL LETTER W -FF38 FULLWIDTH LATIN CAPITAL LETTER X -FF39 FULLWIDTH LATIN CAPITAL LETTER Y -FF3A FULLWIDTH LATIN CAPITAL LETTER Z -FF3B FULLWIDTH LEFT SQUARE BRACKET -FF3C FULLWIDTH REVERSE SOLIDUS -FF3D FULLWIDTH RIGHT SQUARE BRACKET -FF3E FULLWIDTH CIRCUMFLEX ACCENT -FF3F FULLWIDTH LOW LINE -FF40 FULLWIDTH GRAVE ACCENT -FF41 FULLWIDTH LATIN SMALL LETTER A -FF42 FULLWIDTH LATIN SMALL LETTER B -FF43 FULLWIDTH LATIN SMALL LETTER C -FF44 FULLWIDTH LATIN SMALL LETTER D -FF45 FULLWIDTH LATIN SMALL LETTER E -FF46 FULLWIDTH LATIN SMALL LETTER F -FF47 FULLWIDTH LATIN SMALL LETTER G -FF48 FULLWIDTH LATIN SMALL LETTER H -FF49 FULLWIDTH LATIN SMALL LETTER I -FF4A FULLWIDTH LATIN SMALL LETTER J -FF4B FULLWIDTH LATIN SMALL LETTER K -FF4C FULLWIDTH LATIN SMALL LETTER L -FF4D FULLWIDTH LATIN SMALL LETTER M -FF4E FULLWIDTH LATIN SMALL LETTER N -FF4F FULLWIDTH LATIN SMALL LETTER O -FF50 FULLWIDTH LATIN SMALL LETTER P 
-FF51 FULLWIDTH LATIN SMALL LETTER Q -FF52 FULLWIDTH LATIN SMALL LETTER R -FF53 FULLWIDTH LATIN SMALL LETTER S -FF54 FULLWIDTH LATIN SMALL LETTER T -FF55 FULLWIDTH LATIN SMALL LETTER U -FF56 FULLWIDTH LATIN SMALL LETTER V -FF57 FULLWIDTH LATIN SMALL LETTER W -FF58 FULLWIDTH LATIN SMALL LETTER X -FF59 FULLWIDTH LATIN SMALL LETTER Y -FF5A FULLWIDTH LATIN SMALL LETTER Z -FF5B FULLWIDTH LEFT CURLY BRACKET -FF5C FULLWIDTH VERTICAL LINE -FF5D FULLWIDTH RIGHT CURLY BRACKET -FF5E FULLWIDTH TILDE -FF5F FULLWIDTH LEFT WHITE PARENTHESIS -FF60 FULLWIDTH RIGHT WHITE PARENTHESIS -FF61 HALFWIDTH IDEOGRAPHIC FULL STOP -FF62 HALFWIDTH LEFT CORNER BRACKET -FF63 HALFWIDTH RIGHT CORNER BRACKET -FF64 HALFWIDTH IDEOGRAPHIC COMMA -FF65 HALFWIDTH KATAKANA MIDDLE DOT -FF66 HALFWIDTH KATAKANA LETTER WO -FF67 HALFWIDTH KATAKANA LETTER SMALL A -FF68 HALFWIDTH KATAKANA LETTER SMALL I -FF69 HALFWIDTH KATAKANA LETTER SMALL U -FF6A HALFWIDTH KATAKANA LETTER SMALL E -FF6B HALFWIDTH KATAKANA LETTER SMALL O -FF6C HALFWIDTH KATAKANA LETTER SMALL YA -FF6D HALFWIDTH KATAKANA LETTER SMALL YU -FF6E HALFWIDTH KATAKANA LETTER SMALL YO -FF6F HALFWIDTH KATAKANA LETTER SMALL TU -FF70 HALFWIDTH KATAKANA-HIRAGANA PROLONGED SOUND MARK -FF71 HALFWIDTH KATAKANA LETTER A -FF72 HALFWIDTH KATAKANA LETTER I -FF73 HALFWIDTH KATAKANA LETTER U -FF74 HALFWIDTH KATAKANA LETTER E -FF75 HALFWIDTH KATAKANA LETTER O -FF76 HALFWIDTH KATAKANA LETTER KA -FF77 HALFWIDTH KATAKANA LETTER KI -FF78 HALFWIDTH KATAKANA LETTER KU -FF79 HALFWIDTH KATAKANA LETTER KE -FF7A HALFWIDTH KATAKANA LETTER KO -FF7B HALFWIDTH KATAKANA LETTER SA -FF7C HALFWIDTH KATAKANA LETTER SI -FF7D HALFWIDTH KATAKANA LETTER SU -FF7E HALFWIDTH KATAKANA LETTER SE -FF7F HALFWIDTH KATAKANA LETTER SO -FF80 HALFWIDTH KATAKANA LETTER TA -FF81 HALFWIDTH KATAKANA LETTER TI -FF82 HALFWIDTH KATAKANA LETTER TU -FF83 HALFWIDTH KATAKANA LETTER TE -FF84 HALFWIDTH KATAKANA LETTER TO -FF85 HALFWIDTH KATAKANA LETTER NA -FF86 HALFWIDTH KATAKANA LETTER NI -FF87 HALFWIDTH KATAKANA 
LETTER NU -FF88 HALFWIDTH KATAKANA LETTER NE -FF89 HALFWIDTH KATAKANA LETTER NO -FF8A HALFWIDTH KATAKANA LETTER HA -FF8B HALFWIDTH KATAKANA LETTER HI -FF8C HALFWIDTH KATAKANA LETTER HU -FF8D HALFWIDTH KATAKANA LETTER HE -FF8E HALFWIDTH KATAKANA LETTER HO -FF8F HALFWIDTH KATAKANA LETTER MA -FF90 HALFWIDTH KATAKANA LETTER MI -FF91 HALFWIDTH KATAKANA LETTER MU -FF92 HALFWIDTH KATAKANA LETTER ME -FF93 HALFWIDTH KATAKANA LETTER MO -FF94 HALFWIDTH KATAKANA LETTER YA -FF95 HALFWIDTH KATAKANA LETTER YU -FF96 HALFWIDTH KATAKANA LETTER YO -FF97 HALFWIDTH KATAKANA LETTER RA -FF98 HALFWIDTH KATAKANA LETTER RI -FF99 HALFWIDTH KATAKANA LETTER RU -FF9A HALFWIDTH KATAKANA LETTER RE -FF9B HALFWIDTH KATAKANA LETTER RO -FF9C HALFWIDTH KATAKANA LETTER WA -FF9D HALFWIDTH KATAKANA LETTER N -FF9E HALFWIDTH KATAKANA VOICED SOUND MARK -FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK -FFA0 HALFWIDTH HANGUL FILLER -FFA1 HALFWIDTH HANGUL LETTER KIYEOK -FFA2 HALFWIDTH HANGUL LETTER SSANGKIYEOK -FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS -FFA4 HALFWIDTH HANGUL LETTER NIEUN -FFA5 HALFWIDTH HANGUL LETTER NIEUN-CIEUC -FFA6 HALFWIDTH HANGUL LETTER NIEUN-HIEUH -FFA7 HALFWIDTH HANGUL LETTER TIKEUT -FFA8 HALFWIDTH HANGUL LETTER SSANGTIKEUT -FFA9 HALFWIDTH HANGUL LETTER RIEUL -FFAA HALFWIDTH HANGUL LETTER RIEUL-KIYEOK -FFAB HALFWIDTH HANGUL LETTER RIEUL-MIEUM -FFAC HALFWIDTH HANGUL LETTER RIEUL-PIEUP -FFAD HALFWIDTH HANGUL LETTER RIEUL-SIOS -FFAE HALFWIDTH HANGUL LETTER RIEUL-THIEUTH -FFAF HALFWIDTH HANGUL LETTER RIEUL-PHIEUPH -FFB0 HALFWIDTH HANGUL LETTER RIEUL-HIEUH -FFB1 HALFWIDTH HANGUL LETTER MIEUM -FFB2 HALFWIDTH HANGUL LETTER PIEUP -FFB3 HALFWIDTH HANGUL LETTER SSANGPIEUP -FFB4 HALFWIDTH HANGUL LETTER PIEUP-SIOS -FFB5 HALFWIDTH HANGUL LETTER SIOS -FFB6 HALFWIDTH HANGUL LETTER SSANGSIOS -FFB7 HALFWIDTH HANGUL LETTER IEUNG -FFB8 HALFWIDTH HANGUL LETTER CIEUC -FFB9 HALFWIDTH HANGUL LETTER SSANGCIEUC -FFBA HALFWIDTH HANGUL LETTER CHIEUCH -FFBB HALFWIDTH HANGUL LETTER KHIEUKH -FFBC HALFWIDTH HANGUL 
LETTER THIEUTH -FFBD HALFWIDTH HANGUL LETTER PHIEUPH -FFBE HALFWIDTH HANGUL LETTER HIEUH -FFC2 HALFWIDTH HANGUL LETTER A -FFC3 HALFWIDTH HANGUL LETTER AE -FFC4 HALFWIDTH HANGUL LETTER YA -FFC5 HALFWIDTH HANGUL LETTER YAE -FFC6 HALFWIDTH HANGUL LETTER EO -FFC7 HALFWIDTH HANGUL LETTER E -FFCA HALFWIDTH HANGUL LETTER YEO -FFCB HALFWIDTH HANGUL LETTER YE -FFCC HALFWIDTH HANGUL LETTER O -FFCD HALFWIDTH HANGUL LETTER WA -FFCE HALFWIDTH HANGUL LETTER WAE -FFCF HALFWIDTH HANGUL LETTER OE -FFD2 HALFWIDTH HANGUL LETTER YO -FFD3 HALFWIDTH HANGUL LETTER U -FFD4 HALFWIDTH HANGUL LETTER WEO -FFD5 HALFWIDTH HANGUL LETTER WE -FFD6 HALFWIDTH HANGUL LETTER WI -FFD7 HALFWIDTH HANGUL LETTER YU -FFDA HALFWIDTH HANGUL LETTER EU -FFDB HALFWIDTH HANGUL LETTER YI -FFDC HALFWIDTH HANGUL LETTER I -FFE0 FULLWIDTH CENT SIGN -FFE1 FULLWIDTH POUND SIGN -FFE2 FULLWIDTH NOT SIGN -FFE3 FULLWIDTH MACRON -FFE4 FULLWIDTH BROKEN BAR -FFE5 FULLWIDTH YEN SIGN -FFE6 FULLWIDTH WON SIGN -FFE8 HALFWIDTH FORMS LIGHT VERTICAL -FFE9 HALFWIDTH LEFTWARDS ARROW -FFEA HALFWIDTH UPWARDS ARROW -FFEB HALFWIDTH RIGHTWARDS ARROW -FFEC HALFWIDTH DOWNWARDS ARROW -FFED HALFWIDTH BLACK SQUARE -FFEE HALFWIDTH WHITE CIRCLE -FFF9 INTERLINEAR ANNOTATION ANCHOR -FFFA INTERLINEAR ANNOTATION SEPARATOR -FFFB INTERLINEAR ANNOTATION TERMINATOR -FFFC OBJECT REPLACEMENT CHARACTER -FFFD REPLACEMENT CHARACTER -10000 LINEAR B SYLLABLE B008 A -10001 LINEAR B SYLLABLE B038 E -10002 LINEAR B SYLLABLE B028 I -10003 LINEAR B SYLLABLE B061 O -10004 LINEAR B SYLLABLE B010 U -10005 LINEAR B SYLLABLE B001 DA -10006 LINEAR B SYLLABLE B045 DE -10007 LINEAR B SYLLABLE B007 DI -10008 LINEAR B SYLLABLE B014 DO -10009 LINEAR B SYLLABLE B051 DU -1000A LINEAR B SYLLABLE B057 JA -1000B LINEAR B SYLLABLE B046 JE -1000D LINEAR B SYLLABLE B036 JO -1000E LINEAR B SYLLABLE B065 JU -1000F LINEAR B SYLLABLE B077 KA -10010 LINEAR B SYLLABLE B044 KE -10011 LINEAR B SYLLABLE B067 KI -10012 LINEAR B SYLLABLE B070 KO -10013 LINEAR B SYLLABLE B081 KU -10014 LINEAR B 
SYLLABLE B080 MA -10015 LINEAR B SYLLABLE B013 ME -10016 LINEAR B SYLLABLE B073 MI -10017 LINEAR B SYLLABLE B015 MO -10018 LINEAR B SYLLABLE B023 MU -10019 LINEAR B SYLLABLE B006 NA -1001A LINEAR B SYLLABLE B024 NE -1001B LINEAR B SYLLABLE B030 NI -1001C LINEAR B SYLLABLE B052 NO -1001D LINEAR B SYLLABLE B055 NU -1001E LINEAR B SYLLABLE B003 PA -1001F LINEAR B SYLLABLE B072 PE -10020 LINEAR B SYLLABLE B039 PI -10021 LINEAR B SYLLABLE B011 PO -10022 LINEAR B SYLLABLE B050 PU -10023 LINEAR B SYLLABLE B016 QA -10024 LINEAR B SYLLABLE B078 QE -10025 LINEAR B SYLLABLE B021 QI -10026 LINEAR B SYLLABLE B032 QO -10028 LINEAR B SYLLABLE B060 RA -10029 LINEAR B SYLLABLE B027 RE -1002A LINEAR B SYLLABLE B053 RI -1002B LINEAR B SYLLABLE B002 RO -1002C LINEAR B SYLLABLE B026 RU -1002D LINEAR B SYLLABLE B031 SA -1002E LINEAR B SYLLABLE B009 SE -1002F LINEAR B SYLLABLE B041 SI -10030 LINEAR B SYLLABLE B012 SO -10031 LINEAR B SYLLABLE B058 SU -10032 LINEAR B SYLLABLE B059 TA -10033 LINEAR B SYLLABLE B004 TE -10034 LINEAR B SYLLABLE B037 TI -10035 LINEAR B SYLLABLE B005 TO -10036 LINEAR B SYLLABLE B069 TU -10037 LINEAR B SYLLABLE B054 WA -10038 LINEAR B SYLLABLE B075 WE -10039 LINEAR B SYLLABLE B040 WI -1003A LINEAR B SYLLABLE B042 WO -1003C LINEAR B SYLLABLE B017 ZA -1003D LINEAR B SYLLABLE B074 ZE -1003F LINEAR B SYLLABLE B020 ZO -10040 LINEAR B SYLLABLE B025 A2 -10041 LINEAR B SYLLABLE B043 A3 -10042 LINEAR B SYLLABLE B085 AU -10043 LINEAR B SYLLABLE B071 DWE -10044 LINEAR B SYLLABLE B090 DWO -10045 LINEAR B SYLLABLE B048 NWA -10046 LINEAR B SYLLABLE B029 PU2 -10047 LINEAR B SYLLABLE B062 PTE -10048 LINEAR B SYLLABLE B076 RA2 -10049 LINEAR B SYLLABLE B033 RA3 -1004A LINEAR B SYLLABLE B068 RO2 -1004B LINEAR B SYLLABLE B066 TA2 -1004C LINEAR B SYLLABLE B087 TWE -1004D LINEAR B SYLLABLE B091 TWO -10050 LINEAR B SYMBOL B018 -10051 LINEAR B SYMBOL B019 -10052 LINEAR B SYMBOL B022 -10053 LINEAR B SYMBOL B034 -10054 LINEAR B SYMBOL B047 -10055 LINEAR B SYMBOL B049 -10056 LINEAR B 
SYMBOL B056 -10057 LINEAR B SYMBOL B063 -10058 LINEAR B SYMBOL B064 -10059 LINEAR B SYMBOL B079 -1005A LINEAR B SYMBOL B082 -1005B LINEAR B SYMBOL B083 -1005C LINEAR B SYMBOL B086 -1005D LINEAR B SYMBOL B089 -10080 LINEAR B IDEOGRAM B100 MAN -10081 LINEAR B IDEOGRAM B102 WOMAN -10082 LINEAR B IDEOGRAM B104 DEER -10083 LINEAR B IDEOGRAM B105 EQUID -10084 LINEAR B IDEOGRAM B105F MARE -10085 LINEAR B IDEOGRAM B105M STALLION -10086 LINEAR B IDEOGRAM B106F EWE -10087 LINEAR B IDEOGRAM B106M RAM -10088 LINEAR B IDEOGRAM B107F SHE-GOAT -10089 LINEAR B IDEOGRAM B107M HE-GOAT -1008A LINEAR B IDEOGRAM B108F SOW -1008B LINEAR B IDEOGRAM B108M BOAR -1008C LINEAR B IDEOGRAM B109F COW -1008D LINEAR B IDEOGRAM B109M BULL -1008E LINEAR B IDEOGRAM B120 WHEAT -1008F LINEAR B IDEOGRAM B121 BARLEY -10090 LINEAR B IDEOGRAM B122 OLIVE -10091 LINEAR B IDEOGRAM B123 SPICE -10092 LINEAR B IDEOGRAM B125 CYPERUS -10093 LINEAR B MONOGRAM B127 KAPO -10094 LINEAR B MONOGRAM B128 KANAKO -10095 LINEAR B IDEOGRAM B130 OIL -10096 LINEAR B IDEOGRAM B131 WINE -10097 LINEAR B IDEOGRAM B132 -10098 LINEAR B MONOGRAM B133 AREPA -10099 LINEAR B MONOGRAM B135 MERI -1009A LINEAR B IDEOGRAM B140 BRONZE -1009B LINEAR B IDEOGRAM B141 GOLD -1009C LINEAR B IDEOGRAM B142 -1009D LINEAR B IDEOGRAM B145 WOOL -1009E LINEAR B IDEOGRAM B146 -1009F LINEAR B IDEOGRAM B150 -100A0 LINEAR B IDEOGRAM B151 HORN -100A1 LINEAR B IDEOGRAM B152 -100A2 LINEAR B IDEOGRAM B153 -100A3 LINEAR B IDEOGRAM B154 -100A4 LINEAR B MONOGRAM B156 TURO2 -100A5 LINEAR B IDEOGRAM B157 -100A6 LINEAR B IDEOGRAM B158 -100A7 LINEAR B IDEOGRAM B159 CLOTH -100A8 LINEAR B IDEOGRAM B160 -100A9 LINEAR B IDEOGRAM B161 -100AA LINEAR B IDEOGRAM B162 GARMENT -100AB LINEAR B IDEOGRAM B163 ARMOUR -100AC LINEAR B IDEOGRAM B164 -100AD LINEAR B IDEOGRAM B165 -100AE LINEAR B IDEOGRAM B166 -100AF LINEAR B IDEOGRAM B167 -100B0 LINEAR B IDEOGRAM B168 -100B1 LINEAR B IDEOGRAM B169 -100B2 LINEAR B IDEOGRAM B170 -100B3 LINEAR B IDEOGRAM B171 -100B4 LINEAR B IDEOGRAM B172 
-100B5 LINEAR B IDEOGRAM B173 MONTH -100B6 LINEAR B IDEOGRAM B174 -100B7 LINEAR B IDEOGRAM B176 TREE -100B8 LINEAR B IDEOGRAM B177 -100B9 LINEAR B IDEOGRAM B178 -100BA LINEAR B IDEOGRAM B179 -100BB LINEAR B IDEOGRAM B180 -100BC LINEAR B IDEOGRAM B181 -100BD LINEAR B IDEOGRAM B182 -100BE LINEAR B IDEOGRAM B183 -100BF LINEAR B IDEOGRAM B184 -100C0 LINEAR B IDEOGRAM B185 -100C1 LINEAR B IDEOGRAM B189 -100C2 LINEAR B IDEOGRAM B190 -100C3 LINEAR B IDEOGRAM B191 HELMET -100C4 LINEAR B IDEOGRAM B220 FOOTSTOOL -100C5 LINEAR B IDEOGRAM B225 BATHTUB -100C6 LINEAR B IDEOGRAM B230 SPEAR -100C7 LINEAR B IDEOGRAM B231 ARROW -100C8 LINEAR B IDEOGRAM B232 -100C9 LINEAR B IDEOGRAM B233 SWORD -100CA LINEAR B IDEOGRAM B234 -100CB LINEAR B IDEOGRAM B236 -100CC LINEAR B IDEOGRAM B240 WHEELED CHARIOT -100CD LINEAR B IDEOGRAM B241 CHARIOT -100CE LINEAR B IDEOGRAM B242 CHARIOT FRAME -100CF LINEAR B IDEOGRAM B243 WHEEL -100D0 LINEAR B IDEOGRAM B245 -100D1 LINEAR B IDEOGRAM B246 -100D2 LINEAR B MONOGRAM B247 DIPTE -100D3 LINEAR B IDEOGRAM B248 -100D4 LINEAR B IDEOGRAM B249 -100D5 LINEAR B IDEOGRAM B251 -100D6 LINEAR B IDEOGRAM B252 -100D7 LINEAR B IDEOGRAM B253 -100D8 LINEAR B IDEOGRAM B254 DART -100D9 LINEAR B IDEOGRAM B255 -100DA LINEAR B IDEOGRAM B256 -100DB LINEAR B IDEOGRAM B257 -100DC LINEAR B IDEOGRAM B258 -100DD LINEAR B IDEOGRAM B259 -100DE LINEAR B IDEOGRAM VESSEL B155 -100DF LINEAR B IDEOGRAM VESSEL B200 -100E0 LINEAR B IDEOGRAM VESSEL B201 -100E1 LINEAR B IDEOGRAM VESSEL B202 -100E2 LINEAR B IDEOGRAM VESSEL B203 -100E3 LINEAR B IDEOGRAM VESSEL B204 -100E4 LINEAR B IDEOGRAM VESSEL B205 -100E5 LINEAR B IDEOGRAM VESSEL B206 -100E6 LINEAR B IDEOGRAM VESSEL B207 -100E7 LINEAR B IDEOGRAM VESSEL B208 -100E8 LINEAR B IDEOGRAM VESSEL B209 -100E9 LINEAR B IDEOGRAM VESSEL B210 -100EA LINEAR B IDEOGRAM VESSEL B211 -100EB LINEAR B IDEOGRAM VESSEL B212 -100EC LINEAR B IDEOGRAM VESSEL B213 -100ED LINEAR B IDEOGRAM VESSEL B214 -100EE LINEAR B IDEOGRAM VESSEL B215 -100EF LINEAR B IDEOGRAM VESSEL 
B216 -100F0 LINEAR B IDEOGRAM VESSEL B217 -100F1 LINEAR B IDEOGRAM VESSEL B218 -100F2 LINEAR B IDEOGRAM VESSEL B219 -100F3 LINEAR B IDEOGRAM VESSEL B221 -100F4 LINEAR B IDEOGRAM VESSEL B222 -100F5 LINEAR B IDEOGRAM VESSEL B226 -100F6 LINEAR B IDEOGRAM VESSEL B227 -100F7 LINEAR B IDEOGRAM VESSEL B228 -100F8 LINEAR B IDEOGRAM VESSEL B229 -100F9 LINEAR B IDEOGRAM VESSEL B250 -100FA LINEAR B IDEOGRAM VESSEL B305 -10100 AEGEAN WORD SEPARATOR LINE -10101 AEGEAN WORD SEPARATOR DOT -10102 AEGEAN CHECK MARK -10107 AEGEAN NUMBER ONE -10108 AEGEAN NUMBER TWO -10109 AEGEAN NUMBER THREE -1010A AEGEAN NUMBER FOUR -1010B AEGEAN NUMBER FIVE -1010C AEGEAN NUMBER SIX -1010D AEGEAN NUMBER SEVEN -1010E AEGEAN NUMBER EIGHT -1010F AEGEAN NUMBER NINE -10110 AEGEAN NUMBER TEN -10111 AEGEAN NUMBER TWENTY -10112 AEGEAN NUMBER THIRTY -10113 AEGEAN NUMBER FORTY -10114 AEGEAN NUMBER FIFTY -10115 AEGEAN NUMBER SIXTY -10116 AEGEAN NUMBER SEVENTY -10117 AEGEAN NUMBER EIGHTY -10118 AEGEAN NUMBER NINETY -10119 AEGEAN NUMBER ONE HUNDRED -1011A AEGEAN NUMBER TWO HUNDRED -1011B AEGEAN NUMBER THREE HUNDRED -1011C AEGEAN NUMBER FOUR HUNDRED -1011D AEGEAN NUMBER FIVE HUNDRED -1011E AEGEAN NUMBER SIX HUNDRED -1011F AEGEAN NUMBER SEVEN HUNDRED -10120 AEGEAN NUMBER EIGHT HUNDRED -10121 AEGEAN NUMBER NINE HUNDRED -10122 AEGEAN NUMBER ONE THOUSAND -10123 AEGEAN NUMBER TWO THOUSAND -10124 AEGEAN NUMBER THREE THOUSAND -10125 AEGEAN NUMBER FOUR THOUSAND -10126 AEGEAN NUMBER FIVE THOUSAND -10127 AEGEAN NUMBER SIX THOUSAND -10128 AEGEAN NUMBER SEVEN THOUSAND -10129 AEGEAN NUMBER EIGHT THOUSAND -1012A AEGEAN NUMBER NINE THOUSAND -1012B AEGEAN NUMBER TEN THOUSAND -1012C AEGEAN NUMBER TWENTY THOUSAND -1012D AEGEAN NUMBER THIRTY THOUSAND -1012E AEGEAN NUMBER FORTY THOUSAND -1012F AEGEAN NUMBER FIFTY THOUSAND -10130 AEGEAN NUMBER SIXTY THOUSAND -10131 AEGEAN NUMBER SEVENTY THOUSAND -10132 AEGEAN NUMBER EIGHTY THOUSAND -10133 AEGEAN NUMBER NINETY THOUSAND -10137 AEGEAN WEIGHT BASE UNIT -10138 AEGEAN WEIGHT FIRST SUBUNIT 
-10139 AEGEAN WEIGHT SECOND SUBUNIT -1013A AEGEAN WEIGHT THIRD SUBUNIT -1013B AEGEAN WEIGHT FOURTH SUBUNIT -1013C AEGEAN DRY MEASURE FIRST SUBUNIT -1013D AEGEAN LIQUID MEASURE FIRST SUBUNIT -1013E AEGEAN MEASURE SECOND SUBUNIT -1013F AEGEAN MEASURE THIRD SUBUNIT -10140 GREEK ACROPHONIC ATTIC ONE QUARTER -10141 GREEK ACROPHONIC ATTIC ONE HALF -10142 GREEK ACROPHONIC ATTIC ONE DRACHMA -10143 GREEK ACROPHONIC ATTIC FIVE -10144 GREEK ACROPHONIC ATTIC FIFTY -10145 GREEK ACROPHONIC ATTIC FIVE HUNDRED -10146 GREEK ACROPHONIC ATTIC FIVE THOUSAND -10147 GREEK ACROPHONIC ATTIC FIFTY THOUSAND -10148 GREEK ACROPHONIC ATTIC FIVE TALENTS -10149 GREEK ACROPHONIC ATTIC TEN TALENTS -1014A GREEK ACROPHONIC ATTIC FIFTY TALENTS -1014B GREEK ACROPHONIC ATTIC ONE HUNDRED TALENTS -1014C GREEK ACROPHONIC ATTIC FIVE HUNDRED TALENTS -1014D GREEK ACROPHONIC ATTIC ONE THOUSAND TALENTS -1014E GREEK ACROPHONIC ATTIC FIVE THOUSAND TALENTS -1014F GREEK ACROPHONIC ATTIC FIVE STATERS -10150 GREEK ACROPHONIC ATTIC TEN STATERS -10151 GREEK ACROPHONIC ATTIC FIFTY STATERS -10152 GREEK ACROPHONIC ATTIC ONE HUNDRED STATERS -10153 GREEK ACROPHONIC ATTIC FIVE HUNDRED STATERS -10154 GREEK ACROPHONIC ATTIC ONE THOUSAND STATERS -10155 GREEK ACROPHONIC ATTIC TEN THOUSAND STATERS -10156 GREEK ACROPHONIC ATTIC FIFTY THOUSAND STATERS -10157 GREEK ACROPHONIC ATTIC TEN MNAS -10158 GREEK ACROPHONIC HERAEUM ONE PLETHRON -10159 GREEK ACROPHONIC THESPIAN ONE -1015A GREEK ACROPHONIC HERMIONIAN ONE -1015B GREEK ACROPHONIC EPIDAUREAN TWO -1015C GREEK ACROPHONIC THESPIAN TWO -1015D GREEK ACROPHONIC CYRENAIC TWO DRACHMAS -1015E GREEK ACROPHONIC EPIDAUREAN TWO DRACHMAS -1015F GREEK ACROPHONIC TROEZENIAN FIVE -10160 GREEK ACROPHONIC TROEZENIAN TEN -10161 GREEK ACROPHONIC TROEZENIAN TEN ALTERNATE FORM -10162 GREEK ACROPHONIC HERMIONIAN TEN -10163 GREEK ACROPHONIC MESSENIAN TEN -10164 GREEK ACROPHONIC THESPIAN TEN -10165 GREEK ACROPHONIC THESPIAN THIRTY -10166 GREEK ACROPHONIC TROEZENIAN FIFTY -10167 GREEK ACROPHONIC TROEZENIAN 
FIFTY ALTERNATE FORM -10168 GREEK ACROPHONIC HERMIONIAN FIFTY -10169 GREEK ACROPHONIC THESPIAN FIFTY -1016A GREEK ACROPHONIC THESPIAN ONE HUNDRED -1016B GREEK ACROPHONIC THESPIAN THREE HUNDRED -1016C GREEK ACROPHONIC EPIDAUREAN FIVE HUNDRED -1016D GREEK ACROPHONIC TROEZENIAN FIVE HUNDRED -1016E GREEK ACROPHONIC THESPIAN FIVE HUNDRED -1016F GREEK ACROPHONIC CARYSTIAN FIVE HUNDRED -10170 GREEK ACROPHONIC NAXIAN FIVE HUNDRED -10171 GREEK ACROPHONIC THESPIAN ONE THOUSAND -10172 GREEK ACROPHONIC THESPIAN FIVE THOUSAND -10173 GREEK ACROPHONIC DELPHIC FIVE MNAS -10174 GREEK ACROPHONIC STRATIAN FIFTY MNAS -10175 GREEK ONE HALF SIGN -10176 GREEK ONE HALF SIGN ALTERNATE FORM -10177 GREEK TWO THIRDS SIGN -10178 GREEK THREE QUARTERS SIGN -10179 GREEK YEAR SIGN -1017A GREEK TALENT SIGN -1017B GREEK DRACHMA SIGN -1017C GREEK OBOL SIGN -1017D GREEK TWO OBOLS SIGN -1017E GREEK THREE OBOLS SIGN -1017F GREEK FOUR OBOLS SIGN -10180 GREEK FIVE OBOLS SIGN -10181 GREEK METRETES SIGN -10182 GREEK KYATHOS BASE SIGN -10183 GREEK LITRA SIGN -10184 GREEK OUNKIA SIGN -10185 GREEK XESTES SIGN -10186 GREEK ARTABE SIGN -10187 GREEK AROURA SIGN -10188 GREEK GRAMMA SIGN -10189 GREEK TRYBLION BASE SIGN -1018A GREEK ZERO SIGN -10190 ROMAN SEXTANS SIGN -10191 ROMAN UNCIA SIGN -10192 ROMAN SEMUNCIA SIGN -10193 ROMAN SEXTULA SIGN -10194 ROMAN DIMIDIA SEXTULA SIGN -10195 ROMAN SILIQUA SIGN -10196 ROMAN DENARIUS SIGN -10197 ROMAN QUINARIUS SIGN -10198 ROMAN SESTERTIUS SIGN -10199 ROMAN DUPONDIUS SIGN -1019A ROMAN AS SIGN -1019B ROMAN CENTURIAL SIGN -101D0 PHAISTOS DISC SIGN PEDESTRIAN -101D1 PHAISTOS DISC SIGN PLUMED HEAD -101D2 PHAISTOS DISC SIGN TATTOOED HEAD -101D3 PHAISTOS DISC SIGN CAPTIVE -101D4 PHAISTOS DISC SIGN CHILD -101D5 PHAISTOS DISC SIGN WOMAN -101D6 PHAISTOS DISC SIGN HELMET -101D7 PHAISTOS DISC SIGN GAUNTLET -101D8 PHAISTOS DISC SIGN TIARA -101D9 PHAISTOS DISC SIGN ARROW -101DA PHAISTOS DISC SIGN BOW -101DB PHAISTOS DISC SIGN SHIELD -101DC PHAISTOS DISC SIGN CLUB -101DD PHAISTOS DISC SIGN 
MANACLES -101DE PHAISTOS DISC SIGN MATTOCK -101DF PHAISTOS DISC SIGN SAW -101E0 PHAISTOS DISC SIGN LID -101E1 PHAISTOS DISC SIGN BOOMERANG -101E2 PHAISTOS DISC SIGN CARPENTRY PLANE -101E3 PHAISTOS DISC SIGN DOLIUM -101E4 PHAISTOS DISC SIGN COMB -101E5 PHAISTOS DISC SIGN SLING -101E6 PHAISTOS DISC SIGN COLUMN -101E7 PHAISTOS DISC SIGN BEEHIVE -101E8 PHAISTOS DISC SIGN SHIP -101E9 PHAISTOS DISC SIGN HORN -101EA PHAISTOS DISC SIGN HIDE -101EB PHAISTOS DISC SIGN BULLS LEG -101EC PHAISTOS DISC SIGN CAT -101ED PHAISTOS DISC SIGN RAM -101EE PHAISTOS DISC SIGN EAGLE -101EF PHAISTOS DISC SIGN DOVE -101F0 PHAISTOS DISC SIGN TUNNY -101F1 PHAISTOS DISC SIGN BEE -101F2 PHAISTOS DISC SIGN PLANE TREE -101F3 PHAISTOS DISC SIGN VINE -101F4 PHAISTOS DISC SIGN PAPYRUS -101F5 PHAISTOS DISC SIGN ROSETTE -101F6 PHAISTOS DISC SIGN LILY -101F7 PHAISTOS DISC SIGN OX BACK -101F8 PHAISTOS DISC SIGN FLUTE -101F9 PHAISTOS DISC SIGN GRATER -101FA PHAISTOS DISC SIGN STRAINER -101FB PHAISTOS DISC SIGN SMALL AXE -101FC PHAISTOS DISC SIGN WAVY BAND -101FD PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE -10280 LYCIAN LETTER A -10281 LYCIAN LETTER E -10282 LYCIAN LETTER B -10283 LYCIAN LETTER BH -10284 LYCIAN LETTER G -10285 LYCIAN LETTER D -10286 LYCIAN LETTER I -10287 LYCIAN LETTER W -10288 LYCIAN LETTER Z -10289 LYCIAN LETTER TH -1028A LYCIAN LETTER J -1028B LYCIAN LETTER K -1028C LYCIAN LETTER Q -1028D LYCIAN LETTER L -1028E LYCIAN LETTER M -1028F LYCIAN LETTER N -10290 LYCIAN LETTER MM -10291 LYCIAN LETTER NN -10292 LYCIAN LETTER U -10293 LYCIAN LETTER P -10294 LYCIAN LETTER KK -10295 LYCIAN LETTER R -10296 LYCIAN LETTER S -10297 LYCIAN LETTER T -10298 LYCIAN LETTER TT -10299 LYCIAN LETTER AN -1029A LYCIAN LETTER EN -1029B LYCIAN LETTER H -1029C LYCIAN LETTER X -102A0 CARIAN LETTER A -102A1 CARIAN LETTER P2 -102A2 CARIAN LETTER D -102A3 CARIAN LETTER L -102A4 CARIAN LETTER UUU -102A5 CARIAN LETTER R -102A6 CARIAN LETTER LD -102A7 CARIAN LETTER A2 -102A8 CARIAN LETTER Q -102A9 CARIAN LETTER B -102AA 
CARIAN LETTER M -102AB CARIAN LETTER O -102AC CARIAN LETTER D2 -102AD CARIAN LETTER T -102AE CARIAN LETTER SH -102AF CARIAN LETTER SH2 -102B0 CARIAN LETTER S -102B1 CARIAN LETTER C-18 -102B2 CARIAN LETTER U -102B3 CARIAN LETTER NN -102B4 CARIAN LETTER X -102B5 CARIAN LETTER N -102B6 CARIAN LETTER TT2 -102B7 CARIAN LETTER P -102B8 CARIAN LETTER SS -102B9 CARIAN LETTER I -102BA CARIAN LETTER E -102BB CARIAN LETTER UUUU -102BC CARIAN LETTER K -102BD CARIAN LETTER K2 -102BE CARIAN LETTER ND -102BF CARIAN LETTER UU -102C0 CARIAN LETTER G -102C1 CARIAN LETTER G2 -102C2 CARIAN LETTER ST -102C3 CARIAN LETTER ST2 -102C4 CARIAN LETTER NG -102C5 CARIAN LETTER II -102C6 CARIAN LETTER C-39 -102C7 CARIAN LETTER TT -102C8 CARIAN LETTER UUU2 -102C9 CARIAN LETTER RR -102CA CARIAN LETTER MB -102CB CARIAN LETTER MB2 -102CC CARIAN LETTER MB3 -102CD CARIAN LETTER MB4 -102CE CARIAN LETTER LD2 -102CF CARIAN LETTER E2 -102D0 CARIAN LETTER UUU3 -10300 OLD ITALIC LETTER A -10301 OLD ITALIC LETTER BE -10302 OLD ITALIC LETTER KE -10303 OLD ITALIC LETTER DE -10304 OLD ITALIC LETTER E -10305 OLD ITALIC LETTER VE -10306 OLD ITALIC LETTER ZE -10307 OLD ITALIC LETTER HE -10308 OLD ITALIC LETTER THE -10309 OLD ITALIC LETTER I -1030A OLD ITALIC LETTER KA -1030B OLD ITALIC LETTER EL -1030C OLD ITALIC LETTER EM -1030D OLD ITALIC LETTER EN -1030E OLD ITALIC LETTER ESH -1030F OLD ITALIC LETTER O -10310 OLD ITALIC LETTER PE -10311 OLD ITALIC LETTER SHE -10312 OLD ITALIC LETTER KU -10313 OLD ITALIC LETTER ER -10314 OLD ITALIC LETTER ES -10315 OLD ITALIC LETTER TE -10316 OLD ITALIC LETTER U -10317 OLD ITALIC LETTER EKS -10318 OLD ITALIC LETTER PHE -10319 OLD ITALIC LETTER KHE -1031A OLD ITALIC LETTER EF -1031B OLD ITALIC LETTER ERS -1031C OLD ITALIC LETTER CHE -1031D OLD ITALIC LETTER II -1031E OLD ITALIC LETTER UU -10320 OLD ITALIC NUMERAL ONE -10321 OLD ITALIC NUMERAL FIVE -10322 OLD ITALIC NUMERAL TEN -10323 OLD ITALIC NUMERAL FIFTY -10330 GOTHIC LETTER AHSA -10331 GOTHIC LETTER BAIRKAN -10332 GOTHIC 
LETTER GIBA -10333 GOTHIC LETTER DAGS -10334 GOTHIC LETTER AIHVUS -10335 GOTHIC LETTER QAIRTHRA -10336 GOTHIC LETTER IUJA -10337 GOTHIC LETTER HAGL -10338 GOTHIC LETTER THIUTH -10339 GOTHIC LETTER EIS -1033A GOTHIC LETTER KUSMA -1033B GOTHIC LETTER LAGUS -1033C GOTHIC LETTER MANNA -1033D GOTHIC LETTER NAUTHS -1033E GOTHIC LETTER JER -1033F GOTHIC LETTER URUS -10340 GOTHIC LETTER PAIRTHRA -10341 GOTHIC LETTER NINETY -10342 GOTHIC LETTER RAIDA -10343 GOTHIC LETTER SAUIL -10344 GOTHIC LETTER TEIWS -10345 GOTHIC LETTER WINJA -10346 GOTHIC LETTER FAIHU -10347 GOTHIC LETTER IGGWS -10348 GOTHIC LETTER HWAIR -10349 GOTHIC LETTER OTHAL -1034A GOTHIC LETTER NINE HUNDRED -10380 UGARITIC LETTER ALPA -10381 UGARITIC LETTER BETA -10382 UGARITIC LETTER GAMLA -10383 UGARITIC LETTER KHA -10384 UGARITIC LETTER DELTA -10385 UGARITIC LETTER HO -10386 UGARITIC LETTER WO -10387 UGARITIC LETTER ZETA -10388 UGARITIC LETTER HOTA -10389 UGARITIC LETTER TET -1038A UGARITIC LETTER YOD -1038B UGARITIC LETTER KAF -1038C UGARITIC LETTER SHIN -1038D UGARITIC LETTER LAMDA -1038E UGARITIC LETTER MEM -1038F UGARITIC LETTER DHAL -10390 UGARITIC LETTER NUN -10391 UGARITIC LETTER ZU -10392 UGARITIC LETTER SAMKA -10393 UGARITIC LETTER AIN -10394 UGARITIC LETTER PU -10395 UGARITIC LETTER SADE -10396 UGARITIC LETTER QOPA -10397 UGARITIC LETTER RASHA -10398 UGARITIC LETTER THANNA -10399 UGARITIC LETTER GHAIN -1039A UGARITIC LETTER TO -1039B UGARITIC LETTER I -1039C UGARITIC LETTER U -1039D UGARITIC LETTER SSU -1039F UGARITIC WORD DIVIDER -103A0 OLD PERSIAN SIGN A -103A1 OLD PERSIAN SIGN I -103A2 OLD PERSIAN SIGN U -103A3 OLD PERSIAN SIGN KA -103A4 OLD PERSIAN SIGN KU -103A5 OLD PERSIAN SIGN GA -103A6 OLD PERSIAN SIGN GU -103A7 OLD PERSIAN SIGN XA -103A8 OLD PERSIAN SIGN CA -103A9 OLD PERSIAN SIGN JA -103AA OLD PERSIAN SIGN JI -103AB OLD PERSIAN SIGN TA -103AC OLD PERSIAN SIGN TU -103AD OLD PERSIAN SIGN DA -103AE OLD PERSIAN SIGN DI -103AF OLD PERSIAN SIGN DU -103B0 OLD PERSIAN SIGN THA -103B1 OLD PERSIAN 
SIGN PA -103B2 OLD PERSIAN SIGN BA -103B3 OLD PERSIAN SIGN FA -103B4 OLD PERSIAN SIGN NA -103B5 OLD PERSIAN SIGN NU -103B6 OLD PERSIAN SIGN MA -103B7 OLD PERSIAN SIGN MI -103B8 OLD PERSIAN SIGN MU -103B9 OLD PERSIAN SIGN YA -103BA OLD PERSIAN SIGN VA -103BB OLD PERSIAN SIGN VI -103BC OLD PERSIAN SIGN RA -103BD OLD PERSIAN SIGN RU -103BE OLD PERSIAN SIGN LA -103BF OLD PERSIAN SIGN SA -103C0 OLD PERSIAN SIGN ZA -103C1 OLD PERSIAN SIGN SHA -103C2 OLD PERSIAN SIGN SSA -103C3 OLD PERSIAN SIGN HA -103C8 OLD PERSIAN SIGN AURAMAZDAA -103C9 OLD PERSIAN SIGN AURAMAZDAA-2 -103CA OLD PERSIAN SIGN AURAMAZDAAHA -103CB OLD PERSIAN SIGN XSHAAYATHIYA -103CC OLD PERSIAN SIGN DAHYAAUSH -103CD OLD PERSIAN SIGN DAHYAAUSH-2 -103CE OLD PERSIAN SIGN BAGA -103CF OLD PERSIAN SIGN BUUMISH -103D0 OLD PERSIAN WORD DIVIDER -103D1 OLD PERSIAN NUMBER ONE -103D2 OLD PERSIAN NUMBER TWO -103D3 OLD PERSIAN NUMBER TEN -103D4 OLD PERSIAN NUMBER TWENTY -103D5 OLD PERSIAN NUMBER HUNDRED -10400 DESERET CAPITAL LETTER LONG I -10401 DESERET CAPITAL LETTER LONG E -10402 DESERET CAPITAL LETTER LONG A -10403 DESERET CAPITAL LETTER LONG AH -10404 DESERET CAPITAL LETTER LONG O -10405 DESERET CAPITAL LETTER LONG OO -10406 DESERET CAPITAL LETTER SHORT I -10407 DESERET CAPITAL LETTER SHORT E -10408 DESERET CAPITAL LETTER SHORT A -10409 DESERET CAPITAL LETTER SHORT AH -1040A DESERET CAPITAL LETTER SHORT O -1040B DESERET CAPITAL LETTER SHORT OO -1040C DESERET CAPITAL LETTER AY -1040D DESERET CAPITAL LETTER OW -1040E DESERET CAPITAL LETTER WU -1040F DESERET CAPITAL LETTER YEE -10410 DESERET CAPITAL LETTER H -10411 DESERET CAPITAL LETTER PEE -10412 DESERET CAPITAL LETTER BEE -10413 DESERET CAPITAL LETTER TEE -10414 DESERET CAPITAL LETTER DEE -10415 DESERET CAPITAL LETTER CHEE -10416 DESERET CAPITAL LETTER JEE -10417 DESERET CAPITAL LETTER KAY -10418 DESERET CAPITAL LETTER GAY -10419 DESERET CAPITAL LETTER EF -1041A DESERET CAPITAL LETTER VEE -1041B DESERET CAPITAL LETTER ETH -1041C DESERET CAPITAL LETTER THEE -1041D 
DESERET CAPITAL LETTER ES -1041E DESERET CAPITAL LETTER ZEE -1041F DESERET CAPITAL LETTER ESH -10420 DESERET CAPITAL LETTER ZHEE -10421 DESERET CAPITAL LETTER ER -10422 DESERET CAPITAL LETTER EL -10423 DESERET CAPITAL LETTER EM -10424 DESERET CAPITAL LETTER EN -10425 DESERET CAPITAL LETTER ENG -10426 DESERET CAPITAL LETTER OI -10427 DESERET CAPITAL LETTER EW -10428 DESERET SMALL LETTER LONG I -10429 DESERET SMALL LETTER LONG E -1042A DESERET SMALL LETTER LONG A -1042B DESERET SMALL LETTER LONG AH -1042C DESERET SMALL LETTER LONG O -1042D DESERET SMALL LETTER LONG OO -1042E DESERET SMALL LETTER SHORT I -1042F DESERET SMALL LETTER SHORT E -10430 DESERET SMALL LETTER SHORT A -10431 DESERET SMALL LETTER SHORT AH -10432 DESERET SMALL LETTER SHORT O -10433 DESERET SMALL LETTER SHORT OO -10434 DESERET SMALL LETTER AY -10435 DESERET SMALL LETTER OW -10436 DESERET SMALL LETTER WU -10437 DESERET SMALL LETTER YEE -10438 DESERET SMALL LETTER H -10439 DESERET SMALL LETTER PEE -1043A DESERET SMALL LETTER BEE -1043B DESERET SMALL LETTER TEE -1043C DESERET SMALL LETTER DEE -1043D DESERET SMALL LETTER CHEE -1043E DESERET SMALL LETTER JEE -1043F DESERET SMALL LETTER KAY -10440 DESERET SMALL LETTER GAY -10441 DESERET SMALL LETTER EF -10442 DESERET SMALL LETTER VEE -10443 DESERET SMALL LETTER ETH -10444 DESERET SMALL LETTER THEE -10445 DESERET SMALL LETTER ES -10446 DESERET SMALL LETTER ZEE -10447 DESERET SMALL LETTER ESH -10448 DESERET SMALL LETTER ZHEE -10449 DESERET SMALL LETTER ER -1044A DESERET SMALL LETTER EL -1044B DESERET SMALL LETTER EM -1044C DESERET SMALL LETTER EN -1044D DESERET SMALL LETTER ENG -1044E DESERET SMALL LETTER OI -1044F DESERET SMALL LETTER EW -10450 SHAVIAN LETTER PEEP -10451 SHAVIAN LETTER TOT -10452 SHAVIAN LETTER KICK -10453 SHAVIAN LETTER FEE -10454 SHAVIAN LETTER THIGH -10455 SHAVIAN LETTER SO -10456 SHAVIAN LETTER SURE -10457 SHAVIAN LETTER CHURCH -10458 SHAVIAN LETTER YEA -10459 SHAVIAN LETTER HUNG -1045A SHAVIAN LETTER BIB -1045B SHAVIAN LETTER DEAD 
-1045C SHAVIAN LETTER GAG -1045D SHAVIAN LETTER VOW -1045E SHAVIAN LETTER THEY -1045F SHAVIAN LETTER ZOO -10460 SHAVIAN LETTER MEASURE -10461 SHAVIAN LETTER JUDGE -10462 SHAVIAN LETTER WOE -10463 SHAVIAN LETTER HA-HA -10464 SHAVIAN LETTER LOLL -10465 SHAVIAN LETTER MIME -10466 SHAVIAN LETTER IF -10467 SHAVIAN LETTER EGG -10468 SHAVIAN LETTER ASH -10469 SHAVIAN LETTER ADO -1046A SHAVIAN LETTER ON -1046B SHAVIAN LETTER WOOL -1046C SHAVIAN LETTER OUT -1046D SHAVIAN LETTER AH -1046E SHAVIAN LETTER ROAR -1046F SHAVIAN LETTER NUN -10470 SHAVIAN LETTER EAT -10471 SHAVIAN LETTER AGE -10472 SHAVIAN LETTER ICE -10473 SHAVIAN LETTER UP -10474 SHAVIAN LETTER OAK -10475 SHAVIAN LETTER OOZE -10476 SHAVIAN LETTER OIL -10477 SHAVIAN LETTER AWE -10478 SHAVIAN LETTER ARE -10479 SHAVIAN LETTER OR -1047A SHAVIAN LETTER AIR -1047B SHAVIAN LETTER ERR -1047C SHAVIAN LETTER ARRAY -1047D SHAVIAN LETTER EAR -1047E SHAVIAN LETTER IAN -1047F SHAVIAN LETTER YEW -10480 OSMANYA LETTER ALEF -10481 OSMANYA LETTER BA -10482 OSMANYA LETTER TA -10483 OSMANYA LETTER JA -10484 OSMANYA LETTER XA -10485 OSMANYA LETTER KHA -10486 OSMANYA LETTER DEEL -10487 OSMANYA LETTER RA -10488 OSMANYA LETTER SA -10489 OSMANYA LETTER SHIIN -1048A OSMANYA LETTER DHA -1048B OSMANYA LETTER CAYN -1048C OSMANYA LETTER GA -1048D OSMANYA LETTER FA -1048E OSMANYA LETTER QAAF -1048F OSMANYA LETTER KAAF -10490 OSMANYA LETTER LAAN -10491 OSMANYA LETTER MIIN -10492 OSMANYA LETTER NUUN -10493 OSMANYA LETTER WAW -10494 OSMANYA LETTER HA -10495 OSMANYA LETTER YA -10496 OSMANYA LETTER A -10497 OSMANYA LETTER E -10498 OSMANYA LETTER I -10499 OSMANYA LETTER O -1049A OSMANYA LETTER U -1049B OSMANYA LETTER AA -1049C OSMANYA LETTER EE -1049D OSMANYA LETTER OO -104A0 OSMANYA DIGIT ZERO -104A1 OSMANYA DIGIT ONE -104A2 OSMANYA DIGIT TWO -104A3 OSMANYA DIGIT THREE -104A4 OSMANYA DIGIT FOUR -104A5 OSMANYA DIGIT FIVE -104A6 OSMANYA DIGIT SIX -104A7 OSMANYA DIGIT SEVEN -104A8 OSMANYA DIGIT EIGHT -104A9 OSMANYA DIGIT NINE -10800 CYPRIOT SYLLABLE A 
-10801 CYPRIOT SYLLABLE E -10802 CYPRIOT SYLLABLE I -10803 CYPRIOT SYLLABLE O -10804 CYPRIOT SYLLABLE U -10805 CYPRIOT SYLLABLE JA -10808 CYPRIOT SYLLABLE JO -1080A CYPRIOT SYLLABLE KA -1080B CYPRIOT SYLLABLE KE -1080C CYPRIOT SYLLABLE KI -1080D CYPRIOT SYLLABLE KO -1080E CYPRIOT SYLLABLE KU -1080F CYPRIOT SYLLABLE LA -10810 CYPRIOT SYLLABLE LE -10811 CYPRIOT SYLLABLE LI -10812 CYPRIOT SYLLABLE LO -10813 CYPRIOT SYLLABLE LU -10814 CYPRIOT SYLLABLE MA -10815 CYPRIOT SYLLABLE ME -10816 CYPRIOT SYLLABLE MI -10817 CYPRIOT SYLLABLE MO -10818 CYPRIOT SYLLABLE MU -10819 CYPRIOT SYLLABLE NA -1081A CYPRIOT SYLLABLE NE -1081B CYPRIOT SYLLABLE NI -1081C CYPRIOT SYLLABLE NO -1081D CYPRIOT SYLLABLE NU -1081E CYPRIOT SYLLABLE PA -1081F CYPRIOT SYLLABLE PE -10820 CYPRIOT SYLLABLE PI -10821 CYPRIOT SYLLABLE PO -10822 CYPRIOT SYLLABLE PU -10823 CYPRIOT SYLLABLE RA -10824 CYPRIOT SYLLABLE RE -10825 CYPRIOT SYLLABLE RI -10826 CYPRIOT SYLLABLE RO -10827 CYPRIOT SYLLABLE RU -10828 CYPRIOT SYLLABLE SA -10829 CYPRIOT SYLLABLE SE -1082A CYPRIOT SYLLABLE SI -1082B CYPRIOT SYLLABLE SO -1082C CYPRIOT SYLLABLE SU -1082D CYPRIOT SYLLABLE TA -1082E CYPRIOT SYLLABLE TE -1082F CYPRIOT SYLLABLE TI -10830 CYPRIOT SYLLABLE TO -10831 CYPRIOT SYLLABLE TU -10832 CYPRIOT SYLLABLE WA -10833 CYPRIOT SYLLABLE WE -10834 CYPRIOT SYLLABLE WI -10835 CYPRIOT SYLLABLE WO -10837 CYPRIOT SYLLABLE XA -10838 CYPRIOT SYLLABLE XE -1083C CYPRIOT SYLLABLE ZA -1083F CYPRIOT SYLLABLE ZO -10840 IMPERIAL ARAMAIC LETTER ALEPH -10841 IMPERIAL ARAMAIC LETTER BETH -10842 IMPERIAL ARAMAIC LETTER GIMEL -10843 IMPERIAL ARAMAIC LETTER DALETH -10844 IMPERIAL ARAMAIC LETTER HE -10845 IMPERIAL ARAMAIC LETTER WAW -10846 IMPERIAL ARAMAIC LETTER ZAYIN -10847 IMPERIAL ARAMAIC LETTER HETH -10848 IMPERIAL ARAMAIC LETTER TETH -10849 IMPERIAL ARAMAIC LETTER YODH -1084A IMPERIAL ARAMAIC LETTER KAPH -1084B IMPERIAL ARAMAIC LETTER LAMEDH -1084C IMPERIAL ARAMAIC LETTER MEM -1084D IMPERIAL ARAMAIC LETTER NUN -1084E IMPERIAL ARAMAIC LETTER SAMEKH 
-1084F IMPERIAL ARAMAIC LETTER AYIN -10850 IMPERIAL ARAMAIC LETTER PE -10851 IMPERIAL ARAMAIC LETTER SADHE -10852 IMPERIAL ARAMAIC LETTER QOPH -10853 IMPERIAL ARAMAIC LETTER RESH -10854 IMPERIAL ARAMAIC LETTER SHIN -10855 IMPERIAL ARAMAIC LETTER TAW -10857 IMPERIAL ARAMAIC SECTION SIGN -10858 IMPERIAL ARAMAIC NUMBER ONE -10859 IMPERIAL ARAMAIC NUMBER TWO -1085A IMPERIAL ARAMAIC NUMBER THREE -1085B IMPERIAL ARAMAIC NUMBER TEN -1085C IMPERIAL ARAMAIC NUMBER TWENTY -1085D IMPERIAL ARAMAIC NUMBER ONE HUNDRED -1085E IMPERIAL ARAMAIC NUMBER ONE THOUSAND -1085F IMPERIAL ARAMAIC NUMBER TEN THOUSAND -10900 PHOENICIAN LETTER ALF -10901 PHOENICIAN LETTER BET -10902 PHOENICIAN LETTER GAML -10903 PHOENICIAN LETTER DELT -10904 PHOENICIAN LETTER HE -10905 PHOENICIAN LETTER WAU -10906 PHOENICIAN LETTER ZAI -10907 PHOENICIAN LETTER HET -10908 PHOENICIAN LETTER TET -10909 PHOENICIAN LETTER YOD -1090A PHOENICIAN LETTER KAF -1090B PHOENICIAN LETTER LAMD -1090C PHOENICIAN LETTER MEM -1090D PHOENICIAN LETTER NUN -1090E PHOENICIAN LETTER SEMK -1090F PHOENICIAN LETTER AIN -10910 PHOENICIAN LETTER PE -10911 PHOENICIAN LETTER SADE -10912 PHOENICIAN LETTER QOF -10913 PHOENICIAN LETTER ROSH -10914 PHOENICIAN LETTER SHIN -10915 PHOENICIAN LETTER TAU -10916 PHOENICIAN NUMBER ONE -10917 PHOENICIAN NUMBER TEN -10918 PHOENICIAN NUMBER TWENTY -10919 PHOENICIAN NUMBER ONE HUNDRED -1091A PHOENICIAN NUMBER TWO -1091B PHOENICIAN NUMBER THREE -1091F PHOENICIAN WORD SEPARATOR -10920 LYDIAN LETTER A -10921 LYDIAN LETTER B -10922 LYDIAN LETTER G -10923 LYDIAN LETTER D -10924 LYDIAN LETTER E -10925 LYDIAN LETTER V -10926 LYDIAN LETTER I -10927 LYDIAN LETTER Y -10928 LYDIAN LETTER K -10929 LYDIAN LETTER L -1092A LYDIAN LETTER M -1092B LYDIAN LETTER N -1092C LYDIAN LETTER O -1092D LYDIAN LETTER R -1092E LYDIAN LETTER SS -1092F LYDIAN LETTER T -10930 LYDIAN LETTER U -10931 LYDIAN LETTER F -10932 LYDIAN LETTER Q -10933 LYDIAN LETTER S -10934 LYDIAN LETTER TT -10935 LYDIAN LETTER AN -10936 LYDIAN LETTER EN 
-10937 LYDIAN LETTER LY -10938 LYDIAN LETTER NN -10939 LYDIAN LETTER C -1093F LYDIAN TRIANGULAR MARK -10A00 KHAROSHTHI LETTER A -10A01 KHAROSHTHI VOWEL SIGN I -10A02 KHAROSHTHI VOWEL SIGN U -10A03 KHAROSHTHI VOWEL SIGN VOCALIC R -10A05 KHAROSHTHI VOWEL SIGN E -10A06 KHAROSHTHI VOWEL SIGN O -10A0C KHAROSHTHI VOWEL LENGTH MARK -10A0D KHAROSHTHI SIGN DOUBLE RING BELOW -10A0E KHAROSHTHI SIGN ANUSVARA -10A0F KHAROSHTHI SIGN VISARGA -10A10 KHAROSHTHI LETTER KA -10A11 KHAROSHTHI LETTER KHA -10A12 KHAROSHTHI LETTER GA -10A13 KHAROSHTHI LETTER GHA -10A15 KHAROSHTHI LETTER CA -10A16 KHAROSHTHI LETTER CHA -10A17 KHAROSHTHI LETTER JA -10A19 KHAROSHTHI LETTER NYA -10A1A KHAROSHTHI LETTER TTA -10A1B KHAROSHTHI LETTER TTHA -10A1C KHAROSHTHI LETTER DDA -10A1D KHAROSHTHI LETTER DDHA -10A1E KHAROSHTHI LETTER NNA -10A1F KHAROSHTHI LETTER TA -10A20 KHAROSHTHI LETTER THA -10A21 KHAROSHTHI LETTER DA -10A22 KHAROSHTHI LETTER DHA -10A23 KHAROSHTHI LETTER NA -10A24 KHAROSHTHI LETTER PA -10A25 KHAROSHTHI LETTER PHA -10A26 KHAROSHTHI LETTER BA -10A27 KHAROSHTHI LETTER BHA -10A28 KHAROSHTHI LETTER MA -10A29 KHAROSHTHI LETTER YA -10A2A KHAROSHTHI LETTER RA -10A2B KHAROSHTHI LETTER LA -10A2C KHAROSHTHI LETTER VA -10A2D KHAROSHTHI LETTER SHA -10A2E KHAROSHTHI LETTER SSA -10A2F KHAROSHTHI LETTER SA -10A30 KHAROSHTHI LETTER ZA -10A31 KHAROSHTHI LETTER HA -10A32 KHAROSHTHI LETTER KKA -10A33 KHAROSHTHI LETTER TTTHA -10A38 KHAROSHTHI SIGN BAR ABOVE -10A39 KHAROSHTHI SIGN CAUDA -10A3A KHAROSHTHI SIGN DOT BELOW -10A3F KHAROSHTHI VIRAMA -10A40 KHAROSHTHI DIGIT ONE -10A41 KHAROSHTHI DIGIT TWO -10A42 KHAROSHTHI DIGIT THREE -10A43 KHAROSHTHI DIGIT FOUR -10A44 KHAROSHTHI NUMBER TEN -10A45 KHAROSHTHI NUMBER TWENTY -10A46 KHAROSHTHI NUMBER ONE HUNDRED -10A47 KHAROSHTHI NUMBER ONE THOUSAND -10A50 KHAROSHTHI PUNCTUATION DOT -10A51 KHAROSHTHI PUNCTUATION SMALL CIRCLE -10A52 KHAROSHTHI PUNCTUATION CIRCLE -10A53 KHAROSHTHI PUNCTUATION CRESCENT BAR -10A54 KHAROSHTHI PUNCTUATION MANGALAM -10A55 KHAROSHTHI 
PUNCTUATION LOTUS -10A56 KHAROSHTHI PUNCTUATION DANDA -10A57 KHAROSHTHI PUNCTUATION DOUBLE DANDA -10A58 KHAROSHTHI PUNCTUATION LINES -10A60 OLD SOUTH ARABIAN LETTER HE -10A61 OLD SOUTH ARABIAN LETTER LAMEDH -10A62 OLD SOUTH ARABIAN LETTER HETH -10A63 OLD SOUTH ARABIAN LETTER MEM -10A64 OLD SOUTH ARABIAN LETTER QOPH -10A65 OLD SOUTH ARABIAN LETTER WAW -10A66 OLD SOUTH ARABIAN LETTER SHIN -10A67 OLD SOUTH ARABIAN LETTER RESH -10A68 OLD SOUTH ARABIAN LETTER BETH -10A69 OLD SOUTH ARABIAN LETTER TAW -10A6A OLD SOUTH ARABIAN LETTER SAT -10A6B OLD SOUTH ARABIAN LETTER KAPH -10A6C OLD SOUTH ARABIAN LETTER NUN -10A6D OLD SOUTH ARABIAN LETTER KHETH -10A6E OLD SOUTH ARABIAN LETTER SADHE -10A6F OLD SOUTH ARABIAN LETTER SAMEKH -10A70 OLD SOUTH ARABIAN LETTER FE -10A71 OLD SOUTH ARABIAN LETTER ALEF -10A72 OLD SOUTH ARABIAN LETTER AYN -10A73 OLD SOUTH ARABIAN LETTER DHADHE -10A74 OLD SOUTH ARABIAN LETTER GIMEL -10A75 OLD SOUTH ARABIAN LETTER DALETH -10A76 OLD SOUTH ARABIAN LETTER GHAYN -10A77 OLD SOUTH ARABIAN LETTER TETH -10A78 OLD SOUTH ARABIAN LETTER ZAYN -10A79 OLD SOUTH ARABIAN LETTER DHALETH -10A7A OLD SOUTH ARABIAN LETTER YODH -10A7B OLD SOUTH ARABIAN LETTER THAW -10A7C OLD SOUTH ARABIAN LETTER THETH -10A7D OLD SOUTH ARABIAN NUMBER ONE -10A7E OLD SOUTH ARABIAN NUMBER FIFTY -10A7F OLD SOUTH ARABIAN NUMERIC INDICATOR -10B00 AVESTAN LETTER A -10B01 AVESTAN LETTER AA -10B02 AVESTAN LETTER AO -10B03 AVESTAN LETTER AAO -10B04 AVESTAN LETTER AN -10B05 AVESTAN LETTER AAN -10B06 AVESTAN LETTER AE -10B07 AVESTAN LETTER AEE -10B08 AVESTAN LETTER E -10B09 AVESTAN LETTER EE -10B0A AVESTAN LETTER O -10B0B AVESTAN LETTER OO -10B0C AVESTAN LETTER I -10B0D AVESTAN LETTER II -10B0E AVESTAN LETTER U -10B0F AVESTAN LETTER UU -10B10 AVESTAN LETTER KE -10B11 AVESTAN LETTER XE -10B12 AVESTAN LETTER XYE -10B13 AVESTAN LETTER XVE -10B14 AVESTAN LETTER GE -10B15 AVESTAN LETTER GGE -10B16 AVESTAN LETTER GHE -10B17 AVESTAN LETTER CE -10B18 AVESTAN LETTER JE -10B19 AVESTAN LETTER TE -10B1A AVESTAN 
LETTER THE -10B1B AVESTAN LETTER DE -10B1C AVESTAN LETTER DHE -10B1D AVESTAN LETTER TTE -10B1E AVESTAN LETTER PE -10B1F AVESTAN LETTER FE -10B20 AVESTAN LETTER BE -10B21 AVESTAN LETTER BHE -10B22 AVESTAN LETTER NGE -10B23 AVESTAN LETTER NGYE -10B24 AVESTAN LETTER NGVE -10B25 AVESTAN LETTER NE -10B26 AVESTAN LETTER NYE -10B27 AVESTAN LETTER NNE -10B28 AVESTAN LETTER ME -10B29 AVESTAN LETTER HME -10B2A AVESTAN LETTER YYE -10B2B AVESTAN LETTER YE -10B2C AVESTAN LETTER VE -10B2D AVESTAN LETTER RE -10B2E AVESTAN LETTER LE -10B2F AVESTAN LETTER SE -10B30 AVESTAN LETTER ZE -10B31 AVESTAN LETTER SHE -10B32 AVESTAN LETTER ZHE -10B33 AVESTAN LETTER SHYE -10B34 AVESTAN LETTER SSHE -10B35 AVESTAN LETTER HE -10B39 AVESTAN ABBREVIATION MARK -10B3A TINY TWO DOTS OVER ONE DOT PUNCTUATION -10B3B SMALL TWO DOTS OVER ONE DOT PUNCTUATION -10B3C LARGE TWO DOTS OVER ONE DOT PUNCTUATION -10B3D LARGE ONE DOT OVER TWO DOTS PUNCTUATION -10B3E LARGE TWO RINGS OVER ONE RING PUNCTUATION -10B3F LARGE ONE RING OVER TWO RINGS PUNCTUATION -10B40 INSCRIPTIONAL PARTHIAN LETTER ALEPH -10B41 INSCRIPTIONAL PARTHIAN LETTER BETH -10B42 INSCRIPTIONAL PARTHIAN LETTER GIMEL -10B43 INSCRIPTIONAL PARTHIAN LETTER DALETH -10B44 INSCRIPTIONAL PARTHIAN LETTER HE -10B45 INSCRIPTIONAL PARTHIAN LETTER WAW -10B46 INSCRIPTIONAL PARTHIAN LETTER ZAYIN -10B47 INSCRIPTIONAL PARTHIAN LETTER HETH -10B48 INSCRIPTIONAL PARTHIAN LETTER TETH -10B49 INSCRIPTIONAL PARTHIAN LETTER YODH -10B4A INSCRIPTIONAL PARTHIAN LETTER KAPH -10B4B INSCRIPTIONAL PARTHIAN LETTER LAMEDH -10B4C INSCRIPTIONAL PARTHIAN LETTER MEM -10B4D INSCRIPTIONAL PARTHIAN LETTER NUN -10B4E INSCRIPTIONAL PARTHIAN LETTER SAMEKH -10B4F INSCRIPTIONAL PARTHIAN LETTER AYIN -10B50 INSCRIPTIONAL PARTHIAN LETTER PE -10B51 INSCRIPTIONAL PARTHIAN LETTER SADHE -10B52 INSCRIPTIONAL PARTHIAN LETTER QOPH -10B53 INSCRIPTIONAL PARTHIAN LETTER RESH -10B54 INSCRIPTIONAL PARTHIAN LETTER SHIN -10B55 INSCRIPTIONAL PARTHIAN LETTER TAW -10B58 INSCRIPTIONAL PARTHIAN NUMBER ONE -10B59 
INSCRIPTIONAL PARTHIAN NUMBER TWO -10B5A INSCRIPTIONAL PARTHIAN NUMBER THREE -10B5B INSCRIPTIONAL PARTHIAN NUMBER FOUR -10B5C INSCRIPTIONAL PARTHIAN NUMBER TEN -10B5D INSCRIPTIONAL PARTHIAN NUMBER TWENTY -10B5E INSCRIPTIONAL PARTHIAN NUMBER ONE HUNDRED -10B5F INSCRIPTIONAL PARTHIAN NUMBER ONE THOUSAND -10B60 INSCRIPTIONAL PAHLAVI LETTER ALEPH -10B61 INSCRIPTIONAL PAHLAVI LETTER BETH -10B62 INSCRIPTIONAL PAHLAVI LETTER GIMEL -10B63 INSCRIPTIONAL PAHLAVI LETTER DALETH -10B64 INSCRIPTIONAL PAHLAVI LETTER HE -10B65 INSCRIPTIONAL PAHLAVI LETTER WAW-AYIN-RESH -10B66 INSCRIPTIONAL PAHLAVI LETTER ZAYIN -10B67 INSCRIPTIONAL PAHLAVI LETTER HETH -10B68 INSCRIPTIONAL PAHLAVI LETTER TETH -10B69 INSCRIPTIONAL PAHLAVI LETTER YODH -10B6A INSCRIPTIONAL PAHLAVI LETTER KAPH -10B6B INSCRIPTIONAL PAHLAVI LETTER LAMEDH -10B6C INSCRIPTIONAL PAHLAVI LETTER MEM-QOPH -10B6D INSCRIPTIONAL PAHLAVI LETTER NUN -10B6E INSCRIPTIONAL PAHLAVI LETTER SAMEKH -10B6F INSCRIPTIONAL PAHLAVI LETTER PE -10B70 INSCRIPTIONAL PAHLAVI LETTER SADHE -10B71 INSCRIPTIONAL PAHLAVI LETTER SHIN -10B72 INSCRIPTIONAL PAHLAVI LETTER TAW -10B78 INSCRIPTIONAL PAHLAVI NUMBER ONE -10B79 INSCRIPTIONAL PAHLAVI NUMBER TWO -10B7A INSCRIPTIONAL PAHLAVI NUMBER THREE -10B7B INSCRIPTIONAL PAHLAVI NUMBER FOUR -10B7C INSCRIPTIONAL PAHLAVI NUMBER TEN -10B7D INSCRIPTIONAL PAHLAVI NUMBER TWENTY -10B7E INSCRIPTIONAL PAHLAVI NUMBER ONE HUNDRED -10B7F INSCRIPTIONAL PAHLAVI NUMBER ONE THOUSAND -10C00 OLD TURKIC LETTER ORKHON A -10C01 OLD TURKIC LETTER YENISEI A -10C02 OLD TURKIC LETTER YENISEI AE -10C03 OLD TURKIC LETTER ORKHON I -10C04 OLD TURKIC LETTER YENISEI I -10C05 OLD TURKIC LETTER YENISEI E -10C06 OLD TURKIC LETTER ORKHON O -10C07 OLD TURKIC LETTER ORKHON OE -10C08 OLD TURKIC LETTER YENISEI OE -10C09 OLD TURKIC LETTER ORKHON AB -10C0A OLD TURKIC LETTER YENISEI AB -10C0B OLD TURKIC LETTER ORKHON AEB -10C0C OLD TURKIC LETTER YENISEI AEB -10C0D OLD TURKIC LETTER ORKHON AG -10C0E OLD TURKIC LETTER YENISEI AG -10C0F OLD TURKIC LETTER 
ORKHON AEG -10C10 OLD TURKIC LETTER YENISEI AEG -10C11 OLD TURKIC LETTER ORKHON AD -10C12 OLD TURKIC LETTER YENISEI AD -10C13 OLD TURKIC LETTER ORKHON AED -10C14 OLD TURKIC LETTER ORKHON EZ -10C15 OLD TURKIC LETTER YENISEI EZ -10C16 OLD TURKIC LETTER ORKHON AY -10C17 OLD TURKIC LETTER YENISEI AY -10C18 OLD TURKIC LETTER ORKHON AEY -10C19 OLD TURKIC LETTER YENISEI AEY -10C1A OLD TURKIC LETTER ORKHON AEK -10C1B OLD TURKIC LETTER YENISEI AEK -10C1C OLD TURKIC LETTER ORKHON OEK -10C1D OLD TURKIC LETTER YENISEI OEK -10C1E OLD TURKIC LETTER ORKHON AL -10C1F OLD TURKIC LETTER YENISEI AL -10C20 OLD TURKIC LETTER ORKHON AEL -10C21 OLD TURKIC LETTER ORKHON ELT -10C22 OLD TURKIC LETTER ORKHON EM -10C23 OLD TURKIC LETTER ORKHON AN -10C24 OLD TURKIC LETTER ORKHON AEN -10C25 OLD TURKIC LETTER YENISEI AEN -10C26 OLD TURKIC LETTER ORKHON ENT -10C27 OLD TURKIC LETTER YENISEI ENT -10C28 OLD TURKIC LETTER ORKHON ENC -10C29 OLD TURKIC LETTER YENISEI ENC -10C2A OLD TURKIC LETTER ORKHON ENY -10C2B OLD TURKIC LETTER YENISEI ENY -10C2C OLD TURKIC LETTER YENISEI ANG -10C2D OLD TURKIC LETTER ORKHON ENG -10C2E OLD TURKIC LETTER YENISEI AENG -10C2F OLD TURKIC LETTER ORKHON EP -10C30 OLD TURKIC LETTER ORKHON OP -10C31 OLD TURKIC LETTER ORKHON IC -10C32 OLD TURKIC LETTER ORKHON EC -10C33 OLD TURKIC LETTER YENISEI EC -10C34 OLD TURKIC LETTER ORKHON AQ -10C35 OLD TURKIC LETTER YENISEI AQ -10C36 OLD TURKIC LETTER ORKHON IQ -10C37 OLD TURKIC LETTER YENISEI IQ -10C38 OLD TURKIC LETTER ORKHON OQ -10C39 OLD TURKIC LETTER YENISEI OQ -10C3A OLD TURKIC LETTER ORKHON AR -10C3B OLD TURKIC LETTER YENISEI AR -10C3C OLD TURKIC LETTER ORKHON AER -10C3D OLD TURKIC LETTER ORKHON AS -10C3E OLD TURKIC LETTER ORKHON AES -10C3F OLD TURKIC LETTER ORKHON ASH -10C40 OLD TURKIC LETTER YENISEI ASH -10C41 OLD TURKIC LETTER ORKHON ESH -10C42 OLD TURKIC LETTER YENISEI ESH -10C43 OLD TURKIC LETTER ORKHON AT -10C44 OLD TURKIC LETTER YENISEI AT -10C45 OLD TURKIC LETTER ORKHON AET -10C46 OLD TURKIC LETTER YENISEI AET -10C47 OLD 
TURKIC LETTER ORKHON OT -10C48 OLD TURKIC LETTER ORKHON BASH -10E60 RUMI DIGIT ONE -10E61 RUMI DIGIT TWO -10E62 RUMI DIGIT THREE -10E63 RUMI DIGIT FOUR -10E64 RUMI DIGIT FIVE -10E65 RUMI DIGIT SIX -10E66 RUMI DIGIT SEVEN -10E67 RUMI DIGIT EIGHT -10E68 RUMI DIGIT NINE -10E69 RUMI NUMBER TEN -10E6A RUMI NUMBER TWENTY -10E6B RUMI NUMBER THIRTY -10E6C RUMI NUMBER FORTY -10E6D RUMI NUMBER FIFTY -10E6E RUMI NUMBER SIXTY -10E6F RUMI NUMBER SEVENTY -10E70 RUMI NUMBER EIGHTY -10E71 RUMI NUMBER NINETY -10E72 RUMI NUMBER ONE HUNDRED -10E73 RUMI NUMBER TWO HUNDRED -10E74 RUMI NUMBER THREE HUNDRED -10E75 RUMI NUMBER FOUR HUNDRED -10E76 RUMI NUMBER FIVE HUNDRED -10E77 RUMI NUMBER SIX HUNDRED -10E78 RUMI NUMBER SEVEN HUNDRED -10E79 RUMI NUMBER EIGHT HUNDRED -10E7A RUMI NUMBER NINE HUNDRED -10E7B RUMI FRACTION ONE HALF -10E7C RUMI FRACTION ONE QUARTER -10E7D RUMI FRACTION ONE THIRD -10E7E RUMI FRACTION TWO THIRDS -11080 KAITHI SIGN CANDRABINDU -11081 KAITHI SIGN ANUSVARA -11082 KAITHI SIGN VISARGA -11083 KAITHI LETTER A -11084 KAITHI LETTER AA -11085 KAITHI LETTER I -11086 KAITHI LETTER II -11087 KAITHI LETTER U -11088 KAITHI LETTER UU -11089 KAITHI LETTER E -1108A KAITHI LETTER AI -1108B KAITHI LETTER O -1108C KAITHI LETTER AU -1108D KAITHI LETTER KA -1108E KAITHI LETTER KHA -1108F KAITHI LETTER GA -11090 KAITHI LETTER GHA -11091 KAITHI LETTER NGA -11092 KAITHI LETTER CA -11093 KAITHI LETTER CHA -11094 KAITHI LETTER JA -11095 KAITHI LETTER JHA -11096 KAITHI LETTER NYA -11097 KAITHI LETTER TTA -11098 KAITHI LETTER TTHA -11099 KAITHI LETTER DDA -1109A KAITHI LETTER DDDHA -1109B KAITHI LETTER DDHA -1109C KAITHI LETTER RHA -1109D KAITHI LETTER NNA -1109E KAITHI LETTER TA -1109F KAITHI LETTER THA -110A0 KAITHI LETTER DA -110A1 KAITHI LETTER DHA -110A2 KAITHI LETTER NA -110A3 KAITHI LETTER PA -110A4 KAITHI LETTER PHA -110A5 KAITHI LETTER BA -110A6 KAITHI LETTER BHA -110A7 KAITHI LETTER MA -110A8 KAITHI LETTER YA -110A9 KAITHI LETTER RA -110AA KAITHI LETTER LA -110AB KAITHI LETTER VA 
-110AC KAITHI LETTER SHA -110AD KAITHI LETTER SSA -110AE KAITHI LETTER SA -110AF KAITHI LETTER HA -110B0 KAITHI VOWEL SIGN AA -110B1 KAITHI VOWEL SIGN I -110B2 KAITHI VOWEL SIGN II -110B3 KAITHI VOWEL SIGN U -110B4 KAITHI VOWEL SIGN UU -110B5 KAITHI VOWEL SIGN E -110B6 KAITHI VOWEL SIGN AI -110B7 KAITHI VOWEL SIGN O -110B8 KAITHI VOWEL SIGN AU -110B9 KAITHI SIGN VIRAMA -110BA KAITHI SIGN NUKTA -110BB KAITHI ABBREVIATION SIGN -110BC KAITHI ENUMERATION SIGN -110BD KAITHI NUMBER SIGN -110BE KAITHI SECTION MARK -110BF KAITHI DOUBLE SECTION MARK -110C0 KAITHI DANDA -110C1 KAITHI DOUBLE DANDA -12000 CUNEIFORM SIGN A -12001 CUNEIFORM SIGN A TIMES A -12002 CUNEIFORM SIGN A TIMES BAD -12003 CUNEIFORM SIGN A TIMES GAN2 TENU -12004 CUNEIFORM SIGN A TIMES HA -12005 CUNEIFORM SIGN A TIMES IGI -12006 CUNEIFORM SIGN A TIMES LAGAR GUNU -12007 CUNEIFORM SIGN A TIMES MUSH -12008 CUNEIFORM SIGN A TIMES SAG -12009 CUNEIFORM SIGN A2 -1200A CUNEIFORM SIGN AB -1200B CUNEIFORM SIGN AB TIMES ASH2 -1200C CUNEIFORM SIGN AB TIMES DUN3 GUNU -1200D CUNEIFORM SIGN AB TIMES GAL -1200E CUNEIFORM SIGN AB TIMES GAN2 TENU -1200F CUNEIFORM SIGN AB TIMES HA -12010 CUNEIFORM SIGN AB TIMES IGI GUNU -12011 CUNEIFORM SIGN AB TIMES IMIN -12012 CUNEIFORM SIGN AB TIMES LAGAB -12013 CUNEIFORM SIGN AB TIMES SHESH -12014 CUNEIFORM SIGN AB TIMES U PLUS U PLUS U -12015 CUNEIFORM SIGN AB GUNU -12016 CUNEIFORM SIGN AB2 -12017 CUNEIFORM SIGN AB2 TIMES BALAG -12018 CUNEIFORM SIGN AB2 TIMES GAN2 TENU -12019 CUNEIFORM SIGN AB2 TIMES ME PLUS EN -1201A CUNEIFORM SIGN AB2 TIMES SHA3 -1201B CUNEIFORM SIGN AB2 TIMES TAK4 -1201C CUNEIFORM SIGN AD -1201D CUNEIFORM SIGN AK -1201E CUNEIFORM SIGN AK TIMES ERIN2 -1201F CUNEIFORM SIGN AK TIMES SHITA PLUS GISH -12020 CUNEIFORM SIGN AL -12021 CUNEIFORM SIGN AL TIMES AL -12022 CUNEIFORM SIGN AL TIMES DIM2 -12023 CUNEIFORM SIGN AL TIMES GISH -12024 CUNEIFORM SIGN AL TIMES HA -12025 CUNEIFORM SIGN AL TIMES KAD3 -12026 CUNEIFORM SIGN AL TIMES KI -12027 CUNEIFORM SIGN AL TIMES SHE -12028 
CUNEIFORM SIGN AL TIMES USH -12029 CUNEIFORM SIGN ALAN -1202A CUNEIFORM SIGN ALEPH -1202B CUNEIFORM SIGN AMAR -1202C CUNEIFORM SIGN AMAR TIMES SHE -1202D CUNEIFORM SIGN AN -1202E CUNEIFORM SIGN AN OVER AN -1202F CUNEIFORM SIGN AN THREE TIMES -12030 CUNEIFORM SIGN AN PLUS NAGA OPPOSING AN PLUS NAGA -12031 CUNEIFORM SIGN AN PLUS NAGA SQUARED -12032 CUNEIFORM SIGN ANSHE -12033 CUNEIFORM SIGN APIN -12034 CUNEIFORM SIGN ARAD -12035 CUNEIFORM SIGN ARAD TIMES KUR -12036 CUNEIFORM SIGN ARKAB -12037 CUNEIFORM SIGN ASAL2 -12038 CUNEIFORM SIGN ASH -12039 CUNEIFORM SIGN ASH ZIDA TENU -1203A CUNEIFORM SIGN ASH KABA TENU -1203B CUNEIFORM SIGN ASH OVER ASH TUG2 OVER TUG2 TUG2 OVER TUG2 PAP -1203C CUNEIFORM SIGN ASH OVER ASH OVER ASH -1203D CUNEIFORM SIGN ASH OVER ASH OVER ASH CROSSING ASH OVER ASH OVER ASH -1203E CUNEIFORM SIGN ASH2 -1203F CUNEIFORM SIGN ASHGAB -12040 CUNEIFORM SIGN BA -12041 CUNEIFORM SIGN BAD -12042 CUNEIFORM SIGN BAG3 -12043 CUNEIFORM SIGN BAHAR2 -12044 CUNEIFORM SIGN BAL -12045 CUNEIFORM SIGN BAL OVER BAL -12046 CUNEIFORM SIGN BALAG -12047 CUNEIFORM SIGN BAR -12048 CUNEIFORM SIGN BARA2 -12049 CUNEIFORM SIGN BI -1204A CUNEIFORM SIGN BI TIMES A -1204B CUNEIFORM SIGN BI TIMES GAR -1204C CUNEIFORM SIGN BI TIMES IGI GUNU -1204D CUNEIFORM SIGN BU -1204E CUNEIFORM SIGN BU OVER BU AB -1204F CUNEIFORM SIGN BU OVER BU UN -12050 CUNEIFORM SIGN BU CROSSING BU -12051 CUNEIFORM SIGN BULUG -12052 CUNEIFORM SIGN BULUG OVER BULUG -12053 CUNEIFORM SIGN BUR -12054 CUNEIFORM SIGN BUR2 -12055 CUNEIFORM SIGN DA -12056 CUNEIFORM SIGN DAG -12057 CUNEIFORM SIGN DAG KISIM5 TIMES A PLUS MASH -12058 CUNEIFORM SIGN DAG KISIM5 TIMES AMAR -12059 CUNEIFORM SIGN DAG KISIM5 TIMES BALAG -1205A CUNEIFORM SIGN DAG KISIM5 TIMES BI -1205B CUNEIFORM SIGN DAG KISIM5 TIMES GA -1205C CUNEIFORM SIGN DAG KISIM5 TIMES GA PLUS MASH -1205D CUNEIFORM SIGN DAG KISIM5 TIMES GI -1205E CUNEIFORM SIGN DAG KISIM5 TIMES GIR2 -1205F CUNEIFORM SIGN DAG KISIM5 TIMES GUD -12060 CUNEIFORM SIGN DAG KISIM5 TIMES HA 
-12061 CUNEIFORM SIGN DAG KISIM5 TIMES IR -12062 CUNEIFORM SIGN DAG KISIM5 TIMES IR PLUS LU -12063 CUNEIFORM SIGN DAG KISIM5 TIMES KAK -12064 CUNEIFORM SIGN DAG KISIM5 TIMES LA -12065 CUNEIFORM SIGN DAG KISIM5 TIMES LU -12066 CUNEIFORM SIGN DAG KISIM5 TIMES LU PLUS MASH2 -12067 CUNEIFORM SIGN DAG KISIM5 TIMES LUM -12068 CUNEIFORM SIGN DAG KISIM5 TIMES NE -12069 CUNEIFORM SIGN DAG KISIM5 TIMES PAP PLUS PAP -1206A CUNEIFORM SIGN DAG KISIM5 TIMES SI -1206B CUNEIFORM SIGN DAG KISIM5 TIMES TAK4 -1206C CUNEIFORM SIGN DAG KISIM5 TIMES U2 PLUS GIR2 -1206D CUNEIFORM SIGN DAG KISIM5 TIMES USH -1206E CUNEIFORM SIGN DAM -1206F CUNEIFORM SIGN DAR -12070 CUNEIFORM SIGN DARA3 -12071 CUNEIFORM SIGN DARA4 -12072 CUNEIFORM SIGN DI -12073 CUNEIFORM SIGN DIB -12074 CUNEIFORM SIGN DIM -12075 CUNEIFORM SIGN DIM TIMES SHE -12076 CUNEIFORM SIGN DIM2 -12077 CUNEIFORM SIGN DIN -12078 CUNEIFORM SIGN DIN KASKAL U GUNU DISH -12079 CUNEIFORM SIGN DISH -1207A CUNEIFORM SIGN DU -1207B CUNEIFORM SIGN DU OVER DU -1207C CUNEIFORM SIGN DU GUNU -1207D CUNEIFORM SIGN DU SHESHIG -1207E CUNEIFORM SIGN DUB -1207F CUNEIFORM SIGN DUB TIMES ESH2 -12080 CUNEIFORM SIGN DUB2 -12081 CUNEIFORM SIGN DUG -12082 CUNEIFORM SIGN DUGUD -12083 CUNEIFORM SIGN DUH -12084 CUNEIFORM SIGN DUN -12085 CUNEIFORM SIGN DUN3 -12086 CUNEIFORM SIGN DUN3 GUNU -12087 CUNEIFORM SIGN DUN3 GUNU GUNU -12088 CUNEIFORM SIGN DUN4 -12089 CUNEIFORM SIGN DUR2 -1208A CUNEIFORM SIGN E -1208B CUNEIFORM SIGN E TIMES PAP -1208C CUNEIFORM SIGN E OVER E NUN OVER NUN -1208D CUNEIFORM SIGN E2 -1208E CUNEIFORM SIGN E2 TIMES A PLUS HA PLUS DA -1208F CUNEIFORM SIGN E2 TIMES GAR -12090 CUNEIFORM SIGN E2 TIMES MI -12091 CUNEIFORM SIGN E2 TIMES SAL -12092 CUNEIFORM SIGN E2 TIMES SHE -12093 CUNEIFORM SIGN E2 TIMES U -12094 CUNEIFORM SIGN EDIN -12095 CUNEIFORM SIGN EGIR -12096 CUNEIFORM SIGN EL -12097 CUNEIFORM SIGN EN -12098 CUNEIFORM SIGN EN TIMES GAN2 -12099 CUNEIFORM SIGN EN TIMES GAN2 TENU -1209A CUNEIFORM SIGN EN TIMES ME -1209B CUNEIFORM SIGN EN CROSSING 
EN -1209C CUNEIFORM SIGN EN OPPOSING EN -1209D CUNEIFORM SIGN EN SQUARED -1209E CUNEIFORM SIGN EREN -1209F CUNEIFORM SIGN ERIN2 -120A0 CUNEIFORM SIGN ESH2 -120A1 CUNEIFORM SIGN EZEN -120A2 CUNEIFORM SIGN EZEN TIMES A -120A3 CUNEIFORM SIGN EZEN TIMES A PLUS LAL -120A4 CUNEIFORM SIGN EZEN TIMES A PLUS LAL TIMES LAL -120A5 CUNEIFORM SIGN EZEN TIMES AN -120A6 CUNEIFORM SIGN EZEN TIMES BAD -120A7 CUNEIFORM SIGN EZEN TIMES DUN3 GUNU -120A8 CUNEIFORM SIGN EZEN TIMES DUN3 GUNU GUNU -120A9 CUNEIFORM SIGN EZEN TIMES HA -120AA CUNEIFORM SIGN EZEN TIMES HA GUNU -120AB CUNEIFORM SIGN EZEN TIMES IGI GUNU -120AC CUNEIFORM SIGN EZEN TIMES KASKAL -120AD CUNEIFORM SIGN EZEN TIMES KASKAL SQUARED -120AE CUNEIFORM SIGN EZEN TIMES KU3 -120AF CUNEIFORM SIGN EZEN TIMES LA -120B0 CUNEIFORM SIGN EZEN TIMES LAL TIMES LAL -120B1 CUNEIFORM SIGN EZEN TIMES LI -120B2 CUNEIFORM SIGN EZEN TIMES LU -120B3 CUNEIFORM SIGN EZEN TIMES U2 -120B4 CUNEIFORM SIGN EZEN TIMES UD -120B5 CUNEIFORM SIGN GA -120B6 CUNEIFORM SIGN GA GUNU -120B7 CUNEIFORM SIGN GA2 -120B8 CUNEIFORM SIGN GA2 TIMES A PLUS DA PLUS HA -120B9 CUNEIFORM SIGN GA2 TIMES A PLUS HA -120BA CUNEIFORM SIGN GA2 TIMES A PLUS IGI -120BB CUNEIFORM SIGN GA2 TIMES AB2 TENU PLUS TAB -120BC CUNEIFORM SIGN GA2 TIMES AN -120BD CUNEIFORM SIGN GA2 TIMES ASH -120BE CUNEIFORM SIGN GA2 TIMES ASH2 PLUS GAL -120BF CUNEIFORM SIGN GA2 TIMES BAD -120C0 CUNEIFORM SIGN GA2 TIMES BAR PLUS RA -120C1 CUNEIFORM SIGN GA2 TIMES BUR -120C2 CUNEIFORM SIGN GA2 TIMES BUR PLUS RA -120C3 CUNEIFORM SIGN GA2 TIMES DA -120C4 CUNEIFORM SIGN GA2 TIMES DI -120C5 CUNEIFORM SIGN GA2 TIMES DIM TIMES SHE -120C6 CUNEIFORM SIGN GA2 TIMES DUB -120C7 CUNEIFORM SIGN GA2 TIMES EL -120C8 CUNEIFORM SIGN GA2 TIMES EL PLUS LA -120C9 CUNEIFORM SIGN GA2 TIMES EN -120CA CUNEIFORM SIGN GA2 TIMES EN TIMES GAN2 TENU -120CB CUNEIFORM SIGN GA2 TIMES GAN2 TENU -120CC CUNEIFORM SIGN GA2 TIMES GAR -120CD CUNEIFORM SIGN GA2 TIMES GI -120CE CUNEIFORM SIGN GA2 TIMES GI4 -120CF CUNEIFORM SIGN GA2 TIMES GI4 PLUS 
A -120D0 CUNEIFORM SIGN GA2 TIMES GIR2 PLUS SU -120D1 CUNEIFORM SIGN GA2 TIMES HA PLUS LU PLUS ESH2 -120D2 CUNEIFORM SIGN GA2 TIMES HAL -120D3 CUNEIFORM SIGN GA2 TIMES HAL PLUS LA -120D4 CUNEIFORM SIGN GA2 TIMES HI PLUS LI -120D5 CUNEIFORM SIGN GA2 TIMES HUB2 -120D6 CUNEIFORM SIGN GA2 TIMES IGI GUNU -120D7 CUNEIFORM SIGN GA2 TIMES ISH PLUS HU PLUS ASH -120D8 CUNEIFORM SIGN GA2 TIMES KAK -120D9 CUNEIFORM SIGN GA2 TIMES KASKAL -120DA CUNEIFORM SIGN GA2 TIMES KID -120DB CUNEIFORM SIGN GA2 TIMES KID PLUS LAL -120DC CUNEIFORM SIGN GA2 TIMES KU3 PLUS AN -120DD CUNEIFORM SIGN GA2 TIMES LA -120DE CUNEIFORM SIGN GA2 TIMES ME PLUS EN -120DF CUNEIFORM SIGN GA2 TIMES MI -120E0 CUNEIFORM SIGN GA2 TIMES NUN -120E1 CUNEIFORM SIGN GA2 TIMES NUN OVER NUN -120E2 CUNEIFORM SIGN GA2 TIMES PA -120E3 CUNEIFORM SIGN GA2 TIMES SAL -120E4 CUNEIFORM SIGN GA2 TIMES SAR -120E5 CUNEIFORM SIGN GA2 TIMES SHE -120E6 CUNEIFORM SIGN GA2 TIMES SHE PLUS TUR -120E7 CUNEIFORM SIGN GA2 TIMES SHID -120E8 CUNEIFORM SIGN GA2 TIMES SUM -120E9 CUNEIFORM SIGN GA2 TIMES TAK4 -120EA CUNEIFORM SIGN GA2 TIMES U -120EB CUNEIFORM SIGN GA2 TIMES UD -120EC CUNEIFORM SIGN GA2 TIMES UD PLUS DU -120ED CUNEIFORM SIGN GA2 OVER GA2 -120EE CUNEIFORM SIGN GABA -120EF CUNEIFORM SIGN GABA CROSSING GABA -120F0 CUNEIFORM SIGN GAD -120F1 CUNEIFORM SIGN GAD OVER GAD GAR OVER GAR -120F2 CUNEIFORM SIGN GAL -120F3 CUNEIFORM SIGN GAL GAD OVER GAD GAR OVER GAR -120F4 CUNEIFORM SIGN GALAM -120F5 CUNEIFORM SIGN GAM -120F6 CUNEIFORM SIGN GAN -120F7 CUNEIFORM SIGN GAN2 -120F8 CUNEIFORM SIGN GAN2 TENU -120F9 CUNEIFORM SIGN GAN2 OVER GAN2 -120FA CUNEIFORM SIGN GAN2 CROSSING GAN2 -120FB CUNEIFORM SIGN GAR -120FC CUNEIFORM SIGN GAR3 -120FD CUNEIFORM SIGN GASHAN -120FE CUNEIFORM SIGN GESHTIN -120FF CUNEIFORM SIGN GESHTIN TIMES KUR -12100 CUNEIFORM SIGN GI -12101 CUNEIFORM SIGN GI TIMES E -12102 CUNEIFORM SIGN GI TIMES U -12103 CUNEIFORM SIGN GI CROSSING GI -12104 CUNEIFORM SIGN GI4 -12105 CUNEIFORM SIGN GI4 OVER GI4 -12106 CUNEIFORM SIGN GI4 
CROSSING GI4 -12107 CUNEIFORM SIGN GIDIM -12108 CUNEIFORM SIGN GIR2 -12109 CUNEIFORM SIGN GIR2 GUNU -1210A CUNEIFORM SIGN GIR3 -1210B CUNEIFORM SIGN GIR3 TIMES A PLUS IGI -1210C CUNEIFORM SIGN GIR3 TIMES GAN2 TENU -1210D CUNEIFORM SIGN GIR3 TIMES IGI -1210E CUNEIFORM SIGN GIR3 TIMES LU PLUS IGI -1210F CUNEIFORM SIGN GIR3 TIMES PA -12110 CUNEIFORM SIGN GISAL -12111 CUNEIFORM SIGN GISH -12112 CUNEIFORM SIGN GISH CROSSING GISH -12113 CUNEIFORM SIGN GISH TIMES BAD -12114 CUNEIFORM SIGN GISH TIMES TAK4 -12115 CUNEIFORM SIGN GISH TENU -12116 CUNEIFORM SIGN GU -12117 CUNEIFORM SIGN GU CROSSING GU -12118 CUNEIFORM SIGN GU2 -12119 CUNEIFORM SIGN GU2 TIMES KAK -1211A CUNEIFORM SIGN GU2 TIMES KAK TIMES IGI GUNU -1211B CUNEIFORM SIGN GU2 TIMES NUN -1211C CUNEIFORM SIGN GU2 TIMES SAL PLUS TUG2 -1211D CUNEIFORM SIGN GU2 GUNU -1211E CUNEIFORM SIGN GUD -1211F CUNEIFORM SIGN GUD TIMES A PLUS KUR -12120 CUNEIFORM SIGN GUD TIMES KUR -12121 CUNEIFORM SIGN GUD OVER GUD LUGAL -12122 CUNEIFORM SIGN GUL -12123 CUNEIFORM SIGN GUM -12124 CUNEIFORM SIGN GUM TIMES SHE -12125 CUNEIFORM SIGN GUR -12126 CUNEIFORM SIGN GUR7 -12127 CUNEIFORM SIGN GURUN -12128 CUNEIFORM SIGN GURUSH -12129 CUNEIFORM SIGN HA -1212A CUNEIFORM SIGN HA TENU -1212B CUNEIFORM SIGN HA GUNU -1212C CUNEIFORM SIGN HAL -1212D CUNEIFORM SIGN HI -1212E CUNEIFORM SIGN HI TIMES ASH -1212F CUNEIFORM SIGN HI TIMES ASH2 -12130 CUNEIFORM SIGN HI TIMES BAD -12131 CUNEIFORM SIGN HI TIMES DISH -12132 CUNEIFORM SIGN HI TIMES GAD -12133 CUNEIFORM SIGN HI TIMES KIN -12134 CUNEIFORM SIGN HI TIMES NUN -12135 CUNEIFORM SIGN HI TIMES SHE -12136 CUNEIFORM SIGN HI TIMES U -12137 CUNEIFORM SIGN HU -12138 CUNEIFORM SIGN HUB2 -12139 CUNEIFORM SIGN HUB2 TIMES AN -1213A CUNEIFORM SIGN HUB2 TIMES HAL -1213B CUNEIFORM SIGN HUB2 TIMES KASKAL -1213C CUNEIFORM SIGN HUB2 TIMES LISH -1213D CUNEIFORM SIGN HUB2 TIMES UD -1213E CUNEIFORM SIGN HUL2 -1213F CUNEIFORM SIGN I -12140 CUNEIFORM SIGN I A -12141 CUNEIFORM SIGN IB -12142 CUNEIFORM SIGN IDIM -12143 
CUNEIFORM SIGN IDIM OVER IDIM BUR -12144 CUNEIFORM SIGN IDIM OVER IDIM SQUARED -12145 CUNEIFORM SIGN IG -12146 CUNEIFORM SIGN IGI -12147 CUNEIFORM SIGN IGI DIB -12148 CUNEIFORM SIGN IGI RI -12149 CUNEIFORM SIGN IGI OVER IGI SHIR OVER SHIR UD OVER UD -1214A CUNEIFORM SIGN IGI GUNU -1214B CUNEIFORM SIGN IL -1214C CUNEIFORM SIGN IL TIMES GAN2 TENU -1214D CUNEIFORM SIGN IL2 -1214E CUNEIFORM SIGN IM -1214F CUNEIFORM SIGN IM TIMES TAK4 -12150 CUNEIFORM SIGN IM CROSSING IM -12151 CUNEIFORM SIGN IM OPPOSING IM -12152 CUNEIFORM SIGN IM SQUARED -12153 CUNEIFORM SIGN IMIN -12154 CUNEIFORM SIGN IN -12155 CUNEIFORM SIGN IR -12156 CUNEIFORM SIGN ISH -12157 CUNEIFORM SIGN KA -12158 CUNEIFORM SIGN KA TIMES A -12159 CUNEIFORM SIGN KA TIMES AD -1215A CUNEIFORM SIGN KA TIMES AD PLUS KU3 -1215B CUNEIFORM SIGN KA TIMES ASH2 -1215C CUNEIFORM SIGN KA TIMES BAD -1215D CUNEIFORM SIGN KA TIMES BALAG -1215E CUNEIFORM SIGN KA TIMES BAR -1215F CUNEIFORM SIGN KA TIMES BI -12160 CUNEIFORM SIGN KA TIMES ERIN2 -12161 CUNEIFORM SIGN KA TIMES ESH2 -12162 CUNEIFORM SIGN KA TIMES GA -12163 CUNEIFORM SIGN KA TIMES GAL -12164 CUNEIFORM SIGN KA TIMES GAN2 TENU -12165 CUNEIFORM SIGN KA TIMES GAR -12166 CUNEIFORM SIGN KA TIMES GAR PLUS SHA3 PLUS A -12167 CUNEIFORM SIGN KA TIMES GI -12168 CUNEIFORM SIGN KA TIMES GIR2 -12169 CUNEIFORM SIGN KA TIMES GISH PLUS SAR -1216A CUNEIFORM SIGN KA TIMES GISH CROSSING GISH -1216B CUNEIFORM SIGN KA TIMES GU -1216C CUNEIFORM SIGN KA TIMES GUR7 -1216D CUNEIFORM SIGN KA TIMES IGI -1216E CUNEIFORM SIGN KA TIMES IM -1216F CUNEIFORM SIGN KA TIMES KAK -12170 CUNEIFORM SIGN KA TIMES KI -12171 CUNEIFORM SIGN KA TIMES KID -12172 CUNEIFORM SIGN KA TIMES LI -12173 CUNEIFORM SIGN KA TIMES LU -12174 CUNEIFORM SIGN KA TIMES ME -12175 CUNEIFORM SIGN KA TIMES ME PLUS DU -12176 CUNEIFORM SIGN KA TIMES ME PLUS GI -12177 CUNEIFORM SIGN KA TIMES ME PLUS TE -12178 CUNEIFORM SIGN KA TIMES MI -12179 CUNEIFORM SIGN KA TIMES MI PLUS NUNUZ -1217A CUNEIFORM SIGN KA TIMES NE -1217B CUNEIFORM SIGN KA 
TIMES NUN -1217C CUNEIFORM SIGN KA TIMES PI -1217D CUNEIFORM SIGN KA TIMES RU -1217E CUNEIFORM SIGN KA TIMES SA -1217F CUNEIFORM SIGN KA TIMES SAR -12180 CUNEIFORM SIGN KA TIMES SHA -12181 CUNEIFORM SIGN KA TIMES SHE -12182 CUNEIFORM SIGN KA TIMES SHID -12183 CUNEIFORM SIGN KA TIMES SHU -12184 CUNEIFORM SIGN KA TIMES SIG -12185 CUNEIFORM SIGN KA TIMES SUHUR -12186 CUNEIFORM SIGN KA TIMES TAR -12187 CUNEIFORM SIGN KA TIMES U -12188 CUNEIFORM SIGN KA TIMES U2 -12189 CUNEIFORM SIGN KA TIMES UD -1218A CUNEIFORM SIGN KA TIMES UMUM TIMES PA -1218B CUNEIFORM SIGN KA TIMES USH -1218C CUNEIFORM SIGN KA TIMES ZI -1218D CUNEIFORM SIGN KA2 -1218E CUNEIFORM SIGN KA2 CROSSING KA2 -1218F CUNEIFORM SIGN KAB -12190 CUNEIFORM SIGN KAD2 -12191 CUNEIFORM SIGN KAD3 -12192 CUNEIFORM SIGN KAD4 -12193 CUNEIFORM SIGN KAD5 -12194 CUNEIFORM SIGN KAD5 OVER KAD5 -12195 CUNEIFORM SIGN KAK -12196 CUNEIFORM SIGN KAK TIMES IGI GUNU -12197 CUNEIFORM SIGN KAL -12198 CUNEIFORM SIGN KAL TIMES BAD -12199 CUNEIFORM SIGN KAL CROSSING KAL -1219A CUNEIFORM SIGN KAM2 -1219B CUNEIFORM SIGN KAM4 -1219C CUNEIFORM SIGN KASKAL -1219D CUNEIFORM SIGN KASKAL LAGAB TIMES U OVER LAGAB TIMES U -1219E CUNEIFORM SIGN KASKAL OVER KASKAL LAGAB TIMES U OVER LAGAB TIMES U -1219F CUNEIFORM SIGN KESH2 -121A0 CUNEIFORM SIGN KI -121A1 CUNEIFORM SIGN KI TIMES BAD -121A2 CUNEIFORM SIGN KI TIMES U -121A3 CUNEIFORM SIGN KI TIMES UD -121A4 CUNEIFORM SIGN KID -121A5 CUNEIFORM SIGN KIN -121A6 CUNEIFORM SIGN KISAL -121A7 CUNEIFORM SIGN KISH -121A8 CUNEIFORM SIGN KISIM5 -121A9 CUNEIFORM SIGN KISIM5 OVER KISIM5 -121AA CUNEIFORM SIGN KU -121AB CUNEIFORM SIGN KU OVER HI TIMES ASH2 KU OVER HI TIMES ASH2 -121AC CUNEIFORM SIGN KU3 -121AD CUNEIFORM SIGN KU4 -121AE CUNEIFORM SIGN KU4 VARIANT FORM -121AF CUNEIFORM SIGN KU7 -121B0 CUNEIFORM SIGN KUL -121B1 CUNEIFORM SIGN KUL GUNU -121B2 CUNEIFORM SIGN KUN -121B3 CUNEIFORM SIGN KUR -121B4 CUNEIFORM SIGN KUR OPPOSING KUR -121B5 CUNEIFORM SIGN KUSHU2 -121B6 CUNEIFORM SIGN KWU318 -121B7 CUNEIFORM 
SIGN LA -121B8 CUNEIFORM SIGN LAGAB -121B9 CUNEIFORM SIGN LAGAB TIMES A -121BA CUNEIFORM SIGN LAGAB TIMES A PLUS DA PLUS HA -121BB CUNEIFORM SIGN LAGAB TIMES A PLUS GAR -121BC CUNEIFORM SIGN LAGAB TIMES A PLUS LAL -121BD CUNEIFORM SIGN LAGAB TIMES AL -121BE CUNEIFORM SIGN LAGAB TIMES AN -121BF CUNEIFORM SIGN LAGAB TIMES ASH ZIDA TENU -121C0 CUNEIFORM SIGN LAGAB TIMES BAD -121C1 CUNEIFORM SIGN LAGAB TIMES BI -121C2 CUNEIFORM SIGN LAGAB TIMES DAR -121C3 CUNEIFORM SIGN LAGAB TIMES EN -121C4 CUNEIFORM SIGN LAGAB TIMES GA -121C5 CUNEIFORM SIGN LAGAB TIMES GAR -121C6 CUNEIFORM SIGN LAGAB TIMES GUD -121C7 CUNEIFORM SIGN LAGAB TIMES GUD PLUS GUD -121C8 CUNEIFORM SIGN LAGAB TIMES HA -121C9 CUNEIFORM SIGN LAGAB TIMES HAL -121CA CUNEIFORM SIGN LAGAB TIMES HI TIMES NUN -121CB CUNEIFORM SIGN LAGAB TIMES IGI GUNU -121CC CUNEIFORM SIGN LAGAB TIMES IM -121CD CUNEIFORM SIGN LAGAB TIMES IM PLUS HA -121CE CUNEIFORM SIGN LAGAB TIMES IM PLUS LU -121CF CUNEIFORM SIGN LAGAB TIMES KI -121D0 CUNEIFORM SIGN LAGAB TIMES KIN -121D1 CUNEIFORM SIGN LAGAB TIMES KU3 -121D2 CUNEIFORM SIGN LAGAB TIMES KUL -121D3 CUNEIFORM SIGN LAGAB TIMES KUL PLUS HI PLUS A -121D4 CUNEIFORM SIGN LAGAB TIMES LAGAB -121D5 CUNEIFORM SIGN LAGAB TIMES LISH -121D6 CUNEIFORM SIGN LAGAB TIMES LU -121D7 CUNEIFORM SIGN LAGAB TIMES LUL -121D8 CUNEIFORM SIGN LAGAB TIMES ME -121D9 CUNEIFORM SIGN LAGAB TIMES ME PLUS EN -121DA CUNEIFORM SIGN LAGAB TIMES MUSH -121DB CUNEIFORM SIGN LAGAB TIMES NE -121DC CUNEIFORM SIGN LAGAB TIMES SHE PLUS SUM -121DD CUNEIFORM SIGN LAGAB TIMES SHITA PLUS GISH PLUS ERIN2 -121DE CUNEIFORM SIGN LAGAB TIMES SHITA PLUS GISH TENU -121DF CUNEIFORM SIGN LAGAB TIMES SHU2 -121E0 CUNEIFORM SIGN LAGAB TIMES SHU2 PLUS SHU2 -121E1 CUNEIFORM SIGN LAGAB TIMES SUM -121E2 CUNEIFORM SIGN LAGAB TIMES TAG -121E3 CUNEIFORM SIGN LAGAB TIMES TAK4 -121E4 CUNEIFORM SIGN LAGAB TIMES TE PLUS A PLUS SU PLUS NA -121E5 CUNEIFORM SIGN LAGAB TIMES U -121E6 CUNEIFORM SIGN LAGAB TIMES U PLUS A -121E7 CUNEIFORM SIGN LAGAB TIMES U PLUS 
U PLUS U -121E8 CUNEIFORM SIGN LAGAB TIMES U2 PLUS ASH -121E9 CUNEIFORM SIGN LAGAB TIMES UD -121EA CUNEIFORM SIGN LAGAB TIMES USH -121EB CUNEIFORM SIGN LAGAB SQUARED -121EC CUNEIFORM SIGN LAGAR -121ED CUNEIFORM SIGN LAGAR TIMES SHE -121EE CUNEIFORM SIGN LAGAR TIMES SHE PLUS SUM -121EF CUNEIFORM SIGN LAGAR GUNU -121F0 CUNEIFORM SIGN LAGAR GUNU OVER LAGAR GUNU SHE -121F1 CUNEIFORM SIGN LAHSHU -121F2 CUNEIFORM SIGN LAL -121F3 CUNEIFORM SIGN LAL TIMES LAL -121F4 CUNEIFORM SIGN LAM -121F5 CUNEIFORM SIGN LAM TIMES KUR -121F6 CUNEIFORM SIGN LAM TIMES KUR PLUS RU -121F7 CUNEIFORM SIGN LI -121F8 CUNEIFORM SIGN LIL -121F9 CUNEIFORM SIGN LIMMU2 -121FA CUNEIFORM SIGN LISH -121FB CUNEIFORM SIGN LU -121FC CUNEIFORM SIGN LU TIMES BAD -121FD CUNEIFORM SIGN LU2 -121FE CUNEIFORM SIGN LU2 TIMES AL -121FF CUNEIFORM SIGN LU2 TIMES BAD -12200 CUNEIFORM SIGN LU2 TIMES ESH2 -12201 CUNEIFORM SIGN LU2 TIMES ESH2 TENU -12202 CUNEIFORM SIGN LU2 TIMES GAN2 TENU -12203 CUNEIFORM SIGN LU2 TIMES HI TIMES BAD -12204 CUNEIFORM SIGN LU2 TIMES IM -12205 CUNEIFORM SIGN LU2 TIMES KAD2 -12206 CUNEIFORM SIGN LU2 TIMES KAD3 -12207 CUNEIFORM SIGN LU2 TIMES KAD3 PLUS ASH -12208 CUNEIFORM SIGN LU2 TIMES KI -12209 CUNEIFORM SIGN LU2 TIMES LA PLUS ASH -1220A CUNEIFORM SIGN LU2 TIMES LAGAB -1220B CUNEIFORM SIGN LU2 TIMES ME PLUS EN -1220C CUNEIFORM SIGN LU2 TIMES NE -1220D CUNEIFORM SIGN LU2 TIMES NU -1220E CUNEIFORM SIGN LU2 TIMES SI PLUS ASH -1220F CUNEIFORM SIGN LU2 TIMES SIK2 PLUS BU -12210 CUNEIFORM SIGN LU2 TIMES TUG2 -12211 CUNEIFORM SIGN LU2 TENU -12212 CUNEIFORM SIGN LU2 CROSSING LU2 -12213 CUNEIFORM SIGN LU2 OPPOSING LU2 -12214 CUNEIFORM SIGN LU2 SQUARED -12215 CUNEIFORM SIGN LU2 SHESHIG -12216 CUNEIFORM SIGN LU3 -12217 CUNEIFORM SIGN LUGAL -12218 CUNEIFORM SIGN LUGAL OVER LUGAL -12219 CUNEIFORM SIGN LUGAL OPPOSING LUGAL -1221A CUNEIFORM SIGN LUGAL SHESHIG -1221B CUNEIFORM SIGN LUH -1221C CUNEIFORM SIGN LUL -1221D CUNEIFORM SIGN LUM -1221E CUNEIFORM SIGN LUM OVER LUM -1221F CUNEIFORM SIGN LUM OVER LUM 
GAR OVER GAR -12220 CUNEIFORM SIGN MA -12221 CUNEIFORM SIGN MA TIMES TAK4 -12222 CUNEIFORM SIGN MA GUNU -12223 CUNEIFORM SIGN MA2 -12224 CUNEIFORM SIGN MAH -12225 CUNEIFORM SIGN MAR -12226 CUNEIFORM SIGN MASH -12227 CUNEIFORM SIGN MASH2 -12228 CUNEIFORM SIGN ME -12229 CUNEIFORM SIGN MES -1222A CUNEIFORM SIGN MI -1222B CUNEIFORM SIGN MIN -1222C CUNEIFORM SIGN MU -1222D CUNEIFORM SIGN MU OVER MU -1222E CUNEIFORM SIGN MUG -1222F CUNEIFORM SIGN MUG GUNU -12230 CUNEIFORM SIGN MUNSUB -12231 CUNEIFORM SIGN MURGU2 -12232 CUNEIFORM SIGN MUSH -12233 CUNEIFORM SIGN MUSH TIMES A -12234 CUNEIFORM SIGN MUSH TIMES KUR -12235 CUNEIFORM SIGN MUSH TIMES ZA -12236 CUNEIFORM SIGN MUSH OVER MUSH -12237 CUNEIFORM SIGN MUSH OVER MUSH TIMES A PLUS NA -12238 CUNEIFORM SIGN MUSH CROSSING MUSH -12239 CUNEIFORM SIGN MUSH3 -1223A CUNEIFORM SIGN MUSH3 TIMES A -1223B CUNEIFORM SIGN MUSH3 TIMES A PLUS DI -1223C CUNEIFORM SIGN MUSH3 TIMES DI -1223D CUNEIFORM SIGN MUSH3 GUNU -1223E CUNEIFORM SIGN NA -1223F CUNEIFORM SIGN NA2 -12240 CUNEIFORM SIGN NAGA -12241 CUNEIFORM SIGN NAGA INVERTED -12242 CUNEIFORM SIGN NAGA TIMES SHU TENU -12243 CUNEIFORM SIGN NAGA OPPOSING NAGA -12244 CUNEIFORM SIGN NAGAR -12245 CUNEIFORM SIGN NAM NUTILLU -12246 CUNEIFORM SIGN NAM -12247 CUNEIFORM SIGN NAM2 -12248 CUNEIFORM SIGN NE -12249 CUNEIFORM SIGN NE TIMES A -1224A CUNEIFORM SIGN NE TIMES UD -1224B CUNEIFORM SIGN NE SHESHIG -1224C CUNEIFORM SIGN NI -1224D CUNEIFORM SIGN NI TIMES E -1224E CUNEIFORM SIGN NI2 -1224F CUNEIFORM SIGN NIM -12250 CUNEIFORM SIGN NIM TIMES GAN2 TENU -12251 CUNEIFORM SIGN NIM TIMES GAR PLUS GAN2 TENU -12252 CUNEIFORM SIGN NINDA2 -12253 CUNEIFORM SIGN NINDA2 TIMES AN -12254 CUNEIFORM SIGN NINDA2 TIMES ASH -12255 CUNEIFORM SIGN NINDA2 TIMES ASH PLUS ASH -12256 CUNEIFORM SIGN NINDA2 TIMES GUD -12257 CUNEIFORM SIGN NINDA2 TIMES ME PLUS GAN2 TENU -12258 CUNEIFORM SIGN NINDA2 TIMES NE -12259 CUNEIFORM SIGN NINDA2 TIMES NUN -1225A CUNEIFORM SIGN NINDA2 TIMES SHE -1225B CUNEIFORM SIGN NINDA2 TIMES SHE 
PLUS A AN -1225C CUNEIFORM SIGN NINDA2 TIMES SHE PLUS ASH -1225D CUNEIFORM SIGN NINDA2 TIMES SHE PLUS ASH PLUS ASH -1225E CUNEIFORM SIGN NINDA2 TIMES U2 PLUS ASH -1225F CUNEIFORM SIGN NINDA2 TIMES USH -12260 CUNEIFORM SIGN NISAG -12261 CUNEIFORM SIGN NU -12262 CUNEIFORM SIGN NU11 -12263 CUNEIFORM SIGN NUN -12264 CUNEIFORM SIGN NUN LAGAR TIMES GAR -12265 CUNEIFORM SIGN NUN LAGAR TIMES MASH -12266 CUNEIFORM SIGN NUN LAGAR TIMES SAL -12267 CUNEIFORM SIGN NUN LAGAR TIMES SAL OVER NUN LAGAR TIMES SAL -12268 CUNEIFORM SIGN NUN LAGAR TIMES USH -12269 CUNEIFORM SIGN NUN TENU -1226A CUNEIFORM SIGN NUN OVER NUN -1226B CUNEIFORM SIGN NUN CROSSING NUN -1226C CUNEIFORM SIGN NUN CROSSING NUN LAGAR OVER LAGAR -1226D CUNEIFORM SIGN NUNUZ -1226E CUNEIFORM SIGN NUNUZ AB2 TIMES ASHGAB -1226F CUNEIFORM SIGN NUNUZ AB2 TIMES BI -12270 CUNEIFORM SIGN NUNUZ AB2 TIMES DUG -12271 CUNEIFORM SIGN NUNUZ AB2 TIMES GUD -12272 CUNEIFORM SIGN NUNUZ AB2 TIMES IGI GUNU -12273 CUNEIFORM SIGN NUNUZ AB2 TIMES KAD3 -12274 CUNEIFORM SIGN NUNUZ AB2 TIMES LA -12275 CUNEIFORM SIGN NUNUZ AB2 TIMES NE -12276 CUNEIFORM SIGN NUNUZ AB2 TIMES SILA3 -12277 CUNEIFORM SIGN NUNUZ AB2 TIMES U2 -12278 CUNEIFORM SIGN NUNUZ KISIM5 TIMES BI -12279 CUNEIFORM SIGN NUNUZ KISIM5 TIMES BI U -1227A CUNEIFORM SIGN PA -1227B CUNEIFORM SIGN PAD -1227C CUNEIFORM SIGN PAN -1227D CUNEIFORM SIGN PAP -1227E CUNEIFORM SIGN PESH2 -1227F CUNEIFORM SIGN PI -12280 CUNEIFORM SIGN PI TIMES A -12281 CUNEIFORM SIGN PI TIMES AB -12282 CUNEIFORM SIGN PI TIMES BI -12283 CUNEIFORM SIGN PI TIMES BU -12284 CUNEIFORM SIGN PI TIMES E -12285 CUNEIFORM SIGN PI TIMES I -12286 CUNEIFORM SIGN PI TIMES IB -12287 CUNEIFORM SIGN PI TIMES U -12288 CUNEIFORM SIGN PI TIMES U2 -12289 CUNEIFORM SIGN PI CROSSING PI -1228A CUNEIFORM SIGN PIRIG -1228B CUNEIFORM SIGN PIRIG TIMES KAL -1228C CUNEIFORM SIGN PIRIG TIMES UD -1228D CUNEIFORM SIGN PIRIG TIMES ZA -1228E CUNEIFORM SIGN PIRIG OPPOSING PIRIG -1228F CUNEIFORM SIGN RA -12290 CUNEIFORM SIGN RAB -12291 CUNEIFORM SIGN 
RI -12292 CUNEIFORM SIGN RU -12293 CUNEIFORM SIGN SA -12294 CUNEIFORM SIGN SAG NUTILLU -12295 CUNEIFORM SIGN SAG -12296 CUNEIFORM SIGN SAG TIMES A -12297 CUNEIFORM SIGN SAG TIMES DU -12298 CUNEIFORM SIGN SAG TIMES DUB -12299 CUNEIFORM SIGN SAG TIMES HA -1229A CUNEIFORM SIGN SAG TIMES KAK -1229B CUNEIFORM SIGN SAG TIMES KUR -1229C CUNEIFORM SIGN SAG TIMES LUM -1229D CUNEIFORM SIGN SAG TIMES MI -1229E CUNEIFORM SIGN SAG TIMES NUN -1229F CUNEIFORM SIGN SAG TIMES SAL -122A0 CUNEIFORM SIGN SAG TIMES SHID -122A1 CUNEIFORM SIGN SAG TIMES TAB -122A2 CUNEIFORM SIGN SAG TIMES U2 -122A3 CUNEIFORM SIGN SAG TIMES UB -122A4 CUNEIFORM SIGN SAG TIMES UM -122A5 CUNEIFORM SIGN SAG TIMES UR -122A6 CUNEIFORM SIGN SAG TIMES USH -122A7 CUNEIFORM SIGN SAG OVER SAG -122A8 CUNEIFORM SIGN SAG GUNU -122A9 CUNEIFORM SIGN SAL -122AA CUNEIFORM SIGN SAL LAGAB TIMES ASH2 -122AB CUNEIFORM SIGN SANGA2 -122AC CUNEIFORM SIGN SAR -122AD CUNEIFORM SIGN SHA -122AE CUNEIFORM SIGN SHA3 -122AF CUNEIFORM SIGN SHA3 TIMES A -122B0 CUNEIFORM SIGN SHA3 TIMES BAD -122B1 CUNEIFORM SIGN SHA3 TIMES GISH -122B2 CUNEIFORM SIGN SHA3 TIMES NE -122B3 CUNEIFORM SIGN SHA3 TIMES SHU2 -122B4 CUNEIFORM SIGN SHA3 TIMES TUR -122B5 CUNEIFORM SIGN SHA3 TIMES U -122B6 CUNEIFORM SIGN SHA3 TIMES U PLUS A -122B7 CUNEIFORM SIGN SHA6 -122B8 CUNEIFORM SIGN SHAB6 -122B9 CUNEIFORM SIGN SHAR2 -122BA CUNEIFORM SIGN SHE -122BB CUNEIFORM SIGN SHE HU -122BC CUNEIFORM SIGN SHE OVER SHE GAD OVER GAD GAR OVER GAR -122BD CUNEIFORM SIGN SHE OVER SHE TAB OVER TAB GAR OVER GAR -122BE CUNEIFORM SIGN SHEG9 -122BF CUNEIFORM SIGN SHEN -122C0 CUNEIFORM SIGN SHESH -122C1 CUNEIFORM SIGN SHESH2 -122C2 CUNEIFORM SIGN SHESHLAM -122C3 CUNEIFORM SIGN SHID -122C4 CUNEIFORM SIGN SHID TIMES A -122C5 CUNEIFORM SIGN SHID TIMES IM -122C6 CUNEIFORM SIGN SHIM -122C7 CUNEIFORM SIGN SHIM TIMES A -122C8 CUNEIFORM SIGN SHIM TIMES BAL -122C9 CUNEIFORM SIGN SHIM TIMES BULUG -122CA CUNEIFORM SIGN SHIM TIMES DIN -122CB CUNEIFORM SIGN SHIM TIMES GAR -122CC CUNEIFORM SIGN SHIM 
TIMES IGI -122CD CUNEIFORM SIGN SHIM TIMES IGI GUNU -122CE CUNEIFORM SIGN SHIM TIMES KUSHU2 -122CF CUNEIFORM SIGN SHIM TIMES LUL -122D0 CUNEIFORM SIGN SHIM TIMES MUG -122D1 CUNEIFORM SIGN SHIM TIMES SAL -122D2 CUNEIFORM SIGN SHINIG -122D3 CUNEIFORM SIGN SHIR -122D4 CUNEIFORM SIGN SHIR TENU -122D5 CUNEIFORM SIGN SHIR OVER SHIR BUR OVER BUR -122D6 CUNEIFORM SIGN SHITA -122D7 CUNEIFORM SIGN SHU -122D8 CUNEIFORM SIGN SHU OVER INVERTED SHU -122D9 CUNEIFORM SIGN SHU2 -122DA CUNEIFORM SIGN SHUBUR -122DB CUNEIFORM SIGN SI -122DC CUNEIFORM SIGN SI GUNU -122DD CUNEIFORM SIGN SIG -122DE CUNEIFORM SIGN SIG4 -122DF CUNEIFORM SIGN SIG4 OVER SIG4 SHU2 -122E0 CUNEIFORM SIGN SIK2 -122E1 CUNEIFORM SIGN SILA3 -122E2 CUNEIFORM SIGN SU -122E3 CUNEIFORM SIGN SU OVER SU -122E4 CUNEIFORM SIGN SUD -122E5 CUNEIFORM SIGN SUD2 -122E6 CUNEIFORM SIGN SUHUR -122E7 CUNEIFORM SIGN SUM -122E8 CUNEIFORM SIGN SUMASH -122E9 CUNEIFORM SIGN SUR -122EA CUNEIFORM SIGN SUR9 -122EB CUNEIFORM SIGN TA -122EC CUNEIFORM SIGN TA ASTERISK -122ED CUNEIFORM SIGN TA TIMES HI -122EE CUNEIFORM SIGN TA TIMES MI -122EF CUNEIFORM SIGN TA GUNU -122F0 CUNEIFORM SIGN TAB -122F1 CUNEIFORM SIGN TAB OVER TAB NI OVER NI DISH OVER DISH -122F2 CUNEIFORM SIGN TAB SQUARED -122F3 CUNEIFORM SIGN TAG -122F4 CUNEIFORM SIGN TAG TIMES BI -122F5 CUNEIFORM SIGN TAG TIMES GUD -122F6 CUNEIFORM SIGN TAG TIMES SHE -122F7 CUNEIFORM SIGN TAG TIMES SHU -122F8 CUNEIFORM SIGN TAG TIMES TUG2 -122F9 CUNEIFORM SIGN TAG TIMES UD -122FA CUNEIFORM SIGN TAK4 -122FB CUNEIFORM SIGN TAR -122FC CUNEIFORM SIGN TE -122FD CUNEIFORM SIGN TE GUNU -122FE CUNEIFORM SIGN TI -122FF CUNEIFORM SIGN TI TENU -12300 CUNEIFORM SIGN TIL -12301 CUNEIFORM SIGN TIR -12302 CUNEIFORM SIGN TIR TIMES TAK4 -12303 CUNEIFORM SIGN TIR OVER TIR -12304 CUNEIFORM SIGN TIR OVER TIR GAD OVER GAD GAR OVER GAR -12305 CUNEIFORM SIGN TU -12306 CUNEIFORM SIGN TUG2 -12307 CUNEIFORM SIGN TUK -12308 CUNEIFORM SIGN TUM -12309 CUNEIFORM SIGN TUR -1230A CUNEIFORM SIGN TUR OVER TUR ZA OVER ZA -1230B 
CUNEIFORM SIGN U -1230C CUNEIFORM SIGN U GUD -1230D CUNEIFORM SIGN U U U -1230E CUNEIFORM SIGN U OVER U PA OVER PA GAR OVER GAR -1230F CUNEIFORM SIGN U OVER U SUR OVER SUR -12310 CUNEIFORM SIGN U OVER U U REVERSED OVER U REVERSED -12311 CUNEIFORM SIGN U2 -12312 CUNEIFORM SIGN UB -12313 CUNEIFORM SIGN UD -12314 CUNEIFORM SIGN UD KUSHU2 -12315 CUNEIFORM SIGN UD TIMES BAD -12316 CUNEIFORM SIGN UD TIMES MI -12317 CUNEIFORM SIGN UD TIMES U PLUS U PLUS U -12318 CUNEIFORM SIGN UD TIMES U PLUS U PLUS U GUNU -12319 CUNEIFORM SIGN UD GUNU -1231A CUNEIFORM SIGN UD SHESHIG -1231B CUNEIFORM SIGN UD SHESHIG TIMES BAD -1231C CUNEIFORM SIGN UDUG -1231D CUNEIFORM SIGN UM -1231E CUNEIFORM SIGN UM TIMES LAGAB -1231F CUNEIFORM SIGN UM TIMES ME PLUS DA -12320 CUNEIFORM SIGN UM TIMES SHA3 -12321 CUNEIFORM SIGN UM TIMES U -12322 CUNEIFORM SIGN UMBIN -12323 CUNEIFORM SIGN UMUM -12324 CUNEIFORM SIGN UMUM TIMES KASKAL -12325 CUNEIFORM SIGN UMUM TIMES PA -12326 CUNEIFORM SIGN UN -12327 CUNEIFORM SIGN UN GUNU -12328 CUNEIFORM SIGN UR -12329 CUNEIFORM SIGN UR CROSSING UR -1232A CUNEIFORM SIGN UR SHESHIG -1232B CUNEIFORM SIGN UR2 -1232C CUNEIFORM SIGN UR2 TIMES A PLUS HA -1232D CUNEIFORM SIGN UR2 TIMES A PLUS NA -1232E CUNEIFORM SIGN UR2 TIMES AL -1232F CUNEIFORM SIGN UR2 TIMES HA -12330 CUNEIFORM SIGN UR2 TIMES NUN -12331 CUNEIFORM SIGN UR2 TIMES U2 -12332 CUNEIFORM SIGN UR2 TIMES U2 PLUS ASH -12333 CUNEIFORM SIGN UR2 TIMES U2 PLUS BI -12334 CUNEIFORM SIGN UR4 -12335 CUNEIFORM SIGN URI -12336 CUNEIFORM SIGN URI3 -12337 CUNEIFORM SIGN URU -12338 CUNEIFORM SIGN URU TIMES A -12339 CUNEIFORM SIGN URU TIMES ASHGAB -1233A CUNEIFORM SIGN URU TIMES BAR -1233B CUNEIFORM SIGN URU TIMES DUN -1233C CUNEIFORM SIGN URU TIMES GA -1233D CUNEIFORM SIGN URU TIMES GAL -1233E CUNEIFORM SIGN URU TIMES GAN2 TENU -1233F CUNEIFORM SIGN URU TIMES GAR -12340 CUNEIFORM SIGN URU TIMES GU -12341 CUNEIFORM SIGN URU TIMES HA -12342 CUNEIFORM SIGN URU TIMES IGI -12343 CUNEIFORM SIGN URU TIMES IM -12344 CUNEIFORM SIGN URU 
TIMES ISH -12345 CUNEIFORM SIGN URU TIMES KI -12346 CUNEIFORM SIGN URU TIMES LUM -12347 CUNEIFORM SIGN URU TIMES MIN -12348 CUNEIFORM SIGN URU TIMES PA -12349 CUNEIFORM SIGN URU TIMES SHE -1234A CUNEIFORM SIGN URU TIMES SIG4 -1234B CUNEIFORM SIGN URU TIMES TU -1234C CUNEIFORM SIGN URU TIMES U PLUS GUD -1234D CUNEIFORM SIGN URU TIMES UD -1234E CUNEIFORM SIGN URU TIMES URUDA -1234F CUNEIFORM SIGN URUDA -12350 CUNEIFORM SIGN URUDA TIMES U -12351 CUNEIFORM SIGN USH -12352 CUNEIFORM SIGN USH TIMES A -12353 CUNEIFORM SIGN USH TIMES KU -12354 CUNEIFORM SIGN USH TIMES KUR -12355 CUNEIFORM SIGN USH TIMES TAK4 -12356 CUNEIFORM SIGN USHX -12357 CUNEIFORM SIGN USH2 -12358 CUNEIFORM SIGN USHUMX -12359 CUNEIFORM SIGN UTUKI -1235A CUNEIFORM SIGN UZ3 -1235B CUNEIFORM SIGN UZ3 TIMES KASKAL -1235C CUNEIFORM SIGN UZU -1235D CUNEIFORM SIGN ZA -1235E CUNEIFORM SIGN ZA TENU -1235F CUNEIFORM SIGN ZA SQUARED TIMES KUR -12360 CUNEIFORM SIGN ZAG -12361 CUNEIFORM SIGN ZAMX -12362 CUNEIFORM SIGN ZE2 -12363 CUNEIFORM SIGN ZI -12364 CUNEIFORM SIGN ZI OVER ZI -12365 CUNEIFORM SIGN ZI3 -12366 CUNEIFORM SIGN ZIB -12367 CUNEIFORM SIGN ZIB KABA TENU -12368 CUNEIFORM SIGN ZIG -12369 CUNEIFORM SIGN ZIZ2 -1236A CUNEIFORM SIGN ZU -1236B CUNEIFORM SIGN ZU5 -1236C CUNEIFORM SIGN ZU5 TIMES A -1236D CUNEIFORM SIGN ZUBUR -1236E CUNEIFORM SIGN ZUM -12400 CUNEIFORM NUMERIC SIGN TWO ASH -12401 CUNEIFORM NUMERIC SIGN THREE ASH -12402 CUNEIFORM NUMERIC SIGN FOUR ASH -12403 CUNEIFORM NUMERIC SIGN FIVE ASH -12404 CUNEIFORM NUMERIC SIGN SIX ASH -12405 CUNEIFORM NUMERIC SIGN SEVEN ASH -12406 CUNEIFORM NUMERIC SIGN EIGHT ASH -12407 CUNEIFORM NUMERIC SIGN NINE ASH -12408 CUNEIFORM NUMERIC SIGN THREE DISH -12409 CUNEIFORM NUMERIC SIGN FOUR DISH -1240A CUNEIFORM NUMERIC SIGN FIVE DISH -1240B CUNEIFORM NUMERIC SIGN SIX DISH -1240C CUNEIFORM NUMERIC SIGN SEVEN DISH -1240D CUNEIFORM NUMERIC SIGN EIGHT DISH -1240E CUNEIFORM NUMERIC SIGN NINE DISH -1240F CUNEIFORM NUMERIC SIGN FOUR U -12410 CUNEIFORM NUMERIC SIGN FIVE U 
-12411 CUNEIFORM NUMERIC SIGN SIX U -12412 CUNEIFORM NUMERIC SIGN SEVEN U -12413 CUNEIFORM NUMERIC SIGN EIGHT U -12414 CUNEIFORM NUMERIC SIGN NINE U -12415 CUNEIFORM NUMERIC SIGN ONE GESH2 -12416 CUNEIFORM NUMERIC SIGN TWO GESH2 -12417 CUNEIFORM NUMERIC SIGN THREE GESH2 -12418 CUNEIFORM NUMERIC SIGN FOUR GESH2 -12419 CUNEIFORM NUMERIC SIGN FIVE GESH2 -1241A CUNEIFORM NUMERIC SIGN SIX GESH2 -1241B CUNEIFORM NUMERIC SIGN SEVEN GESH2 -1241C CUNEIFORM NUMERIC SIGN EIGHT GESH2 -1241D CUNEIFORM NUMERIC SIGN NINE GESH2 -1241E CUNEIFORM NUMERIC SIGN ONE GESHU -1241F CUNEIFORM NUMERIC SIGN TWO GESHU -12420 CUNEIFORM NUMERIC SIGN THREE GESHU -12421 CUNEIFORM NUMERIC SIGN FOUR GESHU -12422 CUNEIFORM NUMERIC SIGN FIVE GESHU -12423 CUNEIFORM NUMERIC SIGN TWO SHAR2 -12424 CUNEIFORM NUMERIC SIGN THREE SHAR2 -12425 CUNEIFORM NUMERIC SIGN THREE SHAR2 VARIANT FORM -12426 CUNEIFORM NUMERIC SIGN FOUR SHAR2 -12427 CUNEIFORM NUMERIC SIGN FIVE SHAR2 -12428 CUNEIFORM NUMERIC SIGN SIX SHAR2 -12429 CUNEIFORM NUMERIC SIGN SEVEN SHAR2 -1242A CUNEIFORM NUMERIC SIGN EIGHT SHAR2 -1242B CUNEIFORM NUMERIC SIGN NINE SHAR2 -1242C CUNEIFORM NUMERIC SIGN ONE SHARU -1242D CUNEIFORM NUMERIC SIGN TWO SHARU -1242E CUNEIFORM NUMERIC SIGN THREE SHARU -1242F CUNEIFORM NUMERIC SIGN THREE SHARU VARIANT FORM -12430 CUNEIFORM NUMERIC SIGN FOUR SHARU -12431 CUNEIFORM NUMERIC SIGN FIVE SHARU -12432 CUNEIFORM NUMERIC SIGN SHAR2 TIMES GAL PLUS DISH -12433 CUNEIFORM NUMERIC SIGN SHAR2 TIMES GAL PLUS MIN -12434 CUNEIFORM NUMERIC SIGN ONE BURU -12435 CUNEIFORM NUMERIC SIGN TWO BURU -12436 CUNEIFORM NUMERIC SIGN THREE BURU -12437 CUNEIFORM NUMERIC SIGN THREE BURU VARIANT FORM -12438 CUNEIFORM NUMERIC SIGN FOUR BURU -12439 CUNEIFORM NUMERIC SIGN FIVE BURU -1243A CUNEIFORM NUMERIC SIGN THREE VARIANT FORM ESH16 -1243B CUNEIFORM NUMERIC SIGN THREE VARIANT FORM ESH21 -1243C CUNEIFORM NUMERIC SIGN FOUR VARIANT FORM LIMMU -1243D CUNEIFORM NUMERIC SIGN FOUR VARIANT FORM LIMMU4 -1243E CUNEIFORM NUMERIC SIGN FOUR VARIANT FORM 
LIMMU A -1243F CUNEIFORM NUMERIC SIGN FOUR VARIANT FORM LIMMU B -12440 CUNEIFORM NUMERIC SIGN SIX VARIANT FORM ASH9 -12441 CUNEIFORM NUMERIC SIGN SEVEN VARIANT FORM IMIN3 -12442 CUNEIFORM NUMERIC SIGN SEVEN VARIANT FORM IMIN A -12443 CUNEIFORM NUMERIC SIGN SEVEN VARIANT FORM IMIN B -12444 CUNEIFORM NUMERIC SIGN EIGHT VARIANT FORM USSU -12445 CUNEIFORM NUMERIC SIGN EIGHT VARIANT FORM USSU3 -12446 CUNEIFORM NUMERIC SIGN NINE VARIANT FORM ILIMMU -12447 CUNEIFORM NUMERIC SIGN NINE VARIANT FORM ILIMMU3 -12448 CUNEIFORM NUMERIC SIGN NINE VARIANT FORM ILIMMU4 -12449 CUNEIFORM NUMERIC SIGN NINE VARIANT FORM ILIMMU A -1244A CUNEIFORM NUMERIC SIGN TWO ASH TENU -1244B CUNEIFORM NUMERIC SIGN THREE ASH TENU -1244C CUNEIFORM NUMERIC SIGN FOUR ASH TENU -1244D CUNEIFORM NUMERIC SIGN FIVE ASH TENU -1244E CUNEIFORM NUMERIC SIGN SIX ASH TENU -1244F CUNEIFORM NUMERIC SIGN ONE BAN2 -12450 CUNEIFORM NUMERIC SIGN TWO BAN2 -12451 CUNEIFORM NUMERIC SIGN THREE BAN2 -12452 CUNEIFORM NUMERIC SIGN FOUR BAN2 -12453 CUNEIFORM NUMERIC SIGN FOUR BAN2 VARIANT FORM -12454 CUNEIFORM NUMERIC SIGN FIVE BAN2 -12455 CUNEIFORM NUMERIC SIGN FIVE BAN2 VARIANT FORM -12456 CUNEIFORM NUMERIC SIGN NIGIDAMIN -12457 CUNEIFORM NUMERIC SIGN NIGIDAESH -12458 CUNEIFORM NUMERIC SIGN ONE ESHE3 -12459 CUNEIFORM NUMERIC SIGN TWO ESHE3 -1245A CUNEIFORM NUMERIC SIGN ONE THIRD DISH -1245B CUNEIFORM NUMERIC SIGN TWO THIRDS DISH -1245C CUNEIFORM NUMERIC SIGN FIVE SIXTHS DISH -1245D CUNEIFORM NUMERIC SIGN ONE THIRD VARIANT FORM A -1245E CUNEIFORM NUMERIC SIGN TWO THIRDS VARIANT FORM A -1245F CUNEIFORM NUMERIC SIGN ONE EIGHTH ASH -12460 CUNEIFORM NUMERIC SIGN ONE QUARTER ASH -12461 CUNEIFORM NUMERIC SIGN OLD ASSYRIAN ONE SIXTH -12462 CUNEIFORM NUMERIC SIGN OLD ASSYRIAN ONE QUARTER -12470 CUNEIFORM PUNCTUATION SIGN OLD ASSYRIAN WORD DIVIDER -12471 CUNEIFORM PUNCTUATION SIGN VERTICAL COLON -12472 CUNEIFORM PUNCTUATION SIGN DIAGONAL COLON -12473 CUNEIFORM PUNCTUATION SIGN DIAGONAL TRICOLON -13000 EGYPTIAN HIEROGLYPH A001 -13001 
EGYPTIAN HIEROGLYPH A002 -13002 EGYPTIAN HIEROGLYPH A003 -13003 EGYPTIAN HIEROGLYPH A004 -13004 EGYPTIAN HIEROGLYPH A005 -13005 EGYPTIAN HIEROGLYPH A005A -13006 EGYPTIAN HIEROGLYPH A006 -13007 EGYPTIAN HIEROGLYPH A006A -13008 EGYPTIAN HIEROGLYPH A006B -13009 EGYPTIAN HIEROGLYPH A007 -1300A EGYPTIAN HIEROGLYPH A008 -1300B EGYPTIAN HIEROGLYPH A009 -1300C EGYPTIAN HIEROGLYPH A010 -1300D EGYPTIAN HIEROGLYPH A011 -1300E EGYPTIAN HIEROGLYPH A012 -1300F EGYPTIAN HIEROGLYPH A013 -13010 EGYPTIAN HIEROGLYPH A014 -13011 EGYPTIAN HIEROGLYPH A014A -13012 EGYPTIAN HIEROGLYPH A015 -13013 EGYPTIAN HIEROGLYPH A016 -13014 EGYPTIAN HIEROGLYPH A017 -13015 EGYPTIAN HIEROGLYPH A017A -13016 EGYPTIAN HIEROGLYPH A018 -13017 EGYPTIAN HIEROGLYPH A019 -13018 EGYPTIAN HIEROGLYPH A020 -13019 EGYPTIAN HIEROGLYPH A021 -1301A EGYPTIAN HIEROGLYPH A022 -1301B EGYPTIAN HIEROGLYPH A023 -1301C EGYPTIAN HIEROGLYPH A024 -1301D EGYPTIAN HIEROGLYPH A025 -1301E EGYPTIAN HIEROGLYPH A026 -1301F EGYPTIAN HIEROGLYPH A027 -13020 EGYPTIAN HIEROGLYPH A028 -13021 EGYPTIAN HIEROGLYPH A029 -13022 EGYPTIAN HIEROGLYPH A030 -13023 EGYPTIAN HIEROGLYPH A031 -13024 EGYPTIAN HIEROGLYPH A032 -13025 EGYPTIAN HIEROGLYPH A032A -13026 EGYPTIAN HIEROGLYPH A033 -13027 EGYPTIAN HIEROGLYPH A034 -13028 EGYPTIAN HIEROGLYPH A035 -13029 EGYPTIAN HIEROGLYPH A036 -1302A EGYPTIAN HIEROGLYPH A037 -1302B EGYPTIAN HIEROGLYPH A038 -1302C EGYPTIAN HIEROGLYPH A039 -1302D EGYPTIAN HIEROGLYPH A040 -1302E EGYPTIAN HIEROGLYPH A040A -1302F EGYPTIAN HIEROGLYPH A041 -13030 EGYPTIAN HIEROGLYPH A042 -13031 EGYPTIAN HIEROGLYPH A042A -13032 EGYPTIAN HIEROGLYPH A043 -13033 EGYPTIAN HIEROGLYPH A043A -13034 EGYPTIAN HIEROGLYPH A044 -13035 EGYPTIAN HIEROGLYPH A045 -13036 EGYPTIAN HIEROGLYPH A045A -13037 EGYPTIAN HIEROGLYPH A046 -13038 EGYPTIAN HIEROGLYPH A047 -13039 EGYPTIAN HIEROGLYPH A048 -1303A EGYPTIAN HIEROGLYPH A049 -1303B EGYPTIAN HIEROGLYPH A050 -1303C EGYPTIAN HIEROGLYPH A051 -1303D EGYPTIAN HIEROGLYPH A052 -1303E EGYPTIAN HIEROGLYPH A053 -1303F 
EGYPTIAN HIEROGLYPH A054 -13040 EGYPTIAN HIEROGLYPH A055 -13041 EGYPTIAN HIEROGLYPH A056 -13042 EGYPTIAN HIEROGLYPH A057 -13043 EGYPTIAN HIEROGLYPH A058 -13044 EGYPTIAN HIEROGLYPH A059 -13045 EGYPTIAN HIEROGLYPH A060 -13046 EGYPTIAN HIEROGLYPH A061 -13047 EGYPTIAN HIEROGLYPH A062 -13048 EGYPTIAN HIEROGLYPH A063 -13049 EGYPTIAN HIEROGLYPH A064 -1304A EGYPTIAN HIEROGLYPH A065 -1304B EGYPTIAN HIEROGLYPH A066 -1304C EGYPTIAN HIEROGLYPH A067 -1304D EGYPTIAN HIEROGLYPH A068 -1304E EGYPTIAN HIEROGLYPH A069 -1304F EGYPTIAN HIEROGLYPH A070 -13050 EGYPTIAN HIEROGLYPH B001 -13051 EGYPTIAN HIEROGLYPH B002 -13052 EGYPTIAN HIEROGLYPH B003 -13053 EGYPTIAN HIEROGLYPH B004 -13054 EGYPTIAN HIEROGLYPH B005 -13055 EGYPTIAN HIEROGLYPH B005A -13056 EGYPTIAN HIEROGLYPH B006 -13057 EGYPTIAN HIEROGLYPH B007 -13058 EGYPTIAN HIEROGLYPH B008 -13059 EGYPTIAN HIEROGLYPH B009 -1305A EGYPTIAN HIEROGLYPH C001 -1305B EGYPTIAN HIEROGLYPH C002 -1305C EGYPTIAN HIEROGLYPH C002A -1305D EGYPTIAN HIEROGLYPH C002B -1305E EGYPTIAN HIEROGLYPH C002C -1305F EGYPTIAN HIEROGLYPH C003 -13060 EGYPTIAN HIEROGLYPH C004 -13061 EGYPTIAN HIEROGLYPH C005 -13062 EGYPTIAN HIEROGLYPH C006 -13063 EGYPTIAN HIEROGLYPH C007 -13064 EGYPTIAN HIEROGLYPH C008 -13065 EGYPTIAN HIEROGLYPH C009 -13066 EGYPTIAN HIEROGLYPH C010 -13067 EGYPTIAN HIEROGLYPH C010A -13068 EGYPTIAN HIEROGLYPH C011 -13069 EGYPTIAN HIEROGLYPH C012 -1306A EGYPTIAN HIEROGLYPH C013 -1306B EGYPTIAN HIEROGLYPH C014 -1306C EGYPTIAN HIEROGLYPH C015 -1306D EGYPTIAN HIEROGLYPH C016 -1306E EGYPTIAN HIEROGLYPH C017 -1306F EGYPTIAN HIEROGLYPH C018 -13070 EGYPTIAN HIEROGLYPH C019 -13071 EGYPTIAN HIEROGLYPH C020 -13072 EGYPTIAN HIEROGLYPH C021 -13073 EGYPTIAN HIEROGLYPH C022 -13074 EGYPTIAN HIEROGLYPH C023 -13075 EGYPTIAN HIEROGLYPH C024 -13076 EGYPTIAN HIEROGLYPH D001 -13077 EGYPTIAN HIEROGLYPH D002 -13078 EGYPTIAN HIEROGLYPH D003 -13079 EGYPTIAN HIEROGLYPH D004 -1307A EGYPTIAN HIEROGLYPH D005 -1307B EGYPTIAN HIEROGLYPH D006 -1307C EGYPTIAN HIEROGLYPH D007 -1307D EGYPTIAN 
HIEROGLYPH D008 -1307E EGYPTIAN HIEROGLYPH D008A -1307F EGYPTIAN HIEROGLYPH D009 -13080 EGYPTIAN HIEROGLYPH D010 -13081 EGYPTIAN HIEROGLYPH D011 -13082 EGYPTIAN HIEROGLYPH D012 -13083 EGYPTIAN HIEROGLYPH D013 -13084 EGYPTIAN HIEROGLYPH D014 -13085 EGYPTIAN HIEROGLYPH D015 -13086 EGYPTIAN HIEROGLYPH D016 -13087 EGYPTIAN HIEROGLYPH D017 -13088 EGYPTIAN HIEROGLYPH D018 -13089 EGYPTIAN HIEROGLYPH D019 -1308A EGYPTIAN HIEROGLYPH D020 -1308B EGYPTIAN HIEROGLYPH D021 -1308C EGYPTIAN HIEROGLYPH D022 -1308D EGYPTIAN HIEROGLYPH D023 -1308E EGYPTIAN HIEROGLYPH D024 -1308F EGYPTIAN HIEROGLYPH D025 -13090 EGYPTIAN HIEROGLYPH D026 -13091 EGYPTIAN HIEROGLYPH D027 -13092 EGYPTIAN HIEROGLYPH D027A -13093 EGYPTIAN HIEROGLYPH D028 -13094 EGYPTIAN HIEROGLYPH D029 -13095 EGYPTIAN HIEROGLYPH D030 -13096 EGYPTIAN HIEROGLYPH D031 -13097 EGYPTIAN HIEROGLYPH D031A -13098 EGYPTIAN HIEROGLYPH D032 -13099 EGYPTIAN HIEROGLYPH D033 -1309A EGYPTIAN HIEROGLYPH D034 -1309B EGYPTIAN HIEROGLYPH D034A -1309C EGYPTIAN HIEROGLYPH D035 -1309D EGYPTIAN HIEROGLYPH D036 -1309E EGYPTIAN HIEROGLYPH D037 -1309F EGYPTIAN HIEROGLYPH D038 -130A0 EGYPTIAN HIEROGLYPH D039 -130A1 EGYPTIAN HIEROGLYPH D040 -130A2 EGYPTIAN HIEROGLYPH D041 -130A3 EGYPTIAN HIEROGLYPH D042 -130A4 EGYPTIAN HIEROGLYPH D043 -130A5 EGYPTIAN HIEROGLYPH D044 -130A6 EGYPTIAN HIEROGLYPH D045 -130A7 EGYPTIAN HIEROGLYPH D046 -130A8 EGYPTIAN HIEROGLYPH D046A -130A9 EGYPTIAN HIEROGLYPH D047 -130AA EGYPTIAN HIEROGLYPH D048 -130AB EGYPTIAN HIEROGLYPH D048A -130AC EGYPTIAN HIEROGLYPH D049 -130AD EGYPTIAN HIEROGLYPH D050 -130AE EGYPTIAN HIEROGLYPH D050A -130AF EGYPTIAN HIEROGLYPH D050B -130B0 EGYPTIAN HIEROGLYPH D050C -130B1 EGYPTIAN HIEROGLYPH D050D -130B2 EGYPTIAN HIEROGLYPH D050E -130B3 EGYPTIAN HIEROGLYPH D050F -130B4 EGYPTIAN HIEROGLYPH D050G -130B5 EGYPTIAN HIEROGLYPH D050H -130B6 EGYPTIAN HIEROGLYPH D050I -130B7 EGYPTIAN HIEROGLYPH D051 -130B8 EGYPTIAN HIEROGLYPH D052 -130B9 EGYPTIAN HIEROGLYPH D052A -130BA EGYPTIAN HIEROGLYPH D053 -130BB EGYPTIAN 
HIEROGLYPH D054 -130BC EGYPTIAN HIEROGLYPH D054A -130BD EGYPTIAN HIEROGLYPH D055 -130BE EGYPTIAN HIEROGLYPH D056 -130BF EGYPTIAN HIEROGLYPH D057 -130C0 EGYPTIAN HIEROGLYPH D058 -130C1 EGYPTIAN HIEROGLYPH D059 -130C2 EGYPTIAN HIEROGLYPH D060 -130C3 EGYPTIAN HIEROGLYPH D061 -130C4 EGYPTIAN HIEROGLYPH D062 -130C5 EGYPTIAN HIEROGLYPH D063 -130C6 EGYPTIAN HIEROGLYPH D064 -130C7 EGYPTIAN HIEROGLYPH D065 -130C8 EGYPTIAN HIEROGLYPH D066 -130C9 EGYPTIAN HIEROGLYPH D067 -130CA EGYPTIAN HIEROGLYPH D067A -130CB EGYPTIAN HIEROGLYPH D067B -130CC EGYPTIAN HIEROGLYPH D067C -130CD EGYPTIAN HIEROGLYPH D067D -130CE EGYPTIAN HIEROGLYPH D067E -130CF EGYPTIAN HIEROGLYPH D067F -130D0 EGYPTIAN HIEROGLYPH D067G -130D1 EGYPTIAN HIEROGLYPH D067H -130D2 EGYPTIAN HIEROGLYPH E001 -130D3 EGYPTIAN HIEROGLYPH E002 -130D4 EGYPTIAN HIEROGLYPH E003 -130D5 EGYPTIAN HIEROGLYPH E004 -130D6 EGYPTIAN HIEROGLYPH E005 -130D7 EGYPTIAN HIEROGLYPH E006 -130D8 EGYPTIAN HIEROGLYPH E007 -130D9 EGYPTIAN HIEROGLYPH E008 -130DA EGYPTIAN HIEROGLYPH E008A -130DB EGYPTIAN HIEROGLYPH E009 -130DC EGYPTIAN HIEROGLYPH E009A -130DD EGYPTIAN HIEROGLYPH E010 -130DE EGYPTIAN HIEROGLYPH E011 -130DF EGYPTIAN HIEROGLYPH E012 -130E0 EGYPTIAN HIEROGLYPH E013 -130E1 EGYPTIAN HIEROGLYPH E014 -130E2 EGYPTIAN HIEROGLYPH E015 -130E3 EGYPTIAN HIEROGLYPH E016 -130E4 EGYPTIAN HIEROGLYPH E016A -130E5 EGYPTIAN HIEROGLYPH E017 -130E6 EGYPTIAN HIEROGLYPH E017A -130E7 EGYPTIAN HIEROGLYPH E018 -130E8 EGYPTIAN HIEROGLYPH E019 -130E9 EGYPTIAN HIEROGLYPH E020 -130EA EGYPTIAN HIEROGLYPH E020A -130EB EGYPTIAN HIEROGLYPH E021 -130EC EGYPTIAN HIEROGLYPH E022 -130ED EGYPTIAN HIEROGLYPH E023 -130EE EGYPTIAN HIEROGLYPH E024 -130EF EGYPTIAN HIEROGLYPH E025 -130F0 EGYPTIAN HIEROGLYPH E026 -130F1 EGYPTIAN HIEROGLYPH E027 -130F2 EGYPTIAN HIEROGLYPH E028 -130F3 EGYPTIAN HIEROGLYPH E028A -130F4 EGYPTIAN HIEROGLYPH E029 -130F5 EGYPTIAN HIEROGLYPH E030 -130F6 EGYPTIAN HIEROGLYPH E031 -130F7 EGYPTIAN HIEROGLYPH E032 -130F8 EGYPTIAN HIEROGLYPH E033 -130F9 EGYPTIAN 
HIEROGLYPH E034 -130FA EGYPTIAN HIEROGLYPH E034A -130FB EGYPTIAN HIEROGLYPH E036 -130FC EGYPTIAN HIEROGLYPH E037 -130FD EGYPTIAN HIEROGLYPH E038 -130FE EGYPTIAN HIEROGLYPH F001 -130FF EGYPTIAN HIEROGLYPH F001A -13100 EGYPTIAN HIEROGLYPH F002 -13101 EGYPTIAN HIEROGLYPH F003 -13102 EGYPTIAN HIEROGLYPH F004 -13103 EGYPTIAN HIEROGLYPH F005 -13104 EGYPTIAN HIEROGLYPH F006 -13105 EGYPTIAN HIEROGLYPH F007 -13106 EGYPTIAN HIEROGLYPH F008 -13107 EGYPTIAN HIEROGLYPH F009 -13108 EGYPTIAN HIEROGLYPH F010 -13109 EGYPTIAN HIEROGLYPH F011 -1310A EGYPTIAN HIEROGLYPH F012 -1310B EGYPTIAN HIEROGLYPH F013 -1310C EGYPTIAN HIEROGLYPH F013A -1310D EGYPTIAN HIEROGLYPH F014 -1310E EGYPTIAN HIEROGLYPH F015 -1310F EGYPTIAN HIEROGLYPH F016 -13110 EGYPTIAN HIEROGLYPH F017 -13111 EGYPTIAN HIEROGLYPH F018 -13112 EGYPTIAN HIEROGLYPH F019 -13113 EGYPTIAN HIEROGLYPH F020 -13114 EGYPTIAN HIEROGLYPH F021 -13115 EGYPTIAN HIEROGLYPH F021A -13116 EGYPTIAN HIEROGLYPH F022 -13117 EGYPTIAN HIEROGLYPH F023 -13118 EGYPTIAN HIEROGLYPH F024 -13119 EGYPTIAN HIEROGLYPH F025 -1311A EGYPTIAN HIEROGLYPH F026 -1311B EGYPTIAN HIEROGLYPH F027 -1311C EGYPTIAN HIEROGLYPH F028 -1311D EGYPTIAN HIEROGLYPH F029 -1311E EGYPTIAN HIEROGLYPH F030 -1311F EGYPTIAN HIEROGLYPH F031 -13120 EGYPTIAN HIEROGLYPH F031A -13121 EGYPTIAN HIEROGLYPH F032 -13122 EGYPTIAN HIEROGLYPH F033 -13123 EGYPTIAN HIEROGLYPH F034 -13124 EGYPTIAN HIEROGLYPH F035 -13125 EGYPTIAN HIEROGLYPH F036 -13126 EGYPTIAN HIEROGLYPH F037 -13127 EGYPTIAN HIEROGLYPH F037A -13128 EGYPTIAN HIEROGLYPH F038 -13129 EGYPTIAN HIEROGLYPH F038A -1312A EGYPTIAN HIEROGLYPH F039 -1312B EGYPTIAN HIEROGLYPH F040 -1312C EGYPTIAN HIEROGLYPH F041 -1312D EGYPTIAN HIEROGLYPH F042 -1312E EGYPTIAN HIEROGLYPH F043 -1312F EGYPTIAN HIEROGLYPH F044 -13130 EGYPTIAN HIEROGLYPH F045 -13131 EGYPTIAN HIEROGLYPH F045A -13132 EGYPTIAN HIEROGLYPH F046 -13133 EGYPTIAN HIEROGLYPH F046A -13134 EGYPTIAN HIEROGLYPH F047 -13135 EGYPTIAN HIEROGLYPH F047A -13136 EGYPTIAN HIEROGLYPH F048 -13137 EGYPTIAN 
HIEROGLYPH F049 -13138 EGYPTIAN HIEROGLYPH F050 -13139 EGYPTIAN HIEROGLYPH F051 -1313A EGYPTIAN HIEROGLYPH F051A -1313B EGYPTIAN HIEROGLYPH F051B -1313C EGYPTIAN HIEROGLYPH F051C -1313D EGYPTIAN HIEROGLYPH F052 -1313E EGYPTIAN HIEROGLYPH F053 -1313F EGYPTIAN HIEROGLYPH G001 -13140 EGYPTIAN HIEROGLYPH G002 -13141 EGYPTIAN HIEROGLYPH G003 -13142 EGYPTIAN HIEROGLYPH G004 -13143 EGYPTIAN HIEROGLYPH G005 -13144 EGYPTIAN HIEROGLYPH G006 -13145 EGYPTIAN HIEROGLYPH G006A -13146 EGYPTIAN HIEROGLYPH G007 -13147 EGYPTIAN HIEROGLYPH G007A -13148 EGYPTIAN HIEROGLYPH G007B -13149 EGYPTIAN HIEROGLYPH G008 -1314A EGYPTIAN HIEROGLYPH G009 -1314B EGYPTIAN HIEROGLYPH G010 -1314C EGYPTIAN HIEROGLYPH G011 -1314D EGYPTIAN HIEROGLYPH G011A -1314E EGYPTIAN HIEROGLYPH G012 -1314F EGYPTIAN HIEROGLYPH G013 -13150 EGYPTIAN HIEROGLYPH G014 -13151 EGYPTIAN HIEROGLYPH G015 -13152 EGYPTIAN HIEROGLYPH G016 -13153 EGYPTIAN HIEROGLYPH G017 -13154 EGYPTIAN HIEROGLYPH G018 -13155 EGYPTIAN HIEROGLYPH G019 -13156 EGYPTIAN HIEROGLYPH G020 -13157 EGYPTIAN HIEROGLYPH G020A -13158 EGYPTIAN HIEROGLYPH G021 -13159 EGYPTIAN HIEROGLYPH G022 -1315A EGYPTIAN HIEROGLYPH G023 -1315B EGYPTIAN HIEROGLYPH G024 -1315C EGYPTIAN HIEROGLYPH G025 -1315D EGYPTIAN HIEROGLYPH G026 -1315E EGYPTIAN HIEROGLYPH G026A -1315F EGYPTIAN HIEROGLYPH G027 -13160 EGYPTIAN HIEROGLYPH G028 -13161 EGYPTIAN HIEROGLYPH G029 -13162 EGYPTIAN HIEROGLYPH G030 -13163 EGYPTIAN HIEROGLYPH G031 -13164 EGYPTIAN HIEROGLYPH G032 -13165 EGYPTIAN HIEROGLYPH G033 -13166 EGYPTIAN HIEROGLYPH G034 -13167 EGYPTIAN HIEROGLYPH G035 -13168 EGYPTIAN HIEROGLYPH G036 -13169 EGYPTIAN HIEROGLYPH G036A -1316A EGYPTIAN HIEROGLYPH G037 -1316B EGYPTIAN HIEROGLYPH G037A -1316C EGYPTIAN HIEROGLYPH G038 -1316D EGYPTIAN HIEROGLYPH G039 -1316E EGYPTIAN HIEROGLYPH G040 -1316F EGYPTIAN HIEROGLYPH G041 -13170 EGYPTIAN HIEROGLYPH G042 -13171 EGYPTIAN HIEROGLYPH G043 -13172 EGYPTIAN HIEROGLYPH G043A -13173 EGYPTIAN HIEROGLYPH G044 -13174 EGYPTIAN HIEROGLYPH G045 -13175 EGYPTIAN 
HIEROGLYPH G045A -13176 EGYPTIAN HIEROGLYPH G046 -13177 EGYPTIAN HIEROGLYPH G047 -13178 EGYPTIAN HIEROGLYPH G048 -13179 EGYPTIAN HIEROGLYPH G049 -1317A EGYPTIAN HIEROGLYPH G050 -1317B EGYPTIAN HIEROGLYPH G051 -1317C EGYPTIAN HIEROGLYPH G052 -1317D EGYPTIAN HIEROGLYPH G053 -1317E EGYPTIAN HIEROGLYPH G054 -1317F EGYPTIAN HIEROGLYPH H001 -13180 EGYPTIAN HIEROGLYPH H002 -13181 EGYPTIAN HIEROGLYPH H003 -13182 EGYPTIAN HIEROGLYPH H004 -13183 EGYPTIAN HIEROGLYPH H005 -13184 EGYPTIAN HIEROGLYPH H006 -13185 EGYPTIAN HIEROGLYPH H006A -13186 EGYPTIAN HIEROGLYPH H007 -13187 EGYPTIAN HIEROGLYPH H008 -13188 EGYPTIAN HIEROGLYPH I001 -13189 EGYPTIAN HIEROGLYPH I002 -1318A EGYPTIAN HIEROGLYPH I003 -1318B EGYPTIAN HIEROGLYPH I004 -1318C EGYPTIAN HIEROGLYPH I005 -1318D EGYPTIAN HIEROGLYPH I005A -1318E EGYPTIAN HIEROGLYPH I006 -1318F EGYPTIAN HIEROGLYPH I007 -13190 EGYPTIAN HIEROGLYPH I008 -13191 EGYPTIAN HIEROGLYPH I009 -13192 EGYPTIAN HIEROGLYPH I009A -13193 EGYPTIAN HIEROGLYPH I010 -13194 EGYPTIAN HIEROGLYPH I010A -13195 EGYPTIAN HIEROGLYPH I011 -13196 EGYPTIAN HIEROGLYPH I011A -13197 EGYPTIAN HIEROGLYPH I012 -13198 EGYPTIAN HIEROGLYPH I013 -13199 EGYPTIAN HIEROGLYPH I014 -1319A EGYPTIAN HIEROGLYPH I015 -1319B EGYPTIAN HIEROGLYPH K001 -1319C EGYPTIAN HIEROGLYPH K002 -1319D EGYPTIAN HIEROGLYPH K003 -1319E EGYPTIAN HIEROGLYPH K004 -1319F EGYPTIAN HIEROGLYPH K005 -131A0 EGYPTIAN HIEROGLYPH K006 -131A1 EGYPTIAN HIEROGLYPH K007 -131A2 EGYPTIAN HIEROGLYPH K008 -131A3 EGYPTIAN HIEROGLYPH L001 -131A4 EGYPTIAN HIEROGLYPH L002 -131A5 EGYPTIAN HIEROGLYPH L002A -131A6 EGYPTIAN HIEROGLYPH L003 -131A7 EGYPTIAN HIEROGLYPH L004 -131A8 EGYPTIAN HIEROGLYPH L005 -131A9 EGYPTIAN HIEROGLYPH L006 -131AA EGYPTIAN HIEROGLYPH L006A -131AB EGYPTIAN HIEROGLYPH L007 -131AC EGYPTIAN HIEROGLYPH L008 -131AD EGYPTIAN HIEROGLYPH M001 -131AE EGYPTIAN HIEROGLYPH M001A -131AF EGYPTIAN HIEROGLYPH M001B -131B0 EGYPTIAN HIEROGLYPH M002 -131B1 EGYPTIAN HIEROGLYPH M003 -131B2 EGYPTIAN HIEROGLYPH M003A -131B3 EGYPTIAN 
HIEROGLYPH M004 -131B4 EGYPTIAN HIEROGLYPH M005 -131B5 EGYPTIAN HIEROGLYPH M006 -131B6 EGYPTIAN HIEROGLYPH M007 -131B7 EGYPTIAN HIEROGLYPH M008 -131B8 EGYPTIAN HIEROGLYPH M009 -131B9 EGYPTIAN HIEROGLYPH M010 -131BA EGYPTIAN HIEROGLYPH M010A -131BB EGYPTIAN HIEROGLYPH M011 -131BC EGYPTIAN HIEROGLYPH M012 -131BD EGYPTIAN HIEROGLYPH M012A -131BE EGYPTIAN HIEROGLYPH M012B -131BF EGYPTIAN HIEROGLYPH M012C -131C0 EGYPTIAN HIEROGLYPH M012D -131C1 EGYPTIAN HIEROGLYPH M012E -131C2 EGYPTIAN HIEROGLYPH M012F -131C3 EGYPTIAN HIEROGLYPH M012G -131C4 EGYPTIAN HIEROGLYPH M012H -131C5 EGYPTIAN HIEROGLYPH M013 -131C6 EGYPTIAN HIEROGLYPH M014 -131C7 EGYPTIAN HIEROGLYPH M015 -131C8 EGYPTIAN HIEROGLYPH M015A -131C9 EGYPTIAN HIEROGLYPH M016 -131CA EGYPTIAN HIEROGLYPH M016A -131CB EGYPTIAN HIEROGLYPH M017 -131CC EGYPTIAN HIEROGLYPH M017A -131CD EGYPTIAN HIEROGLYPH M018 -131CE EGYPTIAN HIEROGLYPH M019 -131CF EGYPTIAN HIEROGLYPH M020 -131D0 EGYPTIAN HIEROGLYPH M021 -131D1 EGYPTIAN HIEROGLYPH M022 -131D2 EGYPTIAN HIEROGLYPH M022A -131D3 EGYPTIAN HIEROGLYPH M023 -131D4 EGYPTIAN HIEROGLYPH M024 -131D5 EGYPTIAN HIEROGLYPH M024A -131D6 EGYPTIAN HIEROGLYPH M025 -131D7 EGYPTIAN HIEROGLYPH M026 -131D8 EGYPTIAN HIEROGLYPH M027 -131D9 EGYPTIAN HIEROGLYPH M028 -131DA EGYPTIAN HIEROGLYPH M028A -131DB EGYPTIAN HIEROGLYPH M029 -131DC EGYPTIAN HIEROGLYPH M030 -131DD EGYPTIAN HIEROGLYPH M031 -131DE EGYPTIAN HIEROGLYPH M031A -131DF EGYPTIAN HIEROGLYPH M032 -131E0 EGYPTIAN HIEROGLYPH M033 -131E1 EGYPTIAN HIEROGLYPH M033A -131E2 EGYPTIAN HIEROGLYPH M033B -131E3 EGYPTIAN HIEROGLYPH M034 -131E4 EGYPTIAN HIEROGLYPH M035 -131E5 EGYPTIAN HIEROGLYPH M036 -131E6 EGYPTIAN HIEROGLYPH M037 -131E7 EGYPTIAN HIEROGLYPH M038 -131E8 EGYPTIAN HIEROGLYPH M039 -131E9 EGYPTIAN HIEROGLYPH M040 -131EA EGYPTIAN HIEROGLYPH M040A -131EB EGYPTIAN HIEROGLYPH M041 -131EC EGYPTIAN HIEROGLYPH M042 -131ED EGYPTIAN HIEROGLYPH M043 -131EE EGYPTIAN HIEROGLYPH M044 -131EF EGYPTIAN HIEROGLYPH N001 -131F0 EGYPTIAN HIEROGLYPH N002 -131F1 
EGYPTIAN HIEROGLYPH N003 -131F2 EGYPTIAN HIEROGLYPH N004 -131F3 EGYPTIAN HIEROGLYPH N005 -131F4 EGYPTIAN HIEROGLYPH N006 -131F5 EGYPTIAN HIEROGLYPH N007 -131F6 EGYPTIAN HIEROGLYPH N008 -131F7 EGYPTIAN HIEROGLYPH N009 -131F8 EGYPTIAN HIEROGLYPH N010 -131F9 EGYPTIAN HIEROGLYPH N011 -131FA EGYPTIAN HIEROGLYPH N012 -131FB EGYPTIAN HIEROGLYPH N013 -131FC EGYPTIAN HIEROGLYPH N014 -131FD EGYPTIAN HIEROGLYPH N015 -131FE EGYPTIAN HIEROGLYPH N016 -131FF EGYPTIAN HIEROGLYPH N017 -13200 EGYPTIAN HIEROGLYPH N018 -13201 EGYPTIAN HIEROGLYPH N018A -13202 EGYPTIAN HIEROGLYPH N018B -13203 EGYPTIAN HIEROGLYPH N019 -13204 EGYPTIAN HIEROGLYPH N020 -13205 EGYPTIAN HIEROGLYPH N021 -13206 EGYPTIAN HIEROGLYPH N022 -13207 EGYPTIAN HIEROGLYPH N023 -13208 EGYPTIAN HIEROGLYPH N024 -13209 EGYPTIAN HIEROGLYPH N025 -1320A EGYPTIAN HIEROGLYPH N025A -1320B EGYPTIAN HIEROGLYPH N026 -1320C EGYPTIAN HIEROGLYPH N027 -1320D EGYPTIAN HIEROGLYPH N028 -1320E EGYPTIAN HIEROGLYPH N029 -1320F EGYPTIAN HIEROGLYPH N030 -13210 EGYPTIAN HIEROGLYPH N031 -13211 EGYPTIAN HIEROGLYPH N032 -13212 EGYPTIAN HIEROGLYPH N033 -13213 EGYPTIAN HIEROGLYPH N033A -13214 EGYPTIAN HIEROGLYPH N034 -13215 EGYPTIAN HIEROGLYPH N034A -13216 EGYPTIAN HIEROGLYPH N035 -13217 EGYPTIAN HIEROGLYPH N035A -13218 EGYPTIAN HIEROGLYPH N036 -13219 EGYPTIAN HIEROGLYPH N037 -1321A EGYPTIAN HIEROGLYPH N037A -1321B EGYPTIAN HIEROGLYPH N038 -1321C EGYPTIAN HIEROGLYPH N039 -1321D EGYPTIAN HIEROGLYPH N040 -1321E EGYPTIAN HIEROGLYPH N041 -1321F EGYPTIAN HIEROGLYPH N042 -13220 EGYPTIAN HIEROGLYPH NL001 -13221 EGYPTIAN HIEROGLYPH NL002 -13222 EGYPTIAN HIEROGLYPH NL003 -13223 EGYPTIAN HIEROGLYPH NL004 -13224 EGYPTIAN HIEROGLYPH NL005 -13225 EGYPTIAN HIEROGLYPH NL005A -13226 EGYPTIAN HIEROGLYPH NL006 -13227 EGYPTIAN HIEROGLYPH NL007 -13228 EGYPTIAN HIEROGLYPH NL008 -13229 EGYPTIAN HIEROGLYPH NL009 -1322A EGYPTIAN HIEROGLYPH NL010 -1322B EGYPTIAN HIEROGLYPH NL011 -1322C EGYPTIAN HIEROGLYPH NL012 -1322D EGYPTIAN HIEROGLYPH NL013 -1322E EGYPTIAN HIEROGLYPH NL014 
-1322F EGYPTIAN HIEROGLYPH NL015 -13230 EGYPTIAN HIEROGLYPH NL016 -13231 EGYPTIAN HIEROGLYPH NL017 -13232 EGYPTIAN HIEROGLYPH NL017A -13233 EGYPTIAN HIEROGLYPH NL018 -13234 EGYPTIAN HIEROGLYPH NL019 -13235 EGYPTIAN HIEROGLYPH NL020 -13236 EGYPTIAN HIEROGLYPH NU001 -13237 EGYPTIAN HIEROGLYPH NU002 -13238 EGYPTIAN HIEROGLYPH NU003 -13239 EGYPTIAN HIEROGLYPH NU004 -1323A EGYPTIAN HIEROGLYPH NU005 -1323B EGYPTIAN HIEROGLYPH NU006 -1323C EGYPTIAN HIEROGLYPH NU007 -1323D EGYPTIAN HIEROGLYPH NU008 -1323E EGYPTIAN HIEROGLYPH NU009 -1323F EGYPTIAN HIEROGLYPH NU010 -13240 EGYPTIAN HIEROGLYPH NU010A -13241 EGYPTIAN HIEROGLYPH NU011 -13242 EGYPTIAN HIEROGLYPH NU011A -13243 EGYPTIAN HIEROGLYPH NU012 -13244 EGYPTIAN HIEROGLYPH NU013 -13245 EGYPTIAN HIEROGLYPH NU014 -13246 EGYPTIAN HIEROGLYPH NU015 -13247 EGYPTIAN HIEROGLYPH NU016 -13248 EGYPTIAN HIEROGLYPH NU017 -13249 EGYPTIAN HIEROGLYPH NU018 -1324A EGYPTIAN HIEROGLYPH NU018A -1324B EGYPTIAN HIEROGLYPH NU019 -1324C EGYPTIAN HIEROGLYPH NU020 -1324D EGYPTIAN HIEROGLYPH NU021 -1324E EGYPTIAN HIEROGLYPH NU022 -1324F EGYPTIAN HIEROGLYPH NU022A -13250 EGYPTIAN HIEROGLYPH O001 -13251 EGYPTIAN HIEROGLYPH O001A -13252 EGYPTIAN HIEROGLYPH O002 -13253 EGYPTIAN HIEROGLYPH O003 -13254 EGYPTIAN HIEROGLYPH O004 -13255 EGYPTIAN HIEROGLYPH O005 -13256 EGYPTIAN HIEROGLYPH O005A -13257 EGYPTIAN HIEROGLYPH O006 -13258 EGYPTIAN HIEROGLYPH O006A -13259 EGYPTIAN HIEROGLYPH O006B -1325A EGYPTIAN HIEROGLYPH O006C -1325B EGYPTIAN HIEROGLYPH O006D -1325C EGYPTIAN HIEROGLYPH O006E -1325D EGYPTIAN HIEROGLYPH O006F -1325E EGYPTIAN HIEROGLYPH O007 -1325F EGYPTIAN HIEROGLYPH O008 -13260 EGYPTIAN HIEROGLYPH O009 -13261 EGYPTIAN HIEROGLYPH O010 -13262 EGYPTIAN HIEROGLYPH O010A -13263 EGYPTIAN HIEROGLYPH O010B -13264 EGYPTIAN HIEROGLYPH O010C -13265 EGYPTIAN HIEROGLYPH O011 -13266 EGYPTIAN HIEROGLYPH O012 -13267 EGYPTIAN HIEROGLYPH O013 -13268 EGYPTIAN HIEROGLYPH O014 -13269 EGYPTIAN HIEROGLYPH O015 -1326A EGYPTIAN HIEROGLYPH O016 -1326B EGYPTIAN HIEROGLYPH 
O017 -1326C EGYPTIAN HIEROGLYPH O018 -1326D EGYPTIAN HIEROGLYPH O019 -1326E EGYPTIAN HIEROGLYPH O019A -1326F EGYPTIAN HIEROGLYPH O020 -13270 EGYPTIAN HIEROGLYPH O020A -13271 EGYPTIAN HIEROGLYPH O021 -13272 EGYPTIAN HIEROGLYPH O022 -13273 EGYPTIAN HIEROGLYPH O023 -13274 EGYPTIAN HIEROGLYPH O024 -13275 EGYPTIAN HIEROGLYPH O024A -13276 EGYPTIAN HIEROGLYPH O025 -13277 EGYPTIAN HIEROGLYPH O025A -13278 EGYPTIAN HIEROGLYPH O026 -13279 EGYPTIAN HIEROGLYPH O027 -1327A EGYPTIAN HIEROGLYPH O028 -1327B EGYPTIAN HIEROGLYPH O029 -1327C EGYPTIAN HIEROGLYPH O029A -1327D EGYPTIAN HIEROGLYPH O030 -1327E EGYPTIAN HIEROGLYPH O030A -1327F EGYPTIAN HIEROGLYPH O031 -13280 EGYPTIAN HIEROGLYPH O032 -13281 EGYPTIAN HIEROGLYPH O033 -13282 EGYPTIAN HIEROGLYPH O033A -13283 EGYPTIAN HIEROGLYPH O034 -13284 EGYPTIAN HIEROGLYPH O035 -13285 EGYPTIAN HIEROGLYPH O036 -13286 EGYPTIAN HIEROGLYPH O036A -13287 EGYPTIAN HIEROGLYPH O036B -13288 EGYPTIAN HIEROGLYPH O036C -13289 EGYPTIAN HIEROGLYPH O036D -1328A EGYPTIAN HIEROGLYPH O037 -1328B EGYPTIAN HIEROGLYPH O038 -1328C EGYPTIAN HIEROGLYPH O039 -1328D EGYPTIAN HIEROGLYPH O040 -1328E EGYPTIAN HIEROGLYPH O041 -1328F EGYPTIAN HIEROGLYPH O042 -13290 EGYPTIAN HIEROGLYPH O043 -13291 EGYPTIAN HIEROGLYPH O044 -13292 EGYPTIAN HIEROGLYPH O045 -13293 EGYPTIAN HIEROGLYPH O046 -13294 EGYPTIAN HIEROGLYPH O047 -13295 EGYPTIAN HIEROGLYPH O048 -13296 EGYPTIAN HIEROGLYPH O049 -13297 EGYPTIAN HIEROGLYPH O050 -13298 EGYPTIAN HIEROGLYPH O050A -13299 EGYPTIAN HIEROGLYPH O050B -1329A EGYPTIAN HIEROGLYPH O051 -1329B EGYPTIAN HIEROGLYPH P001 -1329C EGYPTIAN HIEROGLYPH P001A -1329D EGYPTIAN HIEROGLYPH P002 -1329E EGYPTIAN HIEROGLYPH P003 -1329F EGYPTIAN HIEROGLYPH P003A -132A0 EGYPTIAN HIEROGLYPH P004 -132A1 EGYPTIAN HIEROGLYPH P005 -132A2 EGYPTIAN HIEROGLYPH P006 -132A3 EGYPTIAN HIEROGLYPH P007 -132A4 EGYPTIAN HIEROGLYPH P008 -132A5 EGYPTIAN HIEROGLYPH P009 -132A6 EGYPTIAN HIEROGLYPH P010 -132A7 EGYPTIAN HIEROGLYPH P011 -132A8 EGYPTIAN HIEROGLYPH Q001 -132A9 EGYPTIAN HIEROGLYPH 
Q002 -132AA EGYPTIAN HIEROGLYPH Q003 -132AB EGYPTIAN HIEROGLYPH Q004 -132AC EGYPTIAN HIEROGLYPH Q005 -132AD EGYPTIAN HIEROGLYPH Q006 -132AE EGYPTIAN HIEROGLYPH Q007 -132AF EGYPTIAN HIEROGLYPH R001 -132B0 EGYPTIAN HIEROGLYPH R002 -132B1 EGYPTIAN HIEROGLYPH R002A -132B2 EGYPTIAN HIEROGLYPH R003 -132B3 EGYPTIAN HIEROGLYPH R003A -132B4 EGYPTIAN HIEROGLYPH R003B -132B5 EGYPTIAN HIEROGLYPH R004 -132B6 EGYPTIAN HIEROGLYPH R005 -132B7 EGYPTIAN HIEROGLYPH R006 -132B8 EGYPTIAN HIEROGLYPH R007 -132B9 EGYPTIAN HIEROGLYPH R008 -132BA EGYPTIAN HIEROGLYPH R009 -132BB EGYPTIAN HIEROGLYPH R010 -132BC EGYPTIAN HIEROGLYPH R010A -132BD EGYPTIAN HIEROGLYPH R011 -132BE EGYPTIAN HIEROGLYPH R012 -132BF EGYPTIAN HIEROGLYPH R013 -132C0 EGYPTIAN HIEROGLYPH R014 -132C1 EGYPTIAN HIEROGLYPH R015 -132C2 EGYPTIAN HIEROGLYPH R016 -132C3 EGYPTIAN HIEROGLYPH R016A -132C4 EGYPTIAN HIEROGLYPH R017 -132C5 EGYPTIAN HIEROGLYPH R018 -132C6 EGYPTIAN HIEROGLYPH R019 -132C7 EGYPTIAN HIEROGLYPH R020 -132C8 EGYPTIAN HIEROGLYPH R021 -132C9 EGYPTIAN HIEROGLYPH R022 -132CA EGYPTIAN HIEROGLYPH R023 -132CB EGYPTIAN HIEROGLYPH R024 -132CC EGYPTIAN HIEROGLYPH R025 -132CD EGYPTIAN HIEROGLYPH R026 -132CE EGYPTIAN HIEROGLYPH R027 -132CF EGYPTIAN HIEROGLYPH R028 -132D0 EGYPTIAN HIEROGLYPH R029 -132D1 EGYPTIAN HIEROGLYPH S001 -132D2 EGYPTIAN HIEROGLYPH S002 -132D3 EGYPTIAN HIEROGLYPH S002A -132D4 EGYPTIAN HIEROGLYPH S003 -132D5 EGYPTIAN HIEROGLYPH S004 -132D6 EGYPTIAN HIEROGLYPH S005 -132D7 EGYPTIAN HIEROGLYPH S006 -132D8 EGYPTIAN HIEROGLYPH S006A -132D9 EGYPTIAN HIEROGLYPH S007 -132DA EGYPTIAN HIEROGLYPH S008 -132DB EGYPTIAN HIEROGLYPH S009 -132DC EGYPTIAN HIEROGLYPH S010 -132DD EGYPTIAN HIEROGLYPH S011 -132DE EGYPTIAN HIEROGLYPH S012 -132DF EGYPTIAN HIEROGLYPH S013 -132E0 EGYPTIAN HIEROGLYPH S014 -132E1 EGYPTIAN HIEROGLYPH S014A -132E2 EGYPTIAN HIEROGLYPH S014B -132E3 EGYPTIAN HIEROGLYPH S015 -132E4 EGYPTIAN HIEROGLYPH S016 -132E5 EGYPTIAN HIEROGLYPH S017 -132E6 EGYPTIAN HIEROGLYPH S017A -132E7 EGYPTIAN HIEROGLYPH S018 
-132E8 EGYPTIAN HIEROGLYPH S019 -132E9 EGYPTIAN HIEROGLYPH S020 -132EA EGYPTIAN HIEROGLYPH S021 -132EB EGYPTIAN HIEROGLYPH S022 -132EC EGYPTIAN HIEROGLYPH S023 -132ED EGYPTIAN HIEROGLYPH S024 -132EE EGYPTIAN HIEROGLYPH S025 -132EF EGYPTIAN HIEROGLYPH S026 -132F0 EGYPTIAN HIEROGLYPH S026A -132F1 EGYPTIAN HIEROGLYPH S026B -132F2 EGYPTIAN HIEROGLYPH S027 -132F3 EGYPTIAN HIEROGLYPH S028 -132F4 EGYPTIAN HIEROGLYPH S029 -132F5 EGYPTIAN HIEROGLYPH S030 -132F6 EGYPTIAN HIEROGLYPH S031 -132F7 EGYPTIAN HIEROGLYPH S032 -132F8 EGYPTIAN HIEROGLYPH S033 -132F9 EGYPTIAN HIEROGLYPH S034 -132FA EGYPTIAN HIEROGLYPH S035 -132FB EGYPTIAN HIEROGLYPH S035A -132FC EGYPTIAN HIEROGLYPH S036 -132FD EGYPTIAN HIEROGLYPH S037 -132FE EGYPTIAN HIEROGLYPH S038 -132FF EGYPTIAN HIEROGLYPH S039 -13300 EGYPTIAN HIEROGLYPH S040 -13301 EGYPTIAN HIEROGLYPH S041 -13302 EGYPTIAN HIEROGLYPH S042 -13303 EGYPTIAN HIEROGLYPH S043 -13304 EGYPTIAN HIEROGLYPH S044 -13305 EGYPTIAN HIEROGLYPH S045 -13306 EGYPTIAN HIEROGLYPH S046 -13307 EGYPTIAN HIEROGLYPH T001 -13308 EGYPTIAN HIEROGLYPH T002 -13309 EGYPTIAN HIEROGLYPH T003 -1330A EGYPTIAN HIEROGLYPH T003A -1330B EGYPTIAN HIEROGLYPH T004 -1330C EGYPTIAN HIEROGLYPH T005 -1330D EGYPTIAN HIEROGLYPH T006 -1330E EGYPTIAN HIEROGLYPH T007 -1330F EGYPTIAN HIEROGLYPH T007A -13310 EGYPTIAN HIEROGLYPH T008 -13311 EGYPTIAN HIEROGLYPH T008A -13312 EGYPTIAN HIEROGLYPH T009 -13313 EGYPTIAN HIEROGLYPH T009A -13314 EGYPTIAN HIEROGLYPH T010 -13315 EGYPTIAN HIEROGLYPH T011 -13316 EGYPTIAN HIEROGLYPH T011A -13317 EGYPTIAN HIEROGLYPH T012 -13318 EGYPTIAN HIEROGLYPH T013 -13319 EGYPTIAN HIEROGLYPH T014 -1331A EGYPTIAN HIEROGLYPH T015 -1331B EGYPTIAN HIEROGLYPH T016 -1331C EGYPTIAN HIEROGLYPH T016A -1331D EGYPTIAN HIEROGLYPH T017 -1331E EGYPTIAN HIEROGLYPH T018 -1331F EGYPTIAN HIEROGLYPH T019 -13320 EGYPTIAN HIEROGLYPH T020 -13321 EGYPTIAN HIEROGLYPH T021 -13322 EGYPTIAN HIEROGLYPH T022 -13323 EGYPTIAN HIEROGLYPH T023 -13324 EGYPTIAN HIEROGLYPH T024 -13325 EGYPTIAN HIEROGLYPH T025 -13326 
EGYPTIAN HIEROGLYPH T026 -13327 EGYPTIAN HIEROGLYPH T027 -13328 EGYPTIAN HIEROGLYPH T028 -13329 EGYPTIAN HIEROGLYPH T029 -1332A EGYPTIAN HIEROGLYPH T030 -1332B EGYPTIAN HIEROGLYPH T031 -1332C EGYPTIAN HIEROGLYPH T032 -1332D EGYPTIAN HIEROGLYPH T032A -1332E EGYPTIAN HIEROGLYPH T033 -1332F EGYPTIAN HIEROGLYPH T033A -13330 EGYPTIAN HIEROGLYPH T034 -13331 EGYPTIAN HIEROGLYPH T035 -13332 EGYPTIAN HIEROGLYPH T036 -13333 EGYPTIAN HIEROGLYPH U001 -13334 EGYPTIAN HIEROGLYPH U002 -13335 EGYPTIAN HIEROGLYPH U003 -13336 EGYPTIAN HIEROGLYPH U004 -13337 EGYPTIAN HIEROGLYPH U005 -13338 EGYPTIAN HIEROGLYPH U006 -13339 EGYPTIAN HIEROGLYPH U006A -1333A EGYPTIAN HIEROGLYPH U006B -1333B EGYPTIAN HIEROGLYPH U007 -1333C EGYPTIAN HIEROGLYPH U008 -1333D EGYPTIAN HIEROGLYPH U009 -1333E EGYPTIAN HIEROGLYPH U010 -1333F EGYPTIAN HIEROGLYPH U011 -13340 EGYPTIAN HIEROGLYPH U012 -13341 EGYPTIAN HIEROGLYPH U013 -13342 EGYPTIAN HIEROGLYPH U014 -13343 EGYPTIAN HIEROGLYPH U015 -13344 EGYPTIAN HIEROGLYPH U016 -13345 EGYPTIAN HIEROGLYPH U017 -13346 EGYPTIAN HIEROGLYPH U018 -13347 EGYPTIAN HIEROGLYPH U019 -13348 EGYPTIAN HIEROGLYPH U020 -13349 EGYPTIAN HIEROGLYPH U021 -1334A EGYPTIAN HIEROGLYPH U022 -1334B EGYPTIAN HIEROGLYPH U023 -1334C EGYPTIAN HIEROGLYPH U023A -1334D EGYPTIAN HIEROGLYPH U024 -1334E EGYPTIAN HIEROGLYPH U025 -1334F EGYPTIAN HIEROGLYPH U026 -13350 EGYPTIAN HIEROGLYPH U027 -13351 EGYPTIAN HIEROGLYPH U028 -13352 EGYPTIAN HIEROGLYPH U029 -13353 EGYPTIAN HIEROGLYPH U029A -13354 EGYPTIAN HIEROGLYPH U030 -13355 EGYPTIAN HIEROGLYPH U031 -13356 EGYPTIAN HIEROGLYPH U032 -13357 EGYPTIAN HIEROGLYPH U032A -13358 EGYPTIAN HIEROGLYPH U033 -13359 EGYPTIAN HIEROGLYPH U034 -1335A EGYPTIAN HIEROGLYPH U035 -1335B EGYPTIAN HIEROGLYPH U036 -1335C EGYPTIAN HIEROGLYPH U037 -1335D EGYPTIAN HIEROGLYPH U038 -1335E EGYPTIAN HIEROGLYPH U039 -1335F EGYPTIAN HIEROGLYPH U040 -13360 EGYPTIAN HIEROGLYPH U041 -13361 EGYPTIAN HIEROGLYPH U042 -13362 EGYPTIAN HIEROGLYPH V001 -13363 EGYPTIAN HIEROGLYPH V001A -13364 
EGYPTIAN HIEROGLYPH V001B -13365 EGYPTIAN HIEROGLYPH V001C -13366 EGYPTIAN HIEROGLYPH V001D -13367 EGYPTIAN HIEROGLYPH V001E -13368 EGYPTIAN HIEROGLYPH V001F -13369 EGYPTIAN HIEROGLYPH V001G -1336A EGYPTIAN HIEROGLYPH V001H -1336B EGYPTIAN HIEROGLYPH V001I -1336C EGYPTIAN HIEROGLYPH V002 -1336D EGYPTIAN HIEROGLYPH V002A -1336E EGYPTIAN HIEROGLYPH V003 -1336F EGYPTIAN HIEROGLYPH V004 -13370 EGYPTIAN HIEROGLYPH V005 -13371 EGYPTIAN HIEROGLYPH V006 -13372 EGYPTIAN HIEROGLYPH V007 -13373 EGYPTIAN HIEROGLYPH V007A -13374 EGYPTIAN HIEROGLYPH V007B -13375 EGYPTIAN HIEROGLYPH V008 -13376 EGYPTIAN HIEROGLYPH V009 -13377 EGYPTIAN HIEROGLYPH V010 -13378 EGYPTIAN HIEROGLYPH V011 -13379 EGYPTIAN HIEROGLYPH V011A -1337A EGYPTIAN HIEROGLYPH V011B -1337B EGYPTIAN HIEROGLYPH V011C -1337C EGYPTIAN HIEROGLYPH V012 -1337D EGYPTIAN HIEROGLYPH V012A -1337E EGYPTIAN HIEROGLYPH V012B -1337F EGYPTIAN HIEROGLYPH V013 -13380 EGYPTIAN HIEROGLYPH V014 -13381 EGYPTIAN HIEROGLYPH V015 -13382 EGYPTIAN HIEROGLYPH V016 -13383 EGYPTIAN HIEROGLYPH V017 -13384 EGYPTIAN HIEROGLYPH V018 -13385 EGYPTIAN HIEROGLYPH V019 -13386 EGYPTIAN HIEROGLYPH V020 -13387 EGYPTIAN HIEROGLYPH V020A -13388 EGYPTIAN HIEROGLYPH V020B -13389 EGYPTIAN HIEROGLYPH V020C -1338A EGYPTIAN HIEROGLYPH V020D -1338B EGYPTIAN HIEROGLYPH V020E -1338C EGYPTIAN HIEROGLYPH V020F -1338D EGYPTIAN HIEROGLYPH V020G -1338E EGYPTIAN HIEROGLYPH V020H -1338F EGYPTIAN HIEROGLYPH V020I -13390 EGYPTIAN HIEROGLYPH V020J -13391 EGYPTIAN HIEROGLYPH V020K -13392 EGYPTIAN HIEROGLYPH V020L -13393 EGYPTIAN HIEROGLYPH V021 -13394 EGYPTIAN HIEROGLYPH V022 -13395 EGYPTIAN HIEROGLYPH V023 -13396 EGYPTIAN HIEROGLYPH V023A -13397 EGYPTIAN HIEROGLYPH V024 -13398 EGYPTIAN HIEROGLYPH V025 -13399 EGYPTIAN HIEROGLYPH V026 -1339A EGYPTIAN HIEROGLYPH V027 -1339B EGYPTIAN HIEROGLYPH V028 -1339C EGYPTIAN HIEROGLYPH V028A -1339D EGYPTIAN HIEROGLYPH V029 -1339E EGYPTIAN HIEROGLYPH V029A -1339F EGYPTIAN HIEROGLYPH V030 -133A0 EGYPTIAN HIEROGLYPH V030A -133A1 EGYPTIAN 
HIEROGLYPH V031 -133A2 EGYPTIAN HIEROGLYPH V031A -133A3 EGYPTIAN HIEROGLYPH V032 -133A4 EGYPTIAN HIEROGLYPH V033 -133A5 EGYPTIAN HIEROGLYPH V033A -133A6 EGYPTIAN HIEROGLYPH V034 -133A7 EGYPTIAN HIEROGLYPH V035 -133A8 EGYPTIAN HIEROGLYPH V036 -133A9 EGYPTIAN HIEROGLYPH V037 -133AA EGYPTIAN HIEROGLYPH V037A -133AB EGYPTIAN HIEROGLYPH V038 -133AC EGYPTIAN HIEROGLYPH V039 -133AD EGYPTIAN HIEROGLYPH V040 -133AE EGYPTIAN HIEROGLYPH V040A -133AF EGYPTIAN HIEROGLYPH W001 -133B0 EGYPTIAN HIEROGLYPH W002 -133B1 EGYPTIAN HIEROGLYPH W003 -133B2 EGYPTIAN HIEROGLYPH W003A -133B3 EGYPTIAN HIEROGLYPH W004 -133B4 EGYPTIAN HIEROGLYPH W005 -133B5 EGYPTIAN HIEROGLYPH W006 -133B6 EGYPTIAN HIEROGLYPH W007 -133B7 EGYPTIAN HIEROGLYPH W008 -133B8 EGYPTIAN HIEROGLYPH W009 -133B9 EGYPTIAN HIEROGLYPH W009A -133BA EGYPTIAN HIEROGLYPH W010 -133BB EGYPTIAN HIEROGLYPH W010A -133BC EGYPTIAN HIEROGLYPH W011 -133BD EGYPTIAN HIEROGLYPH W012 -133BE EGYPTIAN HIEROGLYPH W013 -133BF EGYPTIAN HIEROGLYPH W014 -133C0 EGYPTIAN HIEROGLYPH W014A -133C1 EGYPTIAN HIEROGLYPH W015 -133C2 EGYPTIAN HIEROGLYPH W016 -133C3 EGYPTIAN HIEROGLYPH W017 -133C4 EGYPTIAN HIEROGLYPH W017A -133C5 EGYPTIAN HIEROGLYPH W018 -133C6 EGYPTIAN HIEROGLYPH W018A -133C7 EGYPTIAN HIEROGLYPH W019 -133C8 EGYPTIAN HIEROGLYPH W020 -133C9 EGYPTIAN HIEROGLYPH W021 -133CA EGYPTIAN HIEROGLYPH W022 -133CB EGYPTIAN HIEROGLYPH W023 -133CC EGYPTIAN HIEROGLYPH W024 -133CD EGYPTIAN HIEROGLYPH W024A -133CE EGYPTIAN HIEROGLYPH W025 -133CF EGYPTIAN HIEROGLYPH X001 -133D0 EGYPTIAN HIEROGLYPH X002 -133D1 EGYPTIAN HIEROGLYPH X003 -133D2 EGYPTIAN HIEROGLYPH X004 -133D3 EGYPTIAN HIEROGLYPH X004A -133D4 EGYPTIAN HIEROGLYPH X004B -133D5 EGYPTIAN HIEROGLYPH X005 -133D6 EGYPTIAN HIEROGLYPH X006 -133D7 EGYPTIAN HIEROGLYPH X006A -133D8 EGYPTIAN HIEROGLYPH X007 -133D9 EGYPTIAN HIEROGLYPH X008 -133DA EGYPTIAN HIEROGLYPH X008A -133DB EGYPTIAN HIEROGLYPH Y001 -133DC EGYPTIAN HIEROGLYPH Y001A -133DD EGYPTIAN HIEROGLYPH Y002 -133DE EGYPTIAN HIEROGLYPH Y003 -133DF EGYPTIAN 
HIEROGLYPH Y004 -133E0 EGYPTIAN HIEROGLYPH Y005 -133E1 EGYPTIAN HIEROGLYPH Y006 -133E2 EGYPTIAN HIEROGLYPH Y007 -133E3 EGYPTIAN HIEROGLYPH Y008 -133E4 EGYPTIAN HIEROGLYPH Z001 -133E5 EGYPTIAN HIEROGLYPH Z002 -133E6 EGYPTIAN HIEROGLYPH Z002A -133E7 EGYPTIAN HIEROGLYPH Z002B -133E8 EGYPTIAN HIEROGLYPH Z002C -133E9 EGYPTIAN HIEROGLYPH Z002D -133EA EGYPTIAN HIEROGLYPH Z003 -133EB EGYPTIAN HIEROGLYPH Z003A -133EC EGYPTIAN HIEROGLYPH Z003B -133ED EGYPTIAN HIEROGLYPH Z004 -133EE EGYPTIAN HIEROGLYPH Z004A -133EF EGYPTIAN HIEROGLYPH Z005 -133F0 EGYPTIAN HIEROGLYPH Z005A -133F1 EGYPTIAN HIEROGLYPH Z006 -133F2 EGYPTIAN HIEROGLYPH Z007 -133F3 EGYPTIAN HIEROGLYPH Z008 -133F4 EGYPTIAN HIEROGLYPH Z009 -133F5 EGYPTIAN HIEROGLYPH Z010 -133F6 EGYPTIAN HIEROGLYPH Z011 -133F7 EGYPTIAN HIEROGLYPH Z012 -133F8 EGYPTIAN HIEROGLYPH Z013 -133F9 EGYPTIAN HIEROGLYPH Z014 -133FA EGYPTIAN HIEROGLYPH Z015 -133FB EGYPTIAN HIEROGLYPH Z015A -133FC EGYPTIAN HIEROGLYPH Z015B -133FD EGYPTIAN HIEROGLYPH Z015C -133FE EGYPTIAN HIEROGLYPH Z015D -133FF EGYPTIAN HIEROGLYPH Z015E -13400 EGYPTIAN HIEROGLYPH Z015F -13401 EGYPTIAN HIEROGLYPH Z015G -13402 EGYPTIAN HIEROGLYPH Z015H -13403 EGYPTIAN HIEROGLYPH Z015I -13404 EGYPTIAN HIEROGLYPH Z016 -13405 EGYPTIAN HIEROGLYPH Z016A -13406 EGYPTIAN HIEROGLYPH Z016B -13407 EGYPTIAN HIEROGLYPH Z016C -13408 EGYPTIAN HIEROGLYPH Z016D -13409 EGYPTIAN HIEROGLYPH Z016E -1340A EGYPTIAN HIEROGLYPH Z016F -1340B EGYPTIAN HIEROGLYPH Z016G -1340C EGYPTIAN HIEROGLYPH Z016H -1340D EGYPTIAN HIEROGLYPH AA001 -1340E EGYPTIAN HIEROGLYPH AA002 -1340F EGYPTIAN HIEROGLYPH AA003 -13410 EGYPTIAN HIEROGLYPH AA004 -13411 EGYPTIAN HIEROGLYPH AA005 -13412 EGYPTIAN HIEROGLYPH AA006 -13413 EGYPTIAN HIEROGLYPH AA007 -13414 EGYPTIAN HIEROGLYPH AA007A -13415 EGYPTIAN HIEROGLYPH AA007B -13416 EGYPTIAN HIEROGLYPH AA008 -13417 EGYPTIAN HIEROGLYPH AA009 -13418 EGYPTIAN HIEROGLYPH AA010 -13419 EGYPTIAN HIEROGLYPH AA011 -1341A EGYPTIAN HIEROGLYPH AA012 -1341B EGYPTIAN HIEROGLYPH AA013 -1341C EGYPTIAN 
HIEROGLYPH AA014 -1341D EGYPTIAN HIEROGLYPH AA015 -1341E EGYPTIAN HIEROGLYPH AA016 -1341F EGYPTIAN HIEROGLYPH AA017 -13420 EGYPTIAN HIEROGLYPH AA018 -13421 EGYPTIAN HIEROGLYPH AA019 -13422 EGYPTIAN HIEROGLYPH AA020 -13423 EGYPTIAN HIEROGLYPH AA021 -13424 EGYPTIAN HIEROGLYPH AA022 -13425 EGYPTIAN HIEROGLYPH AA023 -13426 EGYPTIAN HIEROGLYPH AA024 -13427 EGYPTIAN HIEROGLYPH AA025 -13428 EGYPTIAN HIEROGLYPH AA026 -13429 EGYPTIAN HIEROGLYPH AA027 -1342A EGYPTIAN HIEROGLYPH AA028 -1342B EGYPTIAN HIEROGLYPH AA029 -1342C EGYPTIAN HIEROGLYPH AA030 -1342D EGYPTIAN HIEROGLYPH AA031 -1342E EGYPTIAN HIEROGLYPH AA032 -1D000 BYZANTINE MUSICAL SYMBOL PSILI -1D001 BYZANTINE MUSICAL SYMBOL DASEIA -1D002 BYZANTINE MUSICAL SYMBOL PERISPOMENI -1D003 BYZANTINE MUSICAL SYMBOL OXEIA EKFONITIKON -1D004 BYZANTINE MUSICAL SYMBOL OXEIA DIPLI -1D005 BYZANTINE MUSICAL SYMBOL VAREIA EKFONITIKON -1D006 BYZANTINE MUSICAL SYMBOL VAREIA DIPLI -1D007 BYZANTINE MUSICAL SYMBOL KATHISTI -1D008 BYZANTINE MUSICAL SYMBOL SYRMATIKI -1D009 BYZANTINE MUSICAL SYMBOL PARAKLITIKI -1D00A BYZANTINE MUSICAL SYMBOL YPOKRISIS -1D00B BYZANTINE MUSICAL SYMBOL YPOKRISIS DIPLI -1D00C BYZANTINE MUSICAL SYMBOL KREMASTI -1D00D BYZANTINE MUSICAL SYMBOL APESO EKFONITIKON -1D00E BYZANTINE MUSICAL SYMBOL EXO EKFONITIKON -1D00F BYZANTINE MUSICAL SYMBOL TELEIA -1D010 BYZANTINE MUSICAL SYMBOL KENTIMATA -1D011 BYZANTINE MUSICAL SYMBOL APOSTROFOS -1D012 BYZANTINE MUSICAL SYMBOL APOSTROFOS DIPLI -1D013 BYZANTINE MUSICAL SYMBOL SYNEVMA -1D014 BYZANTINE MUSICAL SYMBOL THITA -1D015 BYZANTINE MUSICAL SYMBOL OLIGON ARCHAION -1D016 BYZANTINE MUSICAL SYMBOL GORGON ARCHAION -1D017 BYZANTINE MUSICAL SYMBOL PSILON -1D018 BYZANTINE MUSICAL SYMBOL CHAMILON -1D019 BYZANTINE MUSICAL SYMBOL VATHY -1D01A BYZANTINE MUSICAL SYMBOL ISON ARCHAION -1D01B BYZANTINE MUSICAL SYMBOL KENTIMA ARCHAION -1D01C BYZANTINE MUSICAL SYMBOL KENTIMATA ARCHAION -1D01D BYZANTINE MUSICAL SYMBOL SAXIMATA -1D01E BYZANTINE MUSICAL SYMBOL PARICHON -1D01F BYZANTINE MUSICAL 
SYMBOL STAVROS APODEXIA -1D020 BYZANTINE MUSICAL SYMBOL OXEIAI ARCHAION -1D021 BYZANTINE MUSICAL SYMBOL VAREIAI ARCHAION -1D022 BYZANTINE MUSICAL SYMBOL APODERMA ARCHAION -1D023 BYZANTINE MUSICAL SYMBOL APOTHEMA -1D024 BYZANTINE MUSICAL SYMBOL KLASMA -1D025 BYZANTINE MUSICAL SYMBOL REVMA -1D026 BYZANTINE MUSICAL SYMBOL PIASMA ARCHAION -1D027 BYZANTINE MUSICAL SYMBOL TINAGMA -1D028 BYZANTINE MUSICAL SYMBOL ANATRICHISMA -1D029 BYZANTINE MUSICAL SYMBOL SEISMA -1D02A BYZANTINE MUSICAL SYMBOL SYNAGMA ARCHAION -1D02B BYZANTINE MUSICAL SYMBOL SYNAGMA META STAVROU -1D02C BYZANTINE MUSICAL SYMBOL OYRANISMA ARCHAION -1D02D BYZANTINE MUSICAL SYMBOL THEMA -1D02E BYZANTINE MUSICAL SYMBOL LEMOI -1D02F BYZANTINE MUSICAL SYMBOL DYO -1D030 BYZANTINE MUSICAL SYMBOL TRIA -1D031 BYZANTINE MUSICAL SYMBOL TESSERA -1D032 BYZANTINE MUSICAL SYMBOL KRATIMATA -1D033 BYZANTINE MUSICAL SYMBOL APESO EXO NEO -1D034 BYZANTINE MUSICAL SYMBOL FTHORA ARCHAION -1D035 BYZANTINE MUSICAL SYMBOL IMIFTHORA -1D036 BYZANTINE MUSICAL SYMBOL TROMIKON ARCHAION -1D037 BYZANTINE MUSICAL SYMBOL KATAVA TROMIKON -1D038 BYZANTINE MUSICAL SYMBOL PELASTON -1D039 BYZANTINE MUSICAL SYMBOL PSIFISTON -1D03A BYZANTINE MUSICAL SYMBOL KONTEVMA -1D03B BYZANTINE MUSICAL SYMBOL CHOREVMA ARCHAION -1D03C BYZANTINE MUSICAL SYMBOL RAPISMA -1D03D BYZANTINE MUSICAL SYMBOL PARAKALESMA ARCHAION -1D03E BYZANTINE MUSICAL SYMBOL PARAKLITIKI ARCHAION -1D03F BYZANTINE MUSICAL SYMBOL ICHADIN -1D040 BYZANTINE MUSICAL SYMBOL NANA -1D041 BYZANTINE MUSICAL SYMBOL PETASMA -1D042 BYZANTINE MUSICAL SYMBOL KONTEVMA ALLO -1D043 BYZANTINE MUSICAL SYMBOL TROMIKON ALLO -1D044 BYZANTINE MUSICAL SYMBOL STRAGGISMATA -1D045 BYZANTINE MUSICAL SYMBOL GRONTHISMATA -1D046 BYZANTINE MUSICAL SYMBOL ISON NEO -1D047 BYZANTINE MUSICAL SYMBOL OLIGON NEO -1D048 BYZANTINE MUSICAL SYMBOL OXEIA NEO -1D049 BYZANTINE MUSICAL SYMBOL PETASTI -1D04A BYZANTINE MUSICAL SYMBOL KOUFISMA -1D04B BYZANTINE MUSICAL SYMBOL PETASTOKOUFISMA -1D04C BYZANTINE MUSICAL SYMBOL 
KRATIMOKOUFISMA -1D04D BYZANTINE MUSICAL SYMBOL PELASTON NEO -1D04E BYZANTINE MUSICAL SYMBOL KENTIMATA NEO ANO -1D04F BYZANTINE MUSICAL SYMBOL KENTIMA NEO ANO -1D050 BYZANTINE MUSICAL SYMBOL YPSILI -1D051 BYZANTINE MUSICAL SYMBOL APOSTROFOS NEO -1D052 BYZANTINE MUSICAL SYMBOL APOSTROFOI SYNDESMOS NEO -1D053 BYZANTINE MUSICAL SYMBOL YPORROI -1D054 BYZANTINE MUSICAL SYMBOL KRATIMOYPORROON -1D055 BYZANTINE MUSICAL SYMBOL ELAFRON -1D056 BYZANTINE MUSICAL SYMBOL CHAMILI -1D057 BYZANTINE MUSICAL SYMBOL MIKRON ISON -1D058 BYZANTINE MUSICAL SYMBOL VAREIA NEO -1D059 BYZANTINE MUSICAL SYMBOL PIASMA NEO -1D05A BYZANTINE MUSICAL SYMBOL PSIFISTON NEO -1D05B BYZANTINE MUSICAL SYMBOL OMALON -1D05C BYZANTINE MUSICAL SYMBOL ANTIKENOMA -1D05D BYZANTINE MUSICAL SYMBOL LYGISMA -1D05E BYZANTINE MUSICAL SYMBOL PARAKLITIKI NEO -1D05F BYZANTINE MUSICAL SYMBOL PARAKALESMA NEO -1D060 BYZANTINE MUSICAL SYMBOL ETERON PARAKALESMA -1D061 BYZANTINE MUSICAL SYMBOL KYLISMA -1D062 BYZANTINE MUSICAL SYMBOL ANTIKENOKYLISMA -1D063 BYZANTINE MUSICAL SYMBOL TROMIKON NEO -1D064 BYZANTINE MUSICAL SYMBOL EKSTREPTON -1D065 BYZANTINE MUSICAL SYMBOL SYNAGMA NEO -1D066 BYZANTINE MUSICAL SYMBOL SYRMA -1D067 BYZANTINE MUSICAL SYMBOL CHOREVMA NEO -1D068 BYZANTINE MUSICAL SYMBOL EPEGERMA -1D069 BYZANTINE MUSICAL SYMBOL SEISMA NEO -1D06A BYZANTINE MUSICAL SYMBOL XIRON KLASMA -1D06B BYZANTINE MUSICAL SYMBOL TROMIKOPSIFISTON -1D06C BYZANTINE MUSICAL SYMBOL PSIFISTOLYGISMA -1D06D BYZANTINE MUSICAL SYMBOL TROMIKOLYGISMA -1D06E BYZANTINE MUSICAL SYMBOL TROMIKOPARAKALESMA -1D06F BYZANTINE MUSICAL SYMBOL PSIFISTOPARAKALESMA -1D070 BYZANTINE MUSICAL SYMBOL TROMIKOSYNAGMA -1D071 BYZANTINE MUSICAL SYMBOL PSIFISTOSYNAGMA -1D072 BYZANTINE MUSICAL SYMBOL GORGOSYNTHETON -1D073 BYZANTINE MUSICAL SYMBOL ARGOSYNTHETON -1D074 BYZANTINE MUSICAL SYMBOL ETERON ARGOSYNTHETON -1D075 BYZANTINE MUSICAL SYMBOL OYRANISMA NEO -1D076 BYZANTINE MUSICAL SYMBOL THEMATISMOS ESO -1D077 BYZANTINE MUSICAL SYMBOL THEMATISMOS EXO -1D078 BYZANTINE 
MUSICAL SYMBOL THEMA APLOUN -1D079 BYZANTINE MUSICAL SYMBOL THES KAI APOTHES -1D07A BYZANTINE MUSICAL SYMBOL KATAVASMA -1D07B BYZANTINE MUSICAL SYMBOL ENDOFONON -1D07C BYZANTINE MUSICAL SYMBOL YFEN KATO -1D07D BYZANTINE MUSICAL SYMBOL YFEN ANO -1D07E BYZANTINE MUSICAL SYMBOL STAVROS -1D07F BYZANTINE MUSICAL SYMBOL KLASMA ANO -1D080 BYZANTINE MUSICAL SYMBOL DIPLI ARCHAION -1D081 BYZANTINE MUSICAL SYMBOL KRATIMA ARCHAION -1D082 BYZANTINE MUSICAL SYMBOL KRATIMA ALLO -1D083 BYZANTINE MUSICAL SYMBOL KRATIMA NEO -1D084 BYZANTINE MUSICAL SYMBOL APODERMA NEO -1D085 BYZANTINE MUSICAL SYMBOL APLI -1D086 BYZANTINE MUSICAL SYMBOL DIPLI -1D087 BYZANTINE MUSICAL SYMBOL TRIPLI -1D088 BYZANTINE MUSICAL SYMBOL TETRAPLI -1D089 BYZANTINE MUSICAL SYMBOL KORONIS -1D08A BYZANTINE MUSICAL SYMBOL LEIMMA ENOS CHRONOU -1D08B BYZANTINE MUSICAL SYMBOL LEIMMA DYO CHRONON -1D08C BYZANTINE MUSICAL SYMBOL LEIMMA TRION CHRONON -1D08D BYZANTINE MUSICAL SYMBOL LEIMMA TESSARON CHRONON -1D08E BYZANTINE MUSICAL SYMBOL LEIMMA IMISEOS CHRONOU -1D08F BYZANTINE MUSICAL SYMBOL GORGON NEO ANO -1D090 BYZANTINE MUSICAL SYMBOL GORGON PARESTIGMENON ARISTERA -1D091 BYZANTINE MUSICAL SYMBOL GORGON PARESTIGMENON DEXIA -1D092 BYZANTINE MUSICAL SYMBOL DIGORGON -1D093 BYZANTINE MUSICAL SYMBOL DIGORGON PARESTIGMENON ARISTERA KATO -1D094 BYZANTINE MUSICAL SYMBOL DIGORGON PARESTIGMENON ARISTERA ANO -1D095 BYZANTINE MUSICAL SYMBOL DIGORGON PARESTIGMENON DEXIA -1D096 BYZANTINE MUSICAL SYMBOL TRIGORGON -1D097 BYZANTINE MUSICAL SYMBOL ARGON -1D098 BYZANTINE MUSICAL SYMBOL IMIDIARGON -1D099 BYZANTINE MUSICAL SYMBOL DIARGON -1D09A BYZANTINE MUSICAL SYMBOL AGOGI POLI ARGI -1D09B BYZANTINE MUSICAL SYMBOL AGOGI ARGOTERI -1D09C BYZANTINE MUSICAL SYMBOL AGOGI ARGI -1D09D BYZANTINE MUSICAL SYMBOL AGOGI METRIA -1D09E BYZANTINE MUSICAL SYMBOL AGOGI MESI -1D09F BYZANTINE MUSICAL SYMBOL AGOGI GORGI -1D0A0 BYZANTINE MUSICAL SYMBOL AGOGI GORGOTERI -1D0A1 BYZANTINE MUSICAL SYMBOL AGOGI POLI GORGI -1D0A2 BYZANTINE MUSICAL SYMBOL MARTYRIA 
PROTOS ICHOS -1D0A3 BYZANTINE MUSICAL SYMBOL MARTYRIA ALLI PROTOS ICHOS -1D0A4 BYZANTINE MUSICAL SYMBOL MARTYRIA DEYTEROS ICHOS -1D0A5 BYZANTINE MUSICAL SYMBOL MARTYRIA ALLI DEYTEROS ICHOS -1D0A6 BYZANTINE MUSICAL SYMBOL MARTYRIA TRITOS ICHOS -1D0A7 BYZANTINE MUSICAL SYMBOL MARTYRIA TRIFONIAS -1D0A8 BYZANTINE MUSICAL SYMBOL MARTYRIA TETARTOS ICHOS -1D0A9 BYZANTINE MUSICAL SYMBOL MARTYRIA TETARTOS LEGETOS ICHOS -1D0AA BYZANTINE MUSICAL SYMBOL MARTYRIA LEGETOS ICHOS -1D0AB BYZANTINE MUSICAL SYMBOL MARTYRIA PLAGIOS ICHOS -1D0AC BYZANTINE MUSICAL SYMBOL ISAKIA TELOUS ICHIMATOS -1D0AD BYZANTINE MUSICAL SYMBOL APOSTROFOI TELOUS ICHIMATOS -1D0AE BYZANTINE MUSICAL SYMBOL FANEROSIS TETRAFONIAS -1D0AF BYZANTINE MUSICAL SYMBOL FANEROSIS MONOFONIAS -1D0B0 BYZANTINE MUSICAL SYMBOL FANEROSIS DIFONIAS -1D0B1 BYZANTINE MUSICAL SYMBOL MARTYRIA VARYS ICHOS -1D0B2 BYZANTINE MUSICAL SYMBOL MARTYRIA PROTOVARYS ICHOS -1D0B3 BYZANTINE MUSICAL SYMBOL MARTYRIA PLAGIOS TETARTOS ICHOS -1D0B4 BYZANTINE MUSICAL SYMBOL GORTHMIKON N APLOUN -1D0B5 BYZANTINE MUSICAL SYMBOL GORTHMIKON N DIPLOUN -1D0B6 BYZANTINE MUSICAL SYMBOL ENARXIS KAI FTHORA VOU -1D0B7 BYZANTINE MUSICAL SYMBOL IMIFONON -1D0B8 BYZANTINE MUSICAL SYMBOL IMIFTHORON -1D0B9 BYZANTINE MUSICAL SYMBOL FTHORA ARCHAION DEYTEROU ICHOU -1D0BA BYZANTINE MUSICAL SYMBOL FTHORA DIATONIKI PA -1D0BB BYZANTINE MUSICAL SYMBOL FTHORA DIATONIKI NANA -1D0BC BYZANTINE MUSICAL SYMBOL FTHORA NAOS ICHOS -1D0BD BYZANTINE MUSICAL SYMBOL FTHORA DIATONIKI DI -1D0BE BYZANTINE MUSICAL SYMBOL FTHORA SKLIRON DIATONON DI -1D0BF BYZANTINE MUSICAL SYMBOL FTHORA DIATONIKI KE -1D0C0 BYZANTINE MUSICAL SYMBOL FTHORA DIATONIKI ZO -1D0C1 BYZANTINE MUSICAL SYMBOL FTHORA DIATONIKI NI KATO -1D0C2 BYZANTINE MUSICAL SYMBOL FTHORA DIATONIKI NI ANO -1D0C3 BYZANTINE MUSICAL SYMBOL FTHORA MALAKON CHROMA DIFONIAS -1D0C4 BYZANTINE MUSICAL SYMBOL FTHORA MALAKON CHROMA MONOFONIAS -1D0C5 BYZANTINE MUSICAL SYMBOL FHTORA SKLIRON CHROMA VASIS -1D0C6 BYZANTINE MUSICAL SYMBOL FTHORA SKLIRON 
CHROMA SYNAFI -1D0C7 BYZANTINE MUSICAL SYMBOL FTHORA NENANO -1D0C8 BYZANTINE MUSICAL SYMBOL CHROA ZYGOS -1D0C9 BYZANTINE MUSICAL SYMBOL CHROA KLITON -1D0CA BYZANTINE MUSICAL SYMBOL CHROA SPATHI -1D0CB BYZANTINE MUSICAL SYMBOL FTHORA I YFESIS TETARTIMORION -1D0CC BYZANTINE MUSICAL SYMBOL FTHORA ENARMONIOS ANTIFONIA -1D0CD BYZANTINE MUSICAL SYMBOL YFESIS TRITIMORION -1D0CE BYZANTINE MUSICAL SYMBOL DIESIS TRITIMORION -1D0CF BYZANTINE MUSICAL SYMBOL DIESIS TETARTIMORION -1D0D0 BYZANTINE MUSICAL SYMBOL DIESIS APLI DYO DODEKATA -1D0D1 BYZANTINE MUSICAL SYMBOL DIESIS MONOGRAMMOS TESSERA DODEKATA -1D0D2 BYZANTINE MUSICAL SYMBOL DIESIS DIGRAMMOS EX DODEKATA -1D0D3 BYZANTINE MUSICAL SYMBOL DIESIS TRIGRAMMOS OKTO DODEKATA -1D0D4 BYZANTINE MUSICAL SYMBOL YFESIS APLI DYO DODEKATA -1D0D5 BYZANTINE MUSICAL SYMBOL YFESIS MONOGRAMMOS TESSERA DODEKATA -1D0D6 BYZANTINE MUSICAL SYMBOL YFESIS DIGRAMMOS EX DODEKATA -1D0D7 BYZANTINE MUSICAL SYMBOL YFESIS TRIGRAMMOS OKTO DODEKATA -1D0D8 BYZANTINE MUSICAL SYMBOL GENIKI DIESIS -1D0D9 BYZANTINE MUSICAL SYMBOL GENIKI YFESIS -1D0DA BYZANTINE MUSICAL SYMBOL DIASTOLI APLI MIKRI -1D0DB BYZANTINE MUSICAL SYMBOL DIASTOLI APLI MEGALI -1D0DC BYZANTINE MUSICAL SYMBOL DIASTOLI DIPLI -1D0DD BYZANTINE MUSICAL SYMBOL DIASTOLI THESEOS -1D0DE BYZANTINE MUSICAL SYMBOL SIMANSIS THESEOS -1D0DF BYZANTINE MUSICAL SYMBOL SIMANSIS THESEOS DISIMOU -1D0E0 BYZANTINE MUSICAL SYMBOL SIMANSIS THESEOS TRISIMOU -1D0E1 BYZANTINE MUSICAL SYMBOL SIMANSIS THESEOS TETRASIMOU -1D0E2 BYZANTINE MUSICAL SYMBOL SIMANSIS ARSEOS -1D0E3 BYZANTINE MUSICAL SYMBOL SIMANSIS ARSEOS DISIMOU -1D0E4 BYZANTINE MUSICAL SYMBOL SIMANSIS ARSEOS TRISIMOU -1D0E5 BYZANTINE MUSICAL SYMBOL SIMANSIS ARSEOS TETRASIMOU -1D0E6 BYZANTINE MUSICAL SYMBOL DIGRAMMA GG -1D0E7 BYZANTINE MUSICAL SYMBOL DIFTOGGOS OU -1D0E8 BYZANTINE MUSICAL SYMBOL STIGMA -1D0E9 BYZANTINE MUSICAL SYMBOL ARKTIKO PA -1D0EA BYZANTINE MUSICAL SYMBOL ARKTIKO VOU -1D0EB BYZANTINE MUSICAL SYMBOL ARKTIKO GA -1D0EC BYZANTINE MUSICAL SYMBOL 
ARKTIKO DI -1D0ED BYZANTINE MUSICAL SYMBOL ARKTIKO KE -1D0EE BYZANTINE MUSICAL SYMBOL ARKTIKO ZO -1D0EF BYZANTINE MUSICAL SYMBOL ARKTIKO NI -1D0F0 BYZANTINE MUSICAL SYMBOL KENTIMATA NEO MESO -1D0F1 BYZANTINE MUSICAL SYMBOL KENTIMA NEO MESO -1D0F2 BYZANTINE MUSICAL SYMBOL KENTIMATA NEO KATO -1D0F3 BYZANTINE MUSICAL SYMBOL KENTIMA NEO KATO -1D0F4 BYZANTINE MUSICAL SYMBOL KLASMA KATO -1D0F5 BYZANTINE MUSICAL SYMBOL GORGON NEO KATO -1D100 MUSICAL SYMBOL SINGLE BARLINE -1D101 MUSICAL SYMBOL DOUBLE BARLINE -1D102 MUSICAL SYMBOL FINAL BARLINE -1D103 MUSICAL SYMBOL REVERSE FINAL BARLINE -1D104 MUSICAL SYMBOL DASHED BARLINE -1D105 MUSICAL SYMBOL SHORT BARLINE -1D106 MUSICAL SYMBOL LEFT REPEAT SIGN -1D107 MUSICAL SYMBOL RIGHT REPEAT SIGN -1D108 MUSICAL SYMBOL REPEAT DOTS -1D109 MUSICAL SYMBOL DAL SEGNO -1D10A MUSICAL SYMBOL DA CAPO -1D10B MUSICAL SYMBOL SEGNO -1D10C MUSICAL SYMBOL CODA -1D10D MUSICAL SYMBOL REPEATED FIGURE-1 -1D10E MUSICAL SYMBOL REPEATED FIGURE-2 -1D10F MUSICAL SYMBOL REPEATED FIGURE-3 -1D110 MUSICAL SYMBOL FERMATA -1D111 MUSICAL SYMBOL FERMATA BELOW -1D112 MUSICAL SYMBOL BREATH MARK -1D113 MUSICAL SYMBOL CAESURA -1D114 MUSICAL SYMBOL BRACE -1D115 MUSICAL SYMBOL BRACKET -1D116 MUSICAL SYMBOL ONE-LINE STAFF -1D117 MUSICAL SYMBOL TWO-LINE STAFF -1D118 MUSICAL SYMBOL THREE-LINE STAFF -1D119 MUSICAL SYMBOL FOUR-LINE STAFF -1D11A MUSICAL SYMBOL FIVE-LINE STAFF -1D11B MUSICAL SYMBOL SIX-LINE STAFF -1D11C MUSICAL SYMBOL SIX-STRING FRETBOARD -1D11D MUSICAL SYMBOL FOUR-STRING FRETBOARD -1D11E MUSICAL SYMBOL G CLEF -1D11F MUSICAL SYMBOL G CLEF OTTAVA ALTA -1D120 MUSICAL SYMBOL G CLEF OTTAVA BASSA -1D121 MUSICAL SYMBOL C CLEF -1D122 MUSICAL SYMBOL F CLEF -1D123 MUSICAL SYMBOL F CLEF OTTAVA ALTA -1D124 MUSICAL SYMBOL F CLEF OTTAVA BASSA -1D125 MUSICAL SYMBOL DRUM CLEF-1 -1D126 MUSICAL SYMBOL DRUM CLEF-2 -1D129 MUSICAL SYMBOL MULTIPLE MEASURE REST -1D12A MUSICAL SYMBOL DOUBLE SHARP -1D12B MUSICAL SYMBOL DOUBLE FLAT -1D12C MUSICAL SYMBOL FLAT UP -1D12D MUSICAL SYMBOL 
FLAT DOWN -1D12E MUSICAL SYMBOL NATURAL UP -1D12F MUSICAL SYMBOL NATURAL DOWN -1D130 MUSICAL SYMBOL SHARP UP -1D131 MUSICAL SYMBOL SHARP DOWN -1D132 MUSICAL SYMBOL QUARTER TONE SHARP -1D133 MUSICAL SYMBOL QUARTER TONE FLAT -1D134 MUSICAL SYMBOL COMMON TIME -1D135 MUSICAL SYMBOL CUT TIME -1D136 MUSICAL SYMBOL OTTAVA ALTA -1D137 MUSICAL SYMBOL OTTAVA BASSA -1D138 MUSICAL SYMBOL QUINDICESIMA ALTA -1D139 MUSICAL SYMBOL QUINDICESIMA BASSA -1D13A MUSICAL SYMBOL MULTI REST -1D13B MUSICAL SYMBOL WHOLE REST -1D13C MUSICAL SYMBOL HALF REST -1D13D MUSICAL SYMBOL QUARTER REST -1D13E MUSICAL SYMBOL EIGHTH REST -1D13F MUSICAL SYMBOL SIXTEENTH REST -1D140 MUSICAL SYMBOL THIRTY-SECOND REST -1D141 MUSICAL SYMBOL SIXTY-FOURTH REST -1D142 MUSICAL SYMBOL ONE HUNDRED TWENTY-EIGHTH REST -1D143 MUSICAL SYMBOL X NOTEHEAD -1D144 MUSICAL SYMBOL PLUS NOTEHEAD -1D145 MUSICAL SYMBOL CIRCLE X NOTEHEAD -1D146 MUSICAL SYMBOL SQUARE NOTEHEAD WHITE -1D147 MUSICAL SYMBOL SQUARE NOTEHEAD BLACK -1D148 MUSICAL SYMBOL TRIANGLE NOTEHEAD UP WHITE -1D149 MUSICAL SYMBOL TRIANGLE NOTEHEAD UP BLACK -1D14A MUSICAL SYMBOL TRIANGLE NOTEHEAD LEFT WHITE -1D14B MUSICAL SYMBOL TRIANGLE NOTEHEAD LEFT BLACK -1D14C MUSICAL SYMBOL TRIANGLE NOTEHEAD RIGHT WHITE -1D14D MUSICAL SYMBOL TRIANGLE NOTEHEAD RIGHT BLACK -1D14E MUSICAL SYMBOL TRIANGLE NOTEHEAD DOWN WHITE -1D14F MUSICAL SYMBOL TRIANGLE NOTEHEAD DOWN BLACK -1D150 MUSICAL SYMBOL TRIANGLE NOTEHEAD UP RIGHT WHITE -1D151 MUSICAL SYMBOL TRIANGLE NOTEHEAD UP RIGHT BLACK -1D152 MUSICAL SYMBOL MOON NOTEHEAD WHITE -1D153 MUSICAL SYMBOL MOON NOTEHEAD BLACK -1D154 MUSICAL SYMBOL TRIANGLE-ROUND NOTEHEAD DOWN WHITE -1D155 MUSICAL SYMBOL TRIANGLE-ROUND NOTEHEAD DOWN BLACK -1D156 MUSICAL SYMBOL PARENTHESIS NOTEHEAD -1D157 MUSICAL SYMBOL VOID NOTEHEAD -1D158 MUSICAL SYMBOL NOTEHEAD BLACK -1D159 MUSICAL SYMBOL NULL NOTEHEAD -1D15A MUSICAL SYMBOL CLUSTER NOTEHEAD WHITE -1D15B MUSICAL SYMBOL CLUSTER NOTEHEAD BLACK -1D15C MUSICAL SYMBOL BREVE -1D15D MUSICAL SYMBOL WHOLE NOTE -1D15E 
MUSICAL SYMBOL HALF NOTE -1D15F MUSICAL SYMBOL QUARTER NOTE -1D160 MUSICAL SYMBOL EIGHTH NOTE -1D161 MUSICAL SYMBOL SIXTEENTH NOTE -1D162 MUSICAL SYMBOL THIRTY-SECOND NOTE -1D163 MUSICAL SYMBOL SIXTY-FOURTH NOTE -1D164 MUSICAL SYMBOL ONE HUNDRED TWENTY-EIGHTH NOTE -1D165 MUSICAL SYMBOL COMBINING STEM -1D166 MUSICAL SYMBOL COMBINING SPRECHGESANG STEM -1D167 MUSICAL SYMBOL COMBINING TREMOLO-1 -1D168 MUSICAL SYMBOL COMBINING TREMOLO-2 -1D169 MUSICAL SYMBOL COMBINING TREMOLO-3 -1D16A MUSICAL SYMBOL FINGERED TREMOLO-1 -1D16B MUSICAL SYMBOL FINGERED TREMOLO-2 -1D16C MUSICAL SYMBOL FINGERED TREMOLO-3 -1D16D MUSICAL SYMBOL COMBINING AUGMENTATION DOT -1D16E MUSICAL SYMBOL COMBINING FLAG-1 -1D16F MUSICAL SYMBOL COMBINING FLAG-2 -1D170 MUSICAL SYMBOL COMBINING FLAG-3 -1D171 MUSICAL SYMBOL COMBINING FLAG-4 -1D172 MUSICAL SYMBOL COMBINING FLAG-5 -1D173 MUSICAL SYMBOL BEGIN BEAM -1D174 MUSICAL SYMBOL END BEAM -1D175 MUSICAL SYMBOL BEGIN TIE -1D176 MUSICAL SYMBOL END TIE -1D177 MUSICAL SYMBOL BEGIN SLUR -1D178 MUSICAL SYMBOL END SLUR -1D179 MUSICAL SYMBOL BEGIN PHRASE -1D17A MUSICAL SYMBOL END PHRASE -1D17B MUSICAL SYMBOL COMBINING ACCENT -1D17C MUSICAL SYMBOL COMBINING STACCATO -1D17D MUSICAL SYMBOL COMBINING TENUTO -1D17E MUSICAL SYMBOL COMBINING STACCATISSIMO -1D17F MUSICAL SYMBOL COMBINING MARCATO -1D180 MUSICAL SYMBOL COMBINING MARCATO-STACCATO -1D181 MUSICAL SYMBOL COMBINING ACCENT-STACCATO -1D182 MUSICAL SYMBOL COMBINING LOURE -1D183 MUSICAL SYMBOL ARPEGGIATO UP -1D184 MUSICAL SYMBOL ARPEGGIATO DOWN -1D185 MUSICAL SYMBOL COMBINING DOIT -1D186 MUSICAL SYMBOL COMBINING RIP -1D187 MUSICAL SYMBOL COMBINING FLIP -1D188 MUSICAL SYMBOL COMBINING SMEAR -1D189 MUSICAL SYMBOL COMBINING BEND -1D18A MUSICAL SYMBOL COMBINING DOUBLE TONGUE -1D18B MUSICAL SYMBOL COMBINING TRIPLE TONGUE -1D18C MUSICAL SYMBOL RINFORZANDO -1D18D MUSICAL SYMBOL SUBITO -1D18E MUSICAL SYMBOL Z -1D18F MUSICAL SYMBOL PIANO -1D190 MUSICAL SYMBOL MEZZO -1D191 MUSICAL SYMBOL FORTE -1D192 MUSICAL SYMBOL CRESCENDO 
-1D193 MUSICAL SYMBOL DECRESCENDO -1D194 MUSICAL SYMBOL GRACE NOTE SLASH -1D195 MUSICAL SYMBOL GRACE NOTE NO SLASH -1D196 MUSICAL SYMBOL TR -1D197 MUSICAL SYMBOL TURN -1D198 MUSICAL SYMBOL INVERTED TURN -1D199 MUSICAL SYMBOL TURN SLASH -1D19A MUSICAL SYMBOL TURN UP -1D19B MUSICAL SYMBOL ORNAMENT STROKE-1 -1D19C MUSICAL SYMBOL ORNAMENT STROKE-2 -1D19D MUSICAL SYMBOL ORNAMENT STROKE-3 -1D19E MUSICAL SYMBOL ORNAMENT STROKE-4 -1D19F MUSICAL SYMBOL ORNAMENT STROKE-5 -1D1A0 MUSICAL SYMBOL ORNAMENT STROKE-6 -1D1A1 MUSICAL SYMBOL ORNAMENT STROKE-7 -1D1A2 MUSICAL SYMBOL ORNAMENT STROKE-8 -1D1A3 MUSICAL SYMBOL ORNAMENT STROKE-9 -1D1A4 MUSICAL SYMBOL ORNAMENT STROKE-10 -1D1A5 MUSICAL SYMBOL ORNAMENT STROKE-11 -1D1A6 MUSICAL SYMBOL HAUPTSTIMME -1D1A7 MUSICAL SYMBOL NEBENSTIMME -1D1A8 MUSICAL SYMBOL END OF STIMME -1D1A9 MUSICAL SYMBOL DEGREE SLASH -1D1AA MUSICAL SYMBOL COMBINING DOWN BOW -1D1AB MUSICAL SYMBOL COMBINING UP BOW -1D1AC MUSICAL SYMBOL COMBINING HARMONIC -1D1AD MUSICAL SYMBOL COMBINING SNAP PIZZICATO -1D1AE MUSICAL SYMBOL PEDAL MARK -1D1AF MUSICAL SYMBOL PEDAL UP MARK -1D1B0 MUSICAL SYMBOL HALF PEDAL MARK -1D1B1 MUSICAL SYMBOL GLISSANDO UP -1D1B2 MUSICAL SYMBOL GLISSANDO DOWN -1D1B3 MUSICAL SYMBOL WITH FINGERNAILS -1D1B4 MUSICAL SYMBOL DAMP -1D1B5 MUSICAL SYMBOL DAMP ALL -1D1B6 MUSICAL SYMBOL MAXIMA -1D1B7 MUSICAL SYMBOL LONGA -1D1B8 MUSICAL SYMBOL BREVIS -1D1B9 MUSICAL SYMBOL SEMIBREVIS WHITE -1D1BA MUSICAL SYMBOL SEMIBREVIS BLACK -1D1BB MUSICAL SYMBOL MINIMA -1D1BC MUSICAL SYMBOL MINIMA BLACK -1D1BD MUSICAL SYMBOL SEMIMINIMA WHITE -1D1BE MUSICAL SYMBOL SEMIMINIMA BLACK -1D1BF MUSICAL SYMBOL FUSA WHITE -1D1C0 MUSICAL SYMBOL FUSA BLACK -1D1C1 MUSICAL SYMBOL LONGA PERFECTA REST -1D1C2 MUSICAL SYMBOL LONGA IMPERFECTA REST -1D1C3 MUSICAL SYMBOL BREVIS REST -1D1C4 MUSICAL SYMBOL SEMIBREVIS REST -1D1C5 MUSICAL SYMBOL MINIMA REST -1D1C6 MUSICAL SYMBOL SEMIMINIMA REST -1D1C7 MUSICAL SYMBOL TEMPUS PERFECTUM CUM PROLATIONE PERFECTA -1D1C8 MUSICAL SYMBOL TEMPUS PERFECTUM CUM 
PROLATIONE IMPERFECTA -1D1C9 MUSICAL SYMBOL TEMPUS PERFECTUM CUM PROLATIONE PERFECTA DIMINUTION-1 -1D1CA MUSICAL SYMBOL TEMPUS IMPERFECTUM CUM PROLATIONE PERFECTA -1D1CB MUSICAL SYMBOL TEMPUS IMPERFECTUM CUM PROLATIONE IMPERFECTA -1D1CC MUSICAL SYMBOL TEMPUS IMPERFECTUM CUM PROLATIONE IMPERFECTA DIMINUTION-1 -1D1CD MUSICAL SYMBOL TEMPUS IMPERFECTUM CUM PROLATIONE IMPERFECTA DIMINUTION-2 -1D1CE MUSICAL SYMBOL TEMPUS IMPERFECTUM CUM PROLATIONE IMPERFECTA DIMINUTION-3 -1D1CF MUSICAL SYMBOL CROIX -1D1D0 MUSICAL SYMBOL GREGORIAN C CLEF -1D1D1 MUSICAL SYMBOL GREGORIAN F CLEF -1D1D2 MUSICAL SYMBOL SQUARE B -1D1D3 MUSICAL SYMBOL VIRGA -1D1D4 MUSICAL SYMBOL PODATUS -1D1D5 MUSICAL SYMBOL CLIVIS -1D1D6 MUSICAL SYMBOL SCANDICUS -1D1D7 MUSICAL SYMBOL CLIMACUS -1D1D8 MUSICAL SYMBOL TORCULUS -1D1D9 MUSICAL SYMBOL PORRECTUS -1D1DA MUSICAL SYMBOL PORRECTUS FLEXUS -1D1DB MUSICAL SYMBOL SCANDICUS FLEXUS -1D1DC MUSICAL SYMBOL TORCULUS RESUPINUS -1D1DD MUSICAL SYMBOL PES SUBPUNCTIS -1D200 GREEK VOCAL NOTATION SYMBOL-1 -1D201 GREEK VOCAL NOTATION SYMBOL-2 -1D202 GREEK VOCAL NOTATION SYMBOL-3 -1D203 GREEK VOCAL NOTATION SYMBOL-4 -1D204 GREEK VOCAL NOTATION SYMBOL-5 -1D205 GREEK VOCAL NOTATION SYMBOL-6 -1D206 GREEK VOCAL NOTATION SYMBOL-7 -1D207 GREEK VOCAL NOTATION SYMBOL-8 -1D208 GREEK VOCAL NOTATION SYMBOL-9 -1D209 GREEK VOCAL NOTATION SYMBOL-10 -1D20A GREEK VOCAL NOTATION SYMBOL-11 -1D20B GREEK VOCAL NOTATION SYMBOL-12 -1D20C GREEK VOCAL NOTATION SYMBOL-13 -1D20D GREEK VOCAL NOTATION SYMBOL-14 -1D20E GREEK VOCAL NOTATION SYMBOL-15 -1D20F GREEK VOCAL NOTATION SYMBOL-16 -1D210 GREEK VOCAL NOTATION SYMBOL-17 -1D211 GREEK VOCAL NOTATION SYMBOL-18 -1D212 GREEK VOCAL NOTATION SYMBOL-19 -1D213 GREEK VOCAL NOTATION SYMBOL-20 -1D214 GREEK VOCAL NOTATION SYMBOL-21 -1D215 GREEK VOCAL NOTATION SYMBOL-22 -1D216 GREEK VOCAL NOTATION SYMBOL-23 -1D217 GREEK VOCAL NOTATION SYMBOL-24 -1D218 GREEK VOCAL NOTATION SYMBOL-50 -1D219 GREEK VOCAL NOTATION SYMBOL-51 -1D21A GREEK VOCAL NOTATION SYMBOL-52 -1D21B 
GREEK VOCAL NOTATION SYMBOL-53 -1D21C GREEK VOCAL NOTATION SYMBOL-54 -1D21D GREEK INSTRUMENTAL NOTATION SYMBOL-1 -1D21E GREEK INSTRUMENTAL NOTATION SYMBOL-2 -1D21F GREEK INSTRUMENTAL NOTATION SYMBOL-4 -1D220 GREEK INSTRUMENTAL NOTATION SYMBOL-5 -1D221 GREEK INSTRUMENTAL NOTATION SYMBOL-7 -1D222 GREEK INSTRUMENTAL NOTATION SYMBOL-8 -1D223 GREEK INSTRUMENTAL NOTATION SYMBOL-11 -1D224 GREEK INSTRUMENTAL NOTATION SYMBOL-12 -1D225 GREEK INSTRUMENTAL NOTATION SYMBOL-13 -1D226 GREEK INSTRUMENTAL NOTATION SYMBOL-14 -1D227 GREEK INSTRUMENTAL NOTATION SYMBOL-17 -1D228 GREEK INSTRUMENTAL NOTATION SYMBOL-18 -1D229 GREEK INSTRUMENTAL NOTATION SYMBOL-19 -1D22A GREEK INSTRUMENTAL NOTATION SYMBOL-23 -1D22B GREEK INSTRUMENTAL NOTATION SYMBOL-24 -1D22C GREEK INSTRUMENTAL NOTATION SYMBOL-25 -1D22D GREEK INSTRUMENTAL NOTATION SYMBOL-26 -1D22E GREEK INSTRUMENTAL NOTATION SYMBOL-27 -1D22F GREEK INSTRUMENTAL NOTATION SYMBOL-29 -1D230 GREEK INSTRUMENTAL NOTATION SYMBOL-30 -1D231 GREEK INSTRUMENTAL NOTATION SYMBOL-32 -1D232 GREEK INSTRUMENTAL NOTATION SYMBOL-36 -1D233 GREEK INSTRUMENTAL NOTATION SYMBOL-37 -1D234 GREEK INSTRUMENTAL NOTATION SYMBOL-38 -1D235 GREEK INSTRUMENTAL NOTATION SYMBOL-39 -1D236 GREEK INSTRUMENTAL NOTATION SYMBOL-40 -1D237 GREEK INSTRUMENTAL NOTATION SYMBOL-42 -1D238 GREEK INSTRUMENTAL NOTATION SYMBOL-43 -1D239 GREEK INSTRUMENTAL NOTATION SYMBOL-45 -1D23A GREEK INSTRUMENTAL NOTATION SYMBOL-47 -1D23B GREEK INSTRUMENTAL NOTATION SYMBOL-48 -1D23C GREEK INSTRUMENTAL NOTATION SYMBOL-49 -1D23D GREEK INSTRUMENTAL NOTATION SYMBOL-50 -1D23E GREEK INSTRUMENTAL NOTATION SYMBOL-51 -1D23F GREEK INSTRUMENTAL NOTATION SYMBOL-52 -1D240 GREEK INSTRUMENTAL NOTATION SYMBOL-53 -1D241 GREEK INSTRUMENTAL NOTATION SYMBOL-54 -1D242 COMBINING GREEK MUSICAL TRISEME -1D243 COMBINING GREEK MUSICAL TETRASEME -1D244 COMBINING GREEK MUSICAL PENTASEME -1D245 GREEK MUSICAL LEIMMA -1D300 MONOGRAM FOR EARTH -1D301 DIGRAM FOR HEAVENLY EARTH -1D302 DIGRAM FOR HUMAN EARTH -1D303 DIGRAM FOR EARTHLY HEAVEN 
-1D304 DIGRAM FOR EARTHLY HUMAN -1D305 DIGRAM FOR EARTH -1D306 TETRAGRAM FOR CENTRE -1D307 TETRAGRAM FOR FULL CIRCLE -1D308 TETRAGRAM FOR MIRED -1D309 TETRAGRAM FOR BARRIER -1D30A TETRAGRAM FOR KEEPING SMALL -1D30B TETRAGRAM FOR CONTRARIETY -1D30C TETRAGRAM FOR ASCENT -1D30D TETRAGRAM FOR OPPOSITION -1D30E TETRAGRAM FOR BRANCHING OUT -1D30F TETRAGRAM FOR DEFECTIVENESS OR DISTORTION -1D310 TETRAGRAM FOR DIVERGENCE -1D311 TETRAGRAM FOR YOUTHFULNESS -1D312 TETRAGRAM FOR INCREASE -1D313 TETRAGRAM FOR PENETRATION -1D314 TETRAGRAM FOR REACH -1D315 TETRAGRAM FOR CONTACT -1D316 TETRAGRAM FOR HOLDING BACK -1D317 TETRAGRAM FOR WAITING -1D318 TETRAGRAM FOR FOLLOWING -1D319 TETRAGRAM FOR ADVANCE -1D31A TETRAGRAM FOR RELEASE -1D31B TETRAGRAM FOR RESISTANCE -1D31C TETRAGRAM FOR EASE -1D31D TETRAGRAM FOR JOY -1D31E TETRAGRAM FOR CONTENTION -1D31F TETRAGRAM FOR ENDEAVOUR -1D320 TETRAGRAM FOR DUTIES -1D321 TETRAGRAM FOR CHANGE -1D322 TETRAGRAM FOR DECISIVENESS -1D323 TETRAGRAM FOR BOLD RESOLUTION -1D324 TETRAGRAM FOR PACKING -1D325 TETRAGRAM FOR LEGION -1D326 TETRAGRAM FOR CLOSENESS -1D327 TETRAGRAM FOR KINSHIP -1D328 TETRAGRAM FOR GATHERING -1D329 TETRAGRAM FOR STRENGTH -1D32A TETRAGRAM FOR PURITY -1D32B TETRAGRAM FOR FULLNESS -1D32C TETRAGRAM FOR RESIDENCE -1D32D TETRAGRAM FOR LAW OR MODEL -1D32E TETRAGRAM FOR RESPONSE -1D32F TETRAGRAM FOR GOING TO MEET -1D330 TETRAGRAM FOR ENCOUNTERS -1D331 TETRAGRAM FOR STOVE -1D332 TETRAGRAM FOR GREATNESS -1D333 TETRAGRAM FOR ENLARGEMENT -1D334 TETRAGRAM FOR PATTERN -1D335 TETRAGRAM FOR RITUAL -1D336 TETRAGRAM FOR FLIGHT -1D337 TETRAGRAM FOR VASTNESS OR WASTING -1D338 TETRAGRAM FOR CONSTANCY -1D339 TETRAGRAM FOR MEASURE -1D33A TETRAGRAM FOR ETERNITY -1D33B TETRAGRAM FOR UNITY -1D33C TETRAGRAM FOR DIMINISHMENT -1D33D TETRAGRAM FOR CLOSED MOUTH -1D33E TETRAGRAM FOR GUARDEDNESS -1D33F TETRAGRAM FOR GATHERING IN -1D340 TETRAGRAM FOR MASSING -1D341 TETRAGRAM FOR ACCUMULATION -1D342 TETRAGRAM FOR EMBELLISHMENT -1D343 TETRAGRAM FOR DOUBT -1D344 
TETRAGRAM FOR WATCH -1D345 TETRAGRAM FOR SINKING -1D346 TETRAGRAM FOR INNER -1D347 TETRAGRAM FOR DEPARTURE -1D348 TETRAGRAM FOR DARKENING -1D349 TETRAGRAM FOR DIMMING -1D34A TETRAGRAM FOR EXHAUSTION -1D34B TETRAGRAM FOR SEVERANCE -1D34C TETRAGRAM FOR STOPPAGE -1D34D TETRAGRAM FOR HARDNESS -1D34E TETRAGRAM FOR COMPLETION -1D34F TETRAGRAM FOR CLOSURE -1D350 TETRAGRAM FOR FAILURE -1D351 TETRAGRAM FOR AGGRAVATION -1D352 TETRAGRAM FOR COMPLIANCE -1D353 TETRAGRAM FOR ON THE VERGE -1D354 TETRAGRAM FOR DIFFICULTIES -1D355 TETRAGRAM FOR LABOURING -1D356 TETRAGRAM FOR FOSTERING -1D360 COUNTING ROD UNIT DIGIT ONE -1D361 COUNTING ROD UNIT DIGIT TWO -1D362 COUNTING ROD UNIT DIGIT THREE -1D363 COUNTING ROD UNIT DIGIT FOUR -1D364 COUNTING ROD UNIT DIGIT FIVE -1D365 COUNTING ROD UNIT DIGIT SIX -1D366 COUNTING ROD UNIT DIGIT SEVEN -1D367 COUNTING ROD UNIT DIGIT EIGHT -1D368 COUNTING ROD UNIT DIGIT NINE -1D369 COUNTING ROD TENS DIGIT ONE -1D36A COUNTING ROD TENS DIGIT TWO -1D36B COUNTING ROD TENS DIGIT THREE -1D36C COUNTING ROD TENS DIGIT FOUR -1D36D COUNTING ROD TENS DIGIT FIVE -1D36E COUNTING ROD TENS DIGIT SIX -1D36F COUNTING ROD TENS DIGIT SEVEN -1D370 COUNTING ROD TENS DIGIT EIGHT -1D371 COUNTING ROD TENS DIGIT NINE -1D400 MATHEMATICAL BOLD CAPITAL A -1D401 MATHEMATICAL BOLD CAPITAL B -1D402 MATHEMATICAL BOLD CAPITAL C -1D403 MATHEMATICAL BOLD CAPITAL D -1D404 MATHEMATICAL BOLD CAPITAL E -1D405 MATHEMATICAL BOLD CAPITAL F -1D406 MATHEMATICAL BOLD CAPITAL G -1D407 MATHEMATICAL BOLD CAPITAL H -1D408 MATHEMATICAL BOLD CAPITAL I -1D409 MATHEMATICAL BOLD CAPITAL J -1D40A MATHEMATICAL BOLD CAPITAL K -1D40B MATHEMATICAL BOLD CAPITAL L -1D40C MATHEMATICAL BOLD CAPITAL M -1D40D MATHEMATICAL BOLD CAPITAL N -1D40E MATHEMATICAL BOLD CAPITAL O -1D40F MATHEMATICAL BOLD CAPITAL P -1D410 MATHEMATICAL BOLD CAPITAL Q -1D411 MATHEMATICAL BOLD CAPITAL R -1D412 MATHEMATICAL BOLD CAPITAL S -1D413 MATHEMATICAL BOLD CAPITAL T -1D414 MATHEMATICAL BOLD CAPITAL U -1D415 MATHEMATICAL BOLD CAPITAL V -1D416 
MATHEMATICAL BOLD CAPITAL W -1D417 MATHEMATICAL BOLD CAPITAL X -1D418 MATHEMATICAL BOLD CAPITAL Y -1D419 MATHEMATICAL BOLD CAPITAL Z -1D41A MATHEMATICAL BOLD SMALL A -1D41B MATHEMATICAL BOLD SMALL B -1D41C MATHEMATICAL BOLD SMALL C -1D41D MATHEMATICAL BOLD SMALL D -1D41E MATHEMATICAL BOLD SMALL E -1D41F MATHEMATICAL BOLD SMALL F -1D420 MATHEMATICAL BOLD SMALL G -1D421 MATHEMATICAL BOLD SMALL H -1D422 MATHEMATICAL BOLD SMALL I -1D423 MATHEMATICAL BOLD SMALL J -1D424 MATHEMATICAL BOLD SMALL K -1D425 MATHEMATICAL BOLD SMALL L -1D426 MATHEMATICAL BOLD SMALL M -1D427 MATHEMATICAL BOLD SMALL N -1D428 MATHEMATICAL BOLD SMALL O -1D429 MATHEMATICAL BOLD SMALL P -1D42A MATHEMATICAL BOLD SMALL Q -1D42B MATHEMATICAL BOLD SMALL R -1D42C MATHEMATICAL BOLD SMALL S -1D42D MATHEMATICAL BOLD SMALL T -1D42E MATHEMATICAL BOLD SMALL U -1D42F MATHEMATICAL BOLD SMALL V -1D430 MATHEMATICAL BOLD SMALL W -1D431 MATHEMATICAL BOLD SMALL X -1D432 MATHEMATICAL BOLD SMALL Y -1D433 MATHEMATICAL BOLD SMALL Z -1D434 MATHEMATICAL ITALIC CAPITAL A -1D435 MATHEMATICAL ITALIC CAPITAL B -1D436 MATHEMATICAL ITALIC CAPITAL C -1D437 MATHEMATICAL ITALIC CAPITAL D -1D438 MATHEMATICAL ITALIC CAPITAL E -1D439 MATHEMATICAL ITALIC CAPITAL F -1D43A MATHEMATICAL ITALIC CAPITAL G -1D43B MATHEMATICAL ITALIC CAPITAL H -1D43C MATHEMATICAL ITALIC CAPITAL I -1D43D MATHEMATICAL ITALIC CAPITAL J -1D43E MATHEMATICAL ITALIC CAPITAL K -1D43F MATHEMATICAL ITALIC CAPITAL L -1D440 MATHEMATICAL ITALIC CAPITAL M -1D441 MATHEMATICAL ITALIC CAPITAL N -1D442 MATHEMATICAL ITALIC CAPITAL O -1D443 MATHEMATICAL ITALIC CAPITAL P -1D444 MATHEMATICAL ITALIC CAPITAL Q -1D445 MATHEMATICAL ITALIC CAPITAL R -1D446 MATHEMATICAL ITALIC CAPITAL S -1D447 MATHEMATICAL ITALIC CAPITAL T -1D448 MATHEMATICAL ITALIC CAPITAL U -1D449 MATHEMATICAL ITALIC CAPITAL V -1D44A MATHEMATICAL ITALIC CAPITAL W -1D44B MATHEMATICAL ITALIC CAPITAL X -1D44C MATHEMATICAL ITALIC CAPITAL Y -1D44D MATHEMATICAL ITALIC CAPITAL Z -1D44E MATHEMATICAL ITALIC SMALL A -1D44F 
MATHEMATICAL ITALIC SMALL B -1D450 MATHEMATICAL ITALIC SMALL C -1D451 MATHEMATICAL ITALIC SMALL D -1D452 MATHEMATICAL ITALIC SMALL E -1D453 MATHEMATICAL ITALIC SMALL F -1D454 MATHEMATICAL ITALIC SMALL G -1D456 MATHEMATICAL ITALIC SMALL I -1D457 MATHEMATICAL ITALIC SMALL J -1D458 MATHEMATICAL ITALIC SMALL K -1D459 MATHEMATICAL ITALIC SMALL L -1D45A MATHEMATICAL ITALIC SMALL M -1D45B MATHEMATICAL ITALIC SMALL N -1D45C MATHEMATICAL ITALIC SMALL O -1D45D MATHEMATICAL ITALIC SMALL P -1D45E MATHEMATICAL ITALIC SMALL Q -1D45F MATHEMATICAL ITALIC SMALL R -1D460 MATHEMATICAL ITALIC SMALL S -1D461 MATHEMATICAL ITALIC SMALL T -1D462 MATHEMATICAL ITALIC SMALL U -1D463 MATHEMATICAL ITALIC SMALL V -1D464 MATHEMATICAL ITALIC SMALL W -1D465 MATHEMATICAL ITALIC SMALL X -1D466 MATHEMATICAL ITALIC SMALL Y -1D467 MATHEMATICAL ITALIC SMALL Z -1D468 MATHEMATICAL BOLD ITALIC CAPITAL A -1D469 MATHEMATICAL BOLD ITALIC CAPITAL B -1D46A MATHEMATICAL BOLD ITALIC CAPITAL C -1D46B MATHEMATICAL BOLD ITALIC CAPITAL D -1D46C MATHEMATICAL BOLD ITALIC CAPITAL E -1D46D MATHEMATICAL BOLD ITALIC CAPITAL F -1D46E MATHEMATICAL BOLD ITALIC CAPITAL G -1D46F MATHEMATICAL BOLD ITALIC CAPITAL H -1D470 MATHEMATICAL BOLD ITALIC CAPITAL I -1D471 MATHEMATICAL BOLD ITALIC CAPITAL J -1D472 MATHEMATICAL BOLD ITALIC CAPITAL K -1D473 MATHEMATICAL BOLD ITALIC CAPITAL L -1D474 MATHEMATICAL BOLD ITALIC CAPITAL M -1D475 MATHEMATICAL BOLD ITALIC CAPITAL N -1D476 MATHEMATICAL BOLD ITALIC CAPITAL O -1D477 MATHEMATICAL BOLD ITALIC CAPITAL P -1D478 MATHEMATICAL BOLD ITALIC CAPITAL Q -1D479 MATHEMATICAL BOLD ITALIC CAPITAL R -1D47A MATHEMATICAL BOLD ITALIC CAPITAL S -1D47B MATHEMATICAL BOLD ITALIC CAPITAL T -1D47C MATHEMATICAL BOLD ITALIC CAPITAL U -1D47D MATHEMATICAL BOLD ITALIC CAPITAL V -1D47E MATHEMATICAL BOLD ITALIC CAPITAL W -1D47F MATHEMATICAL BOLD ITALIC CAPITAL X -1D480 MATHEMATICAL BOLD ITALIC CAPITAL Y -1D481 MATHEMATICAL BOLD ITALIC CAPITAL Z -1D482 MATHEMATICAL BOLD ITALIC SMALL A -1D483 MATHEMATICAL BOLD ITALIC 
SMALL B -1D484 MATHEMATICAL BOLD ITALIC SMALL C -1D485 MATHEMATICAL BOLD ITALIC SMALL D -1D486 MATHEMATICAL BOLD ITALIC SMALL E -1D487 MATHEMATICAL BOLD ITALIC SMALL F -1D488 MATHEMATICAL BOLD ITALIC SMALL G -1D489 MATHEMATICAL BOLD ITALIC SMALL H -1D48A MATHEMATICAL BOLD ITALIC SMALL I -1D48B MATHEMATICAL BOLD ITALIC SMALL J -1D48C MATHEMATICAL BOLD ITALIC SMALL K -1D48D MATHEMATICAL BOLD ITALIC SMALL L -1D48E MATHEMATICAL BOLD ITALIC SMALL M -1D48F MATHEMATICAL BOLD ITALIC SMALL N -1D490 MATHEMATICAL BOLD ITALIC SMALL O -1D491 MATHEMATICAL BOLD ITALIC SMALL P -1D492 MATHEMATICAL BOLD ITALIC SMALL Q -1D493 MATHEMATICAL BOLD ITALIC SMALL R -1D494 MATHEMATICAL BOLD ITALIC SMALL S -1D495 MATHEMATICAL BOLD ITALIC SMALL T -1D496 MATHEMATICAL BOLD ITALIC SMALL U -1D497 MATHEMATICAL BOLD ITALIC SMALL V -1D498 MATHEMATICAL BOLD ITALIC SMALL W -1D499 MATHEMATICAL BOLD ITALIC SMALL X -1D49A MATHEMATICAL BOLD ITALIC SMALL Y -1D49B MATHEMATICAL BOLD ITALIC SMALL Z -1D49C MATHEMATICAL SCRIPT CAPITAL A -1D49E MATHEMATICAL SCRIPT CAPITAL C -1D49F MATHEMATICAL SCRIPT CAPITAL D -1D4A2 MATHEMATICAL SCRIPT CAPITAL G -1D4A5 MATHEMATICAL SCRIPT CAPITAL J -1D4A6 MATHEMATICAL SCRIPT CAPITAL K -1D4A9 MATHEMATICAL SCRIPT CAPITAL N -1D4AA MATHEMATICAL SCRIPT CAPITAL O -1D4AB MATHEMATICAL SCRIPT CAPITAL P -1D4AC MATHEMATICAL SCRIPT CAPITAL Q -1D4AE MATHEMATICAL SCRIPT CAPITAL S -1D4AF MATHEMATICAL SCRIPT CAPITAL T -1D4B0 MATHEMATICAL SCRIPT CAPITAL U -1D4B1 MATHEMATICAL SCRIPT CAPITAL V -1D4B2 MATHEMATICAL SCRIPT CAPITAL W -1D4B3 MATHEMATICAL SCRIPT CAPITAL X -1D4B4 MATHEMATICAL SCRIPT CAPITAL Y -1D4B5 MATHEMATICAL SCRIPT CAPITAL Z -1D4B6 MATHEMATICAL SCRIPT SMALL A -1D4B7 MATHEMATICAL SCRIPT SMALL B -1D4B8 MATHEMATICAL SCRIPT SMALL C -1D4B9 MATHEMATICAL SCRIPT SMALL D -1D4BB MATHEMATICAL SCRIPT SMALL F -1D4BD MATHEMATICAL SCRIPT SMALL H -1D4BE MATHEMATICAL SCRIPT SMALL I -1D4BF MATHEMATICAL SCRIPT SMALL J -1D4C0 MATHEMATICAL SCRIPT SMALL K -1D4C1 MATHEMATICAL SCRIPT SMALL L -1D4C2 
MATHEMATICAL SCRIPT SMALL M -1D4C3 MATHEMATICAL SCRIPT SMALL N -1D4C5 MATHEMATICAL SCRIPT SMALL P -1D4C6 MATHEMATICAL SCRIPT SMALL Q -1D4C7 MATHEMATICAL SCRIPT SMALL R -1D4C8 MATHEMATICAL SCRIPT SMALL S -1D4C9 MATHEMATICAL SCRIPT SMALL T -1D4CA MATHEMATICAL SCRIPT SMALL U -1D4CB MATHEMATICAL SCRIPT SMALL V -1D4CC MATHEMATICAL SCRIPT SMALL W -1D4CD MATHEMATICAL SCRIPT SMALL X -1D4CE MATHEMATICAL SCRIPT SMALL Y -1D4CF MATHEMATICAL SCRIPT SMALL Z -1D4D0 MATHEMATICAL BOLD SCRIPT CAPITAL A -1D4D1 MATHEMATICAL BOLD SCRIPT CAPITAL B -1D4D2 MATHEMATICAL BOLD SCRIPT CAPITAL C -1D4D3 MATHEMATICAL BOLD SCRIPT CAPITAL D -1D4D4 MATHEMATICAL BOLD SCRIPT CAPITAL E -1D4D5 MATHEMATICAL BOLD SCRIPT CAPITAL F -1D4D6 MATHEMATICAL BOLD SCRIPT CAPITAL G -1D4D7 MATHEMATICAL BOLD SCRIPT CAPITAL H -1D4D8 MATHEMATICAL BOLD SCRIPT CAPITAL I -1D4D9 MATHEMATICAL BOLD SCRIPT CAPITAL J -1D4DA MATHEMATICAL BOLD SCRIPT CAPITAL K -1D4DB MATHEMATICAL BOLD SCRIPT CAPITAL L -1D4DC MATHEMATICAL BOLD SCRIPT CAPITAL M -1D4DD MATHEMATICAL BOLD SCRIPT CAPITAL N -1D4DE MATHEMATICAL BOLD SCRIPT CAPITAL O -1D4DF MATHEMATICAL BOLD SCRIPT CAPITAL P -1D4E0 MATHEMATICAL BOLD SCRIPT CAPITAL Q -1D4E1 MATHEMATICAL BOLD SCRIPT CAPITAL R -1D4E2 MATHEMATICAL BOLD SCRIPT CAPITAL S -1D4E3 MATHEMATICAL BOLD SCRIPT CAPITAL T -1D4E4 MATHEMATICAL BOLD SCRIPT CAPITAL U -1D4E5 MATHEMATICAL BOLD SCRIPT CAPITAL V -1D4E6 MATHEMATICAL BOLD SCRIPT CAPITAL W -1D4E7 MATHEMATICAL BOLD SCRIPT CAPITAL X -1D4E8 MATHEMATICAL BOLD SCRIPT CAPITAL Y -1D4E9 MATHEMATICAL BOLD SCRIPT CAPITAL Z -1D4EA MATHEMATICAL BOLD SCRIPT SMALL A -1D4EB MATHEMATICAL BOLD SCRIPT SMALL B -1D4EC MATHEMATICAL BOLD SCRIPT SMALL C -1D4ED MATHEMATICAL BOLD SCRIPT SMALL D -1D4EE MATHEMATICAL BOLD SCRIPT SMALL E -1D4EF MATHEMATICAL BOLD SCRIPT SMALL F -1D4F0 MATHEMATICAL BOLD SCRIPT SMALL G -1D4F1 MATHEMATICAL BOLD SCRIPT SMALL H -1D4F2 MATHEMATICAL BOLD SCRIPT SMALL I -1D4F3 MATHEMATICAL BOLD SCRIPT SMALL J -1D4F4 MATHEMATICAL BOLD SCRIPT SMALL K -1D4F5 MATHEMATICAL 
BOLD SCRIPT SMALL L -1D4F6 MATHEMATICAL BOLD SCRIPT SMALL M -1D4F7 MATHEMATICAL BOLD SCRIPT SMALL N -1D4F8 MATHEMATICAL BOLD SCRIPT SMALL O -1D4F9 MATHEMATICAL BOLD SCRIPT SMALL P -1D4FA MATHEMATICAL BOLD SCRIPT SMALL Q -1D4FB MATHEMATICAL BOLD SCRIPT SMALL R -1D4FC MATHEMATICAL BOLD SCRIPT SMALL S -1D4FD MATHEMATICAL BOLD SCRIPT SMALL T -1D4FE MATHEMATICAL BOLD SCRIPT SMALL U -1D4FF MATHEMATICAL BOLD SCRIPT SMALL V -1D500 MATHEMATICAL BOLD SCRIPT SMALL W -1D501 MATHEMATICAL BOLD SCRIPT SMALL X -1D502 MATHEMATICAL BOLD SCRIPT SMALL Y -1D503 MATHEMATICAL BOLD SCRIPT SMALL Z -1D504 MATHEMATICAL FRAKTUR CAPITAL A -1D505 MATHEMATICAL FRAKTUR CAPITAL B -1D507 MATHEMATICAL FRAKTUR CAPITAL D -1D508 MATHEMATICAL FRAKTUR CAPITAL E -1D509 MATHEMATICAL FRAKTUR CAPITAL F -1D50A MATHEMATICAL FRAKTUR CAPITAL G -1D50D MATHEMATICAL FRAKTUR CAPITAL J -1D50E MATHEMATICAL FRAKTUR CAPITAL K -1D50F MATHEMATICAL FRAKTUR CAPITAL L -1D510 MATHEMATICAL FRAKTUR CAPITAL M -1D511 MATHEMATICAL FRAKTUR CAPITAL N -1D512 MATHEMATICAL FRAKTUR CAPITAL O -1D513 MATHEMATICAL FRAKTUR CAPITAL P -1D514 MATHEMATICAL FRAKTUR CAPITAL Q -1D516 MATHEMATICAL FRAKTUR CAPITAL S -1D517 MATHEMATICAL FRAKTUR CAPITAL T -1D518 MATHEMATICAL FRAKTUR CAPITAL U -1D519 MATHEMATICAL FRAKTUR CAPITAL V -1D51A MATHEMATICAL FRAKTUR CAPITAL W -1D51B MATHEMATICAL FRAKTUR CAPITAL X -1D51C MATHEMATICAL FRAKTUR CAPITAL Y -1D51E MATHEMATICAL FRAKTUR SMALL A -1D51F MATHEMATICAL FRAKTUR SMALL B -1D520 MATHEMATICAL FRAKTUR SMALL C -1D521 MATHEMATICAL FRAKTUR SMALL D -1D522 MATHEMATICAL FRAKTUR SMALL E -1D523 MATHEMATICAL FRAKTUR SMALL F -1D524 MATHEMATICAL FRAKTUR SMALL G -1D525 MATHEMATICAL FRAKTUR SMALL H -1D526 MATHEMATICAL FRAKTUR SMALL I -1D527 MATHEMATICAL FRAKTUR SMALL J -1D528 MATHEMATICAL FRAKTUR SMALL K -1D529 MATHEMATICAL FRAKTUR SMALL L -1D52A MATHEMATICAL FRAKTUR SMALL M -1D52B MATHEMATICAL FRAKTUR SMALL N -1D52C MATHEMATICAL FRAKTUR SMALL O -1D52D MATHEMATICAL FRAKTUR SMALL P -1D52E MATHEMATICAL FRAKTUR SMALL Q -1D52F 
MATHEMATICAL FRAKTUR SMALL R -1D530 MATHEMATICAL FRAKTUR SMALL S -1D531 MATHEMATICAL FRAKTUR SMALL T -1D532 MATHEMATICAL FRAKTUR SMALL U -1D533 MATHEMATICAL FRAKTUR SMALL V -1D534 MATHEMATICAL FRAKTUR SMALL W -1D535 MATHEMATICAL FRAKTUR SMALL X -1D536 MATHEMATICAL FRAKTUR SMALL Y -1D537 MATHEMATICAL FRAKTUR SMALL Z -1D538 MATHEMATICAL DOUBLE-STRUCK CAPITAL A -1D539 MATHEMATICAL DOUBLE-STRUCK CAPITAL B -1D53B MATHEMATICAL DOUBLE-STRUCK CAPITAL D -1D53C MATHEMATICAL DOUBLE-STRUCK CAPITAL E -1D53D MATHEMATICAL DOUBLE-STRUCK CAPITAL F -1D53E MATHEMATICAL DOUBLE-STRUCK CAPITAL G -1D540 MATHEMATICAL DOUBLE-STRUCK CAPITAL I -1D541 MATHEMATICAL DOUBLE-STRUCK CAPITAL J -1D542 MATHEMATICAL DOUBLE-STRUCK CAPITAL K -1D543 MATHEMATICAL DOUBLE-STRUCK CAPITAL L -1D544 MATHEMATICAL DOUBLE-STRUCK CAPITAL M -1D546 MATHEMATICAL DOUBLE-STRUCK CAPITAL O -1D54A MATHEMATICAL DOUBLE-STRUCK CAPITAL S -1D54B MATHEMATICAL DOUBLE-STRUCK CAPITAL T -1D54C MATHEMATICAL DOUBLE-STRUCK CAPITAL U -1D54D MATHEMATICAL DOUBLE-STRUCK CAPITAL V -1D54E MATHEMATICAL DOUBLE-STRUCK CAPITAL W -1D54F MATHEMATICAL DOUBLE-STRUCK CAPITAL X -1D550 MATHEMATICAL DOUBLE-STRUCK CAPITAL Y -1D552 MATHEMATICAL DOUBLE-STRUCK SMALL A -1D553 MATHEMATICAL DOUBLE-STRUCK SMALL B -1D554 MATHEMATICAL DOUBLE-STRUCK SMALL C -1D555 MATHEMATICAL DOUBLE-STRUCK SMALL D -1D556 MATHEMATICAL DOUBLE-STRUCK SMALL E -1D557 MATHEMATICAL DOUBLE-STRUCK SMALL F -1D558 MATHEMATICAL DOUBLE-STRUCK SMALL G -1D559 MATHEMATICAL DOUBLE-STRUCK SMALL H -1D55A MATHEMATICAL DOUBLE-STRUCK SMALL I -1D55B MATHEMATICAL DOUBLE-STRUCK SMALL J -1D55C MATHEMATICAL DOUBLE-STRUCK SMALL K -1D55D MATHEMATICAL DOUBLE-STRUCK SMALL L -1D55E MATHEMATICAL DOUBLE-STRUCK SMALL M -1D55F MATHEMATICAL DOUBLE-STRUCK SMALL N -1D560 MATHEMATICAL DOUBLE-STRUCK SMALL O -1D561 MATHEMATICAL DOUBLE-STRUCK SMALL P -1D562 MATHEMATICAL DOUBLE-STRUCK SMALL Q -1D563 MATHEMATICAL DOUBLE-STRUCK SMALL R -1D564 MATHEMATICAL DOUBLE-STRUCK SMALL S -1D565 MATHEMATICAL DOUBLE-STRUCK SMALL T -1D566 
MATHEMATICAL DOUBLE-STRUCK SMALL U -1D567 MATHEMATICAL DOUBLE-STRUCK SMALL V -1D568 MATHEMATICAL DOUBLE-STRUCK SMALL W -1D569 MATHEMATICAL DOUBLE-STRUCK SMALL X -1D56A MATHEMATICAL DOUBLE-STRUCK SMALL Y -1D56B MATHEMATICAL DOUBLE-STRUCK SMALL Z -1D56C MATHEMATICAL BOLD FRAKTUR CAPITAL A -1D56D MATHEMATICAL BOLD FRAKTUR CAPITAL B -1D56E MATHEMATICAL BOLD FRAKTUR CAPITAL C -1D56F MATHEMATICAL BOLD FRAKTUR CAPITAL D -1D570 MATHEMATICAL BOLD FRAKTUR CAPITAL E -1D571 MATHEMATICAL BOLD FRAKTUR CAPITAL F -1D572 MATHEMATICAL BOLD FRAKTUR CAPITAL G -1D573 MATHEMATICAL BOLD FRAKTUR CAPITAL H -1D574 MATHEMATICAL BOLD FRAKTUR CAPITAL I -1D575 MATHEMATICAL BOLD FRAKTUR CAPITAL J -1D576 MATHEMATICAL BOLD FRAKTUR CAPITAL K -1D577 MATHEMATICAL BOLD FRAKTUR CAPITAL L -1D578 MATHEMATICAL BOLD FRAKTUR CAPITAL M -1D579 MATHEMATICAL BOLD FRAKTUR CAPITAL N -1D57A MATHEMATICAL BOLD FRAKTUR CAPITAL O -1D57B MATHEMATICAL BOLD FRAKTUR CAPITAL P -1D57C MATHEMATICAL BOLD FRAKTUR CAPITAL Q -1D57D MATHEMATICAL BOLD FRAKTUR CAPITAL R -1D57E MATHEMATICAL BOLD FRAKTUR CAPITAL S -1D57F MATHEMATICAL BOLD FRAKTUR CAPITAL T -1D580 MATHEMATICAL BOLD FRAKTUR CAPITAL U -1D581 MATHEMATICAL BOLD FRAKTUR CAPITAL V -1D582 MATHEMATICAL BOLD FRAKTUR CAPITAL W -1D583 MATHEMATICAL BOLD FRAKTUR CAPITAL X -1D584 MATHEMATICAL BOLD FRAKTUR CAPITAL Y -1D585 MATHEMATICAL BOLD FRAKTUR CAPITAL Z -1D586 MATHEMATICAL BOLD FRAKTUR SMALL A -1D587 MATHEMATICAL BOLD FRAKTUR SMALL B -1D588 MATHEMATICAL BOLD FRAKTUR SMALL C -1D589 MATHEMATICAL BOLD FRAKTUR SMALL D -1D58A MATHEMATICAL BOLD FRAKTUR SMALL E -1D58B MATHEMATICAL BOLD FRAKTUR SMALL F -1D58C MATHEMATICAL BOLD FRAKTUR SMALL G -1D58D MATHEMATICAL BOLD FRAKTUR SMALL H -1D58E MATHEMATICAL BOLD FRAKTUR SMALL I -1D58F MATHEMATICAL BOLD FRAKTUR SMALL J -1D590 MATHEMATICAL BOLD FRAKTUR SMALL K -1D591 MATHEMATICAL BOLD FRAKTUR SMALL L -1D592 MATHEMATICAL BOLD FRAKTUR SMALL M -1D593 MATHEMATICAL BOLD FRAKTUR SMALL N -1D594 MATHEMATICAL BOLD FRAKTUR SMALL O -1D595 MATHEMATICAL 
BOLD FRAKTUR SMALL P -1D596 MATHEMATICAL BOLD FRAKTUR SMALL Q -1D597 MATHEMATICAL BOLD FRAKTUR SMALL R -1D598 MATHEMATICAL BOLD FRAKTUR SMALL S -1D599 MATHEMATICAL BOLD FRAKTUR SMALL T -1D59A MATHEMATICAL BOLD FRAKTUR SMALL U -1D59B MATHEMATICAL BOLD FRAKTUR SMALL V -1D59C MATHEMATICAL BOLD FRAKTUR SMALL W -1D59D MATHEMATICAL BOLD FRAKTUR SMALL X -1D59E MATHEMATICAL BOLD FRAKTUR SMALL Y -1D59F MATHEMATICAL BOLD FRAKTUR SMALL Z -1D5A0 MATHEMATICAL SANS-SERIF CAPITAL A -1D5A1 MATHEMATICAL SANS-SERIF CAPITAL B -1D5A2 MATHEMATICAL SANS-SERIF CAPITAL C -1D5A3 MATHEMATICAL SANS-SERIF CAPITAL D -1D5A4 MATHEMATICAL SANS-SERIF CAPITAL E -1D5A5 MATHEMATICAL SANS-SERIF CAPITAL F -1D5A6 MATHEMATICAL SANS-SERIF CAPITAL G -1D5A7 MATHEMATICAL SANS-SERIF CAPITAL H -1D5A8 MATHEMATICAL SANS-SERIF CAPITAL I -1D5A9 MATHEMATICAL SANS-SERIF CAPITAL J -1D5AA MATHEMATICAL SANS-SERIF CAPITAL K -1D5AB MATHEMATICAL SANS-SERIF CAPITAL L -1D5AC MATHEMATICAL SANS-SERIF CAPITAL M -1D5AD MATHEMATICAL SANS-SERIF CAPITAL N -1D5AE MATHEMATICAL SANS-SERIF CAPITAL O -1D5AF MATHEMATICAL SANS-SERIF CAPITAL P -1D5B0 MATHEMATICAL SANS-SERIF CAPITAL Q -1D5B1 MATHEMATICAL SANS-SERIF CAPITAL R -1D5B2 MATHEMATICAL SANS-SERIF CAPITAL S -1D5B3 MATHEMATICAL SANS-SERIF CAPITAL T -1D5B4 MATHEMATICAL SANS-SERIF CAPITAL U -1D5B5 MATHEMATICAL SANS-SERIF CAPITAL V -1D5B6 MATHEMATICAL SANS-SERIF CAPITAL W -1D5B7 MATHEMATICAL SANS-SERIF CAPITAL X -1D5B8 MATHEMATICAL SANS-SERIF CAPITAL Y -1D5B9 MATHEMATICAL SANS-SERIF CAPITAL Z -1D5BA MATHEMATICAL SANS-SERIF SMALL A -1D5BB MATHEMATICAL SANS-SERIF SMALL B -1D5BC MATHEMATICAL SANS-SERIF SMALL C -1D5BD MATHEMATICAL SANS-SERIF SMALL D -1D5BE MATHEMATICAL SANS-SERIF SMALL E -1D5BF MATHEMATICAL SANS-SERIF SMALL F -1D5C0 MATHEMATICAL SANS-SERIF SMALL G -1D5C1 MATHEMATICAL SANS-SERIF SMALL H -1D5C2 MATHEMATICAL SANS-SERIF SMALL I -1D5C3 MATHEMATICAL SANS-SERIF SMALL J -1D5C4 MATHEMATICAL SANS-SERIF SMALL K -1D5C5 MATHEMATICAL SANS-SERIF SMALL L -1D5C6 MATHEMATICAL SANS-SERIF 
SMALL M -1D5C7 MATHEMATICAL SANS-SERIF SMALL N -1D5C8 MATHEMATICAL SANS-SERIF SMALL O -1D5C9 MATHEMATICAL SANS-SERIF SMALL P -1D5CA MATHEMATICAL SANS-SERIF SMALL Q -1D5CB MATHEMATICAL SANS-SERIF SMALL R -1D5CC MATHEMATICAL SANS-SERIF SMALL S -1D5CD MATHEMATICAL SANS-SERIF SMALL T -1D5CE MATHEMATICAL SANS-SERIF SMALL U -1D5CF MATHEMATICAL SANS-SERIF SMALL V -1D5D0 MATHEMATICAL SANS-SERIF SMALL W -1D5D1 MATHEMATICAL SANS-SERIF SMALL X -1D5D2 MATHEMATICAL SANS-SERIF SMALL Y -1D5D3 MATHEMATICAL SANS-SERIF SMALL Z -1D5D4 MATHEMATICAL SANS-SERIF BOLD CAPITAL A -1D5D5 MATHEMATICAL SANS-SERIF BOLD CAPITAL B -1D5D6 MATHEMATICAL SANS-SERIF BOLD CAPITAL C -1D5D7 MATHEMATICAL SANS-SERIF BOLD CAPITAL D -1D5D8 MATHEMATICAL SANS-SERIF BOLD CAPITAL E -1D5D9 MATHEMATICAL SANS-SERIF BOLD CAPITAL F -1D5DA MATHEMATICAL SANS-SERIF BOLD CAPITAL G -1D5DB MATHEMATICAL SANS-SERIF BOLD CAPITAL H -1D5DC MATHEMATICAL SANS-SERIF BOLD CAPITAL I -1D5DD MATHEMATICAL SANS-SERIF BOLD CAPITAL J -1D5DE MATHEMATICAL SANS-SERIF BOLD CAPITAL K -1D5DF MATHEMATICAL SANS-SERIF BOLD CAPITAL L -1D5E0 MATHEMATICAL SANS-SERIF BOLD CAPITAL M -1D5E1 MATHEMATICAL SANS-SERIF BOLD CAPITAL N -1D5E2 MATHEMATICAL SANS-SERIF BOLD CAPITAL O -1D5E3 MATHEMATICAL SANS-SERIF BOLD CAPITAL P -1D5E4 MATHEMATICAL SANS-SERIF BOLD CAPITAL Q -1D5E5 MATHEMATICAL SANS-SERIF BOLD CAPITAL R -1D5E6 MATHEMATICAL SANS-SERIF BOLD CAPITAL S -1D5E7 MATHEMATICAL SANS-SERIF BOLD CAPITAL T -1D5E8 MATHEMATICAL SANS-SERIF BOLD CAPITAL U -1D5E9 MATHEMATICAL SANS-SERIF BOLD CAPITAL V -1D5EA MATHEMATICAL SANS-SERIF BOLD CAPITAL W -1D5EB MATHEMATICAL SANS-SERIF BOLD CAPITAL X -1D5EC MATHEMATICAL SANS-SERIF BOLD CAPITAL Y -1D5ED MATHEMATICAL SANS-SERIF BOLD CAPITAL Z -1D5EE MATHEMATICAL SANS-SERIF BOLD SMALL A -1D5EF MATHEMATICAL SANS-SERIF BOLD SMALL B -1D5F0 MATHEMATICAL SANS-SERIF BOLD SMALL C -1D5F1 MATHEMATICAL SANS-SERIF BOLD SMALL D -1D5F2 MATHEMATICAL SANS-SERIF BOLD SMALL E -1D5F3 MATHEMATICAL SANS-SERIF BOLD SMALL F -1D5F4 MATHEMATICAL 
SANS-SERIF BOLD SMALL G -1D5F5 MATHEMATICAL SANS-SERIF BOLD SMALL H -1D5F6 MATHEMATICAL SANS-SERIF BOLD SMALL I -1D5F7 MATHEMATICAL SANS-SERIF BOLD SMALL J -1D5F8 MATHEMATICAL SANS-SERIF BOLD SMALL K -1D5F9 MATHEMATICAL SANS-SERIF BOLD SMALL L -1D5FA MATHEMATICAL SANS-SERIF BOLD SMALL M -1D5FB MATHEMATICAL SANS-SERIF BOLD SMALL N -1D5FC MATHEMATICAL SANS-SERIF BOLD SMALL O -1D5FD MATHEMATICAL SANS-SERIF BOLD SMALL P -1D5FE MATHEMATICAL SANS-SERIF BOLD SMALL Q -1D5FF MATHEMATICAL SANS-SERIF BOLD SMALL R -1D600 MATHEMATICAL SANS-SERIF BOLD SMALL S -1D601 MATHEMATICAL SANS-SERIF BOLD SMALL T -1D602 MATHEMATICAL SANS-SERIF BOLD SMALL U -1D603 MATHEMATICAL SANS-SERIF BOLD SMALL V -1D604 MATHEMATICAL SANS-SERIF BOLD SMALL W -1D605 MATHEMATICAL SANS-SERIF BOLD SMALL X -1D606 MATHEMATICAL SANS-SERIF BOLD SMALL Y -1D607 MATHEMATICAL SANS-SERIF BOLD SMALL Z -1D608 MATHEMATICAL SANS-SERIF ITALIC CAPITAL A -1D609 MATHEMATICAL SANS-SERIF ITALIC CAPITAL B -1D60A MATHEMATICAL SANS-SERIF ITALIC CAPITAL C -1D60B MATHEMATICAL SANS-SERIF ITALIC CAPITAL D -1D60C MATHEMATICAL SANS-SERIF ITALIC CAPITAL E -1D60D MATHEMATICAL SANS-SERIF ITALIC CAPITAL F -1D60E MATHEMATICAL SANS-SERIF ITALIC CAPITAL G -1D60F MATHEMATICAL SANS-SERIF ITALIC CAPITAL H -1D610 MATHEMATICAL SANS-SERIF ITALIC CAPITAL I -1D611 MATHEMATICAL SANS-SERIF ITALIC CAPITAL J -1D612 MATHEMATICAL SANS-SERIF ITALIC CAPITAL K -1D613 MATHEMATICAL SANS-SERIF ITALIC CAPITAL L -1D614 MATHEMATICAL SANS-SERIF ITALIC CAPITAL M -1D615 MATHEMATICAL SANS-SERIF ITALIC CAPITAL N -1D616 MATHEMATICAL SANS-SERIF ITALIC CAPITAL O -1D617 MATHEMATICAL SANS-SERIF ITALIC CAPITAL P -1D618 MATHEMATICAL SANS-SERIF ITALIC CAPITAL Q -1D619 MATHEMATICAL SANS-SERIF ITALIC CAPITAL R -1D61A MATHEMATICAL SANS-SERIF ITALIC CAPITAL S -1D61B MATHEMATICAL SANS-SERIF ITALIC CAPITAL T -1D61C MATHEMATICAL SANS-SERIF ITALIC CAPITAL U -1D61D MATHEMATICAL SANS-SERIF ITALIC CAPITAL V -1D61E MATHEMATICAL SANS-SERIF ITALIC CAPITAL W -1D61F MATHEMATICAL SANS-SERIF 
ITALIC CAPITAL X -1D620 MATHEMATICAL SANS-SERIF ITALIC CAPITAL Y -1D621 MATHEMATICAL SANS-SERIF ITALIC CAPITAL Z -1D622 MATHEMATICAL SANS-SERIF ITALIC SMALL A -1D623 MATHEMATICAL SANS-SERIF ITALIC SMALL B -1D624 MATHEMATICAL SANS-SERIF ITALIC SMALL C -1D625 MATHEMATICAL SANS-SERIF ITALIC SMALL D -1D626 MATHEMATICAL SANS-SERIF ITALIC SMALL E -1D627 MATHEMATICAL SANS-SERIF ITALIC SMALL F -1D628 MATHEMATICAL SANS-SERIF ITALIC SMALL G -1D629 MATHEMATICAL SANS-SERIF ITALIC SMALL H -1D62A MATHEMATICAL SANS-SERIF ITALIC SMALL I -1D62B MATHEMATICAL SANS-SERIF ITALIC SMALL J -1D62C MATHEMATICAL SANS-SERIF ITALIC SMALL K -1D62D MATHEMATICAL SANS-SERIF ITALIC SMALL L -1D62E MATHEMATICAL SANS-SERIF ITALIC SMALL M -1D62F MATHEMATICAL SANS-SERIF ITALIC SMALL N -1D630 MATHEMATICAL SANS-SERIF ITALIC SMALL O -1D631 MATHEMATICAL SANS-SERIF ITALIC SMALL P -1D632 MATHEMATICAL SANS-SERIF ITALIC SMALL Q -1D633 MATHEMATICAL SANS-SERIF ITALIC SMALL R -1D634 MATHEMATICAL SANS-SERIF ITALIC SMALL S -1D635 MATHEMATICAL SANS-SERIF ITALIC SMALL T -1D636 MATHEMATICAL SANS-SERIF ITALIC SMALL U -1D637 MATHEMATICAL SANS-SERIF ITALIC SMALL V -1D638 MATHEMATICAL SANS-SERIF ITALIC SMALL W -1D639 MATHEMATICAL SANS-SERIF ITALIC SMALL X -1D63A MATHEMATICAL SANS-SERIF ITALIC SMALL Y -1D63B MATHEMATICAL SANS-SERIF ITALIC SMALL Z -1D63C MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL A -1D63D MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL B -1D63E MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL C -1D63F MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL D -1D640 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL E -1D641 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL F -1D642 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL G -1D643 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL H -1D644 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL I -1D645 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL J -1D646 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL K -1D647 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL L -1D648 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL M 
-1D649 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL N -1D64A MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL O -1D64B MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL P -1D64C MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL Q -1D64D MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL R -1D64E MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL S -1D64F MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL T -1D650 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL U -1D651 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL V -1D652 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL W -1D653 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL X -1D654 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL Y -1D655 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL Z -1D656 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL A -1D657 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL B -1D658 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL C -1D659 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL D -1D65A MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL E -1D65B MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL F -1D65C MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL G -1D65D MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL H -1D65E MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL I -1D65F MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL J -1D660 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL K -1D661 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL L -1D662 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL M -1D663 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL N -1D664 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL O -1D665 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL P -1D666 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL Q -1D667 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL R -1D668 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL S -1D669 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL T -1D66A MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL U -1D66B MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL V -1D66C MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL W -1D66D MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL X -1D66E MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL Y -1D66F MATHEMATICAL SANS-SERIF BOLD 
ITALIC SMALL Z -1D670 MATHEMATICAL MONOSPACE CAPITAL A -1D671 MATHEMATICAL MONOSPACE CAPITAL B -1D672 MATHEMATICAL MONOSPACE CAPITAL C -1D673 MATHEMATICAL MONOSPACE CAPITAL D -1D674 MATHEMATICAL MONOSPACE CAPITAL E -1D675 MATHEMATICAL MONOSPACE CAPITAL F -1D676 MATHEMATICAL MONOSPACE CAPITAL G -1D677 MATHEMATICAL MONOSPACE CAPITAL H -1D678 MATHEMATICAL MONOSPACE CAPITAL I -1D679 MATHEMATICAL MONOSPACE CAPITAL J -1D67A MATHEMATICAL MONOSPACE CAPITAL K -1D67B MATHEMATICAL MONOSPACE CAPITAL L -1D67C MATHEMATICAL MONOSPACE CAPITAL M -1D67D MATHEMATICAL MONOSPACE CAPITAL N -1D67E MATHEMATICAL MONOSPACE CAPITAL O -1D67F MATHEMATICAL MONOSPACE CAPITAL P -1D680 MATHEMATICAL MONOSPACE CAPITAL Q -1D681 MATHEMATICAL MONOSPACE CAPITAL R -1D682 MATHEMATICAL MONOSPACE CAPITAL S -1D683 MATHEMATICAL MONOSPACE CAPITAL T -1D684 MATHEMATICAL MONOSPACE CAPITAL U -1D685 MATHEMATICAL MONOSPACE CAPITAL V -1D686 MATHEMATICAL MONOSPACE CAPITAL W -1D687 MATHEMATICAL MONOSPACE CAPITAL X -1D688 MATHEMATICAL MONOSPACE CAPITAL Y -1D689 MATHEMATICAL MONOSPACE CAPITAL Z -1D68A MATHEMATICAL MONOSPACE SMALL A -1D68B MATHEMATICAL MONOSPACE SMALL B -1D68C MATHEMATICAL MONOSPACE SMALL C -1D68D MATHEMATICAL MONOSPACE SMALL D -1D68E MATHEMATICAL MONOSPACE SMALL E -1D68F MATHEMATICAL MONOSPACE SMALL F -1D690 MATHEMATICAL MONOSPACE SMALL G -1D691 MATHEMATICAL MONOSPACE SMALL H -1D692 MATHEMATICAL MONOSPACE SMALL I -1D693 MATHEMATICAL MONOSPACE SMALL J -1D694 MATHEMATICAL MONOSPACE SMALL K -1D695 MATHEMATICAL MONOSPACE SMALL L -1D696 MATHEMATICAL MONOSPACE SMALL M -1D697 MATHEMATICAL MONOSPACE SMALL N -1D698 MATHEMATICAL MONOSPACE SMALL O -1D699 MATHEMATICAL MONOSPACE SMALL P -1D69A MATHEMATICAL MONOSPACE SMALL Q -1D69B MATHEMATICAL MONOSPACE SMALL R -1D69C MATHEMATICAL MONOSPACE SMALL S -1D69D MATHEMATICAL MONOSPACE SMALL T -1D69E MATHEMATICAL MONOSPACE SMALL U -1D69F MATHEMATICAL MONOSPACE SMALL V -1D6A0 MATHEMATICAL MONOSPACE SMALL W -1D6A1 MATHEMATICAL MONOSPACE SMALL X -1D6A2 MATHEMATICAL MONOSPACE 
SMALL Y -1D6A3 MATHEMATICAL MONOSPACE SMALL Z -1D6A4 MATHEMATICAL ITALIC SMALL DOTLESS I -1D6A5 MATHEMATICAL ITALIC SMALL DOTLESS J -1D6A8 MATHEMATICAL BOLD CAPITAL ALPHA -1D6A9 MATHEMATICAL BOLD CAPITAL BETA -1D6AA MATHEMATICAL BOLD CAPITAL GAMMA -1D6AB MATHEMATICAL BOLD CAPITAL DELTA -1D6AC MATHEMATICAL BOLD CAPITAL EPSILON -1D6AD MATHEMATICAL BOLD CAPITAL ZETA -1D6AE MATHEMATICAL BOLD CAPITAL ETA -1D6AF MATHEMATICAL BOLD CAPITAL THETA -1D6B0 MATHEMATICAL BOLD CAPITAL IOTA -1D6B1 MATHEMATICAL BOLD CAPITAL KAPPA -1D6B2 MATHEMATICAL BOLD CAPITAL LAMDA -1D6B3 MATHEMATICAL BOLD CAPITAL MU -1D6B4 MATHEMATICAL BOLD CAPITAL NU -1D6B5 MATHEMATICAL BOLD CAPITAL XI -1D6B6 MATHEMATICAL BOLD CAPITAL OMICRON -1D6B7 MATHEMATICAL BOLD CAPITAL PI -1D6B8 MATHEMATICAL BOLD CAPITAL RHO -1D6B9 MATHEMATICAL BOLD CAPITAL THETA SYMBOL -1D6BA MATHEMATICAL BOLD CAPITAL SIGMA -1D6BB MATHEMATICAL BOLD CAPITAL TAU -1D6BC MATHEMATICAL BOLD CAPITAL UPSILON -1D6BD MATHEMATICAL BOLD CAPITAL PHI -1D6BE MATHEMATICAL BOLD CAPITAL CHI -1D6BF MATHEMATICAL BOLD CAPITAL PSI -1D6C0 MATHEMATICAL BOLD CAPITAL OMEGA -1D6C1 MATHEMATICAL BOLD NABLA -1D6C2 MATHEMATICAL BOLD SMALL ALPHA -1D6C3 MATHEMATICAL BOLD SMALL BETA -1D6C4 MATHEMATICAL BOLD SMALL GAMMA -1D6C5 MATHEMATICAL BOLD SMALL DELTA -1D6C6 MATHEMATICAL BOLD SMALL EPSILON -1D6C7 MATHEMATICAL BOLD SMALL ZETA -1D6C8 MATHEMATICAL BOLD SMALL ETA -1D6C9 MATHEMATICAL BOLD SMALL THETA -1D6CA MATHEMATICAL BOLD SMALL IOTA -1D6CB MATHEMATICAL BOLD SMALL KAPPA -1D6CC MATHEMATICAL BOLD SMALL LAMDA -1D6CD MATHEMATICAL BOLD SMALL MU -1D6CE MATHEMATICAL BOLD SMALL NU -1D6CF MATHEMATICAL BOLD SMALL XI -1D6D0 MATHEMATICAL BOLD SMALL OMICRON -1D6D1 MATHEMATICAL BOLD SMALL PI -1D6D2 MATHEMATICAL BOLD SMALL RHO -1D6D3 MATHEMATICAL BOLD SMALL FINAL SIGMA -1D6D4 MATHEMATICAL BOLD SMALL SIGMA -1D6D5 MATHEMATICAL BOLD SMALL TAU -1D6D6 MATHEMATICAL BOLD SMALL UPSILON -1D6D7 MATHEMATICAL BOLD SMALL PHI -1D6D8 MATHEMATICAL BOLD SMALL CHI -1D6D9 MATHEMATICAL BOLD SMALL PSI 
-1D6DA MATHEMATICAL BOLD SMALL OMEGA -1D6DB MATHEMATICAL BOLD PARTIAL DIFFERENTIAL -1D6DC MATHEMATICAL BOLD EPSILON SYMBOL -1D6DD MATHEMATICAL BOLD THETA SYMBOL -1D6DE MATHEMATICAL BOLD KAPPA SYMBOL -1D6DF MATHEMATICAL BOLD PHI SYMBOL -1D6E0 MATHEMATICAL BOLD RHO SYMBOL -1D6E1 MATHEMATICAL BOLD PI SYMBOL -1D6E2 MATHEMATICAL ITALIC CAPITAL ALPHA -1D6E3 MATHEMATICAL ITALIC CAPITAL BETA -1D6E4 MATHEMATICAL ITALIC CAPITAL GAMMA -1D6E5 MATHEMATICAL ITALIC CAPITAL DELTA -1D6E6 MATHEMATICAL ITALIC CAPITAL EPSILON -1D6E7 MATHEMATICAL ITALIC CAPITAL ZETA -1D6E8 MATHEMATICAL ITALIC CAPITAL ETA -1D6E9 MATHEMATICAL ITALIC CAPITAL THETA -1D6EA MATHEMATICAL ITALIC CAPITAL IOTA -1D6EB MATHEMATICAL ITALIC CAPITAL KAPPA -1D6EC MATHEMATICAL ITALIC CAPITAL LAMDA -1D6ED MATHEMATICAL ITALIC CAPITAL MU -1D6EE MATHEMATICAL ITALIC CAPITAL NU -1D6EF MATHEMATICAL ITALIC CAPITAL XI -1D6F0 MATHEMATICAL ITALIC CAPITAL OMICRON -1D6F1 MATHEMATICAL ITALIC CAPITAL PI -1D6F2 MATHEMATICAL ITALIC CAPITAL RHO -1D6F3 MATHEMATICAL ITALIC CAPITAL THETA SYMBOL -1D6F4 MATHEMATICAL ITALIC CAPITAL SIGMA -1D6F5 MATHEMATICAL ITALIC CAPITAL TAU -1D6F6 MATHEMATICAL ITALIC CAPITAL UPSILON -1D6F7 MATHEMATICAL ITALIC CAPITAL PHI -1D6F8 MATHEMATICAL ITALIC CAPITAL CHI -1D6F9 MATHEMATICAL ITALIC CAPITAL PSI -1D6FA MATHEMATICAL ITALIC CAPITAL OMEGA -1D6FB MATHEMATICAL ITALIC NABLA -1D6FC MATHEMATICAL ITALIC SMALL ALPHA -1D6FD MATHEMATICAL ITALIC SMALL BETA -1D6FE MATHEMATICAL ITALIC SMALL GAMMA -1D6FF MATHEMATICAL ITALIC SMALL DELTA -1D700 MATHEMATICAL ITALIC SMALL EPSILON -1D701 MATHEMATICAL ITALIC SMALL ZETA -1D702 MATHEMATICAL ITALIC SMALL ETA -1D703 MATHEMATICAL ITALIC SMALL THETA -1D704 MATHEMATICAL ITALIC SMALL IOTA -1D705 MATHEMATICAL ITALIC SMALL KAPPA -1D706 MATHEMATICAL ITALIC SMALL LAMDA -1D707 MATHEMATICAL ITALIC SMALL MU -1D708 MATHEMATICAL ITALIC SMALL NU -1D709 MATHEMATICAL ITALIC SMALL XI -1D70A MATHEMATICAL ITALIC SMALL OMICRON -1D70B MATHEMATICAL ITALIC SMALL PI -1D70C MATHEMATICAL ITALIC SMALL RHO 
-1D70D MATHEMATICAL ITALIC SMALL FINAL SIGMA -1D70E MATHEMATICAL ITALIC SMALL SIGMA -1D70F MATHEMATICAL ITALIC SMALL TAU -1D710 MATHEMATICAL ITALIC SMALL UPSILON -1D711 MATHEMATICAL ITALIC SMALL PHI -1D712 MATHEMATICAL ITALIC SMALL CHI -1D713 MATHEMATICAL ITALIC SMALL PSI -1D714 MATHEMATICAL ITALIC SMALL OMEGA -1D715 MATHEMATICAL ITALIC PARTIAL DIFFERENTIAL -1D716 MATHEMATICAL ITALIC EPSILON SYMBOL -1D717 MATHEMATICAL ITALIC THETA SYMBOL -1D718 MATHEMATICAL ITALIC KAPPA SYMBOL -1D719 MATHEMATICAL ITALIC PHI SYMBOL -1D71A MATHEMATICAL ITALIC RHO SYMBOL -1D71B MATHEMATICAL ITALIC PI SYMBOL -1D71C MATHEMATICAL BOLD ITALIC CAPITAL ALPHA -1D71D MATHEMATICAL BOLD ITALIC CAPITAL BETA -1D71E MATHEMATICAL BOLD ITALIC CAPITAL GAMMA -1D71F MATHEMATICAL BOLD ITALIC CAPITAL DELTA -1D720 MATHEMATICAL BOLD ITALIC CAPITAL EPSILON -1D721 MATHEMATICAL BOLD ITALIC CAPITAL ZETA -1D722 MATHEMATICAL BOLD ITALIC CAPITAL ETA -1D723 MATHEMATICAL BOLD ITALIC CAPITAL THETA -1D724 MATHEMATICAL BOLD ITALIC CAPITAL IOTA -1D725 MATHEMATICAL BOLD ITALIC CAPITAL KAPPA -1D726 MATHEMATICAL BOLD ITALIC CAPITAL LAMDA -1D727 MATHEMATICAL BOLD ITALIC CAPITAL MU -1D728 MATHEMATICAL BOLD ITALIC CAPITAL NU -1D729 MATHEMATICAL BOLD ITALIC CAPITAL XI -1D72A MATHEMATICAL BOLD ITALIC CAPITAL OMICRON -1D72B MATHEMATICAL BOLD ITALIC CAPITAL PI -1D72C MATHEMATICAL BOLD ITALIC CAPITAL RHO -1D72D MATHEMATICAL BOLD ITALIC CAPITAL THETA SYMBOL -1D72E MATHEMATICAL BOLD ITALIC CAPITAL SIGMA -1D72F MATHEMATICAL BOLD ITALIC CAPITAL TAU -1D730 MATHEMATICAL BOLD ITALIC CAPITAL UPSILON -1D731 MATHEMATICAL BOLD ITALIC CAPITAL PHI -1D732 MATHEMATICAL BOLD ITALIC CAPITAL CHI -1D733 MATHEMATICAL BOLD ITALIC CAPITAL PSI -1D734 MATHEMATICAL BOLD ITALIC CAPITAL OMEGA -1D735 MATHEMATICAL BOLD ITALIC NABLA -1D736 MATHEMATICAL BOLD ITALIC SMALL ALPHA -1D737 MATHEMATICAL BOLD ITALIC SMALL BETA -1D738 MATHEMATICAL BOLD ITALIC SMALL GAMMA -1D739 MATHEMATICAL BOLD ITALIC SMALL DELTA -1D73A MATHEMATICAL BOLD ITALIC SMALL EPSILON -1D73B 
MATHEMATICAL BOLD ITALIC SMALL ZETA -1D73C MATHEMATICAL BOLD ITALIC SMALL ETA -1D73D MATHEMATICAL BOLD ITALIC SMALL THETA -1D73E MATHEMATICAL BOLD ITALIC SMALL IOTA -1D73F MATHEMATICAL BOLD ITALIC SMALL KAPPA -1D740 MATHEMATICAL BOLD ITALIC SMALL LAMDA -1D741 MATHEMATICAL BOLD ITALIC SMALL MU -1D742 MATHEMATICAL BOLD ITALIC SMALL NU -1D743 MATHEMATICAL BOLD ITALIC SMALL XI -1D744 MATHEMATICAL BOLD ITALIC SMALL OMICRON -1D745 MATHEMATICAL BOLD ITALIC SMALL PI -1D746 MATHEMATICAL BOLD ITALIC SMALL RHO -1D747 MATHEMATICAL BOLD ITALIC SMALL FINAL SIGMA -1D748 MATHEMATICAL BOLD ITALIC SMALL SIGMA -1D749 MATHEMATICAL BOLD ITALIC SMALL TAU -1D74A MATHEMATICAL BOLD ITALIC SMALL UPSILON -1D74B MATHEMATICAL BOLD ITALIC SMALL PHI -1D74C MATHEMATICAL BOLD ITALIC SMALL CHI -1D74D MATHEMATICAL BOLD ITALIC SMALL PSI -1D74E MATHEMATICAL BOLD ITALIC SMALL OMEGA -1D74F MATHEMATICAL BOLD ITALIC PARTIAL DIFFERENTIAL -1D750 MATHEMATICAL BOLD ITALIC EPSILON SYMBOL -1D751 MATHEMATICAL BOLD ITALIC THETA SYMBOL -1D752 MATHEMATICAL BOLD ITALIC KAPPA SYMBOL -1D753 MATHEMATICAL BOLD ITALIC PHI SYMBOL -1D754 MATHEMATICAL BOLD ITALIC RHO SYMBOL -1D755 MATHEMATICAL BOLD ITALIC PI SYMBOL -1D756 MATHEMATICAL SANS-SERIF BOLD CAPITAL ALPHA -1D757 MATHEMATICAL SANS-SERIF BOLD CAPITAL BETA -1D758 MATHEMATICAL SANS-SERIF BOLD CAPITAL GAMMA -1D759 MATHEMATICAL SANS-SERIF BOLD CAPITAL DELTA -1D75A MATHEMATICAL SANS-SERIF BOLD CAPITAL EPSILON -1D75B MATHEMATICAL SANS-SERIF BOLD CAPITAL ZETA -1D75C MATHEMATICAL SANS-SERIF BOLD CAPITAL ETA -1D75D MATHEMATICAL SANS-SERIF BOLD CAPITAL THETA -1D75E MATHEMATICAL SANS-SERIF BOLD CAPITAL IOTA -1D75F MATHEMATICAL SANS-SERIF BOLD CAPITAL KAPPA -1D760 MATHEMATICAL SANS-SERIF BOLD CAPITAL LAMDA -1D761 MATHEMATICAL SANS-SERIF BOLD CAPITAL MU -1D762 MATHEMATICAL SANS-SERIF BOLD CAPITAL NU -1D763 MATHEMATICAL SANS-SERIF BOLD CAPITAL XI -1D764 MATHEMATICAL SANS-SERIF BOLD CAPITAL OMICRON -1D765 MATHEMATICAL SANS-SERIF BOLD CAPITAL PI -1D766 MATHEMATICAL SANS-SERIF BOLD 
CAPITAL RHO -1D767 MATHEMATICAL SANS-SERIF BOLD CAPITAL THETA SYMBOL -1D768 MATHEMATICAL SANS-SERIF BOLD CAPITAL SIGMA -1D769 MATHEMATICAL SANS-SERIF BOLD CAPITAL TAU -1D76A MATHEMATICAL SANS-SERIF BOLD CAPITAL UPSILON -1D76B MATHEMATICAL SANS-SERIF BOLD CAPITAL PHI -1D76C MATHEMATICAL SANS-SERIF BOLD CAPITAL CHI -1D76D MATHEMATICAL SANS-SERIF BOLD CAPITAL PSI -1D76E MATHEMATICAL SANS-SERIF BOLD CAPITAL OMEGA -1D76F MATHEMATICAL SANS-SERIF BOLD NABLA -1D770 MATHEMATICAL SANS-SERIF BOLD SMALL ALPHA -1D771 MATHEMATICAL SANS-SERIF BOLD SMALL BETA -1D772 MATHEMATICAL SANS-SERIF BOLD SMALL GAMMA -1D773 MATHEMATICAL SANS-SERIF BOLD SMALL DELTA -1D774 MATHEMATICAL SANS-SERIF BOLD SMALL EPSILON -1D775 MATHEMATICAL SANS-SERIF BOLD SMALL ZETA -1D776 MATHEMATICAL SANS-SERIF BOLD SMALL ETA -1D777 MATHEMATICAL SANS-SERIF BOLD SMALL THETA -1D778 MATHEMATICAL SANS-SERIF BOLD SMALL IOTA -1D779 MATHEMATICAL SANS-SERIF BOLD SMALL KAPPA -1D77A MATHEMATICAL SANS-SERIF BOLD SMALL LAMDA -1D77B MATHEMATICAL SANS-SERIF BOLD SMALL MU -1D77C MATHEMATICAL SANS-SERIF BOLD SMALL NU -1D77D MATHEMATICAL SANS-SERIF BOLD SMALL XI -1D77E MATHEMATICAL SANS-SERIF BOLD SMALL OMICRON -1D77F MATHEMATICAL SANS-SERIF BOLD SMALL PI -1D780 MATHEMATICAL SANS-SERIF BOLD SMALL RHO -1D781 MATHEMATICAL SANS-SERIF BOLD SMALL FINAL SIGMA -1D782 MATHEMATICAL SANS-SERIF BOLD SMALL SIGMA -1D783 MATHEMATICAL SANS-SERIF BOLD SMALL TAU -1D784 MATHEMATICAL SANS-SERIF BOLD SMALL UPSILON -1D785 MATHEMATICAL SANS-SERIF BOLD SMALL PHI -1D786 MATHEMATICAL SANS-SERIF BOLD SMALL CHI -1D787 MATHEMATICAL SANS-SERIF BOLD SMALL PSI -1D788 MATHEMATICAL SANS-SERIF BOLD SMALL OMEGA -1D789 MATHEMATICAL SANS-SERIF BOLD PARTIAL DIFFERENTIAL -1D78A MATHEMATICAL SANS-SERIF BOLD EPSILON SYMBOL -1D78B MATHEMATICAL SANS-SERIF BOLD THETA SYMBOL -1D78C MATHEMATICAL SANS-SERIF BOLD KAPPA SYMBOL -1D78D MATHEMATICAL SANS-SERIF BOLD PHI SYMBOL -1D78E MATHEMATICAL SANS-SERIF BOLD RHO SYMBOL -1D78F MATHEMATICAL SANS-SERIF BOLD PI SYMBOL -1D790 
MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL ALPHA -1D791 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL BETA -1D792 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL GAMMA -1D793 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL DELTA -1D794 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL EPSILON -1D795 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL ZETA -1D796 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL ETA -1D797 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL THETA -1D798 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL IOTA -1D799 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL KAPPA -1D79A MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL LAMDA -1D79B MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL MU -1D79C MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL NU -1D79D MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL XI -1D79E MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL OMICRON -1D79F MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL PI -1D7A0 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL RHO -1D7A1 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL THETA SYMBOL -1D7A2 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL SIGMA -1D7A3 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL TAU -1D7A4 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL UPSILON -1D7A5 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL PHI -1D7A6 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL CHI -1D7A7 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL PSI -1D7A8 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL OMEGA -1D7A9 MATHEMATICAL SANS-SERIF BOLD ITALIC NABLA -1D7AA MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL ALPHA -1D7AB MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL BETA -1D7AC MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL GAMMA -1D7AD MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL DELTA -1D7AE MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL EPSILON -1D7AF MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL ZETA -1D7B0 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL ETA -1D7B1 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL THETA -1D7B2 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL IOTA -1D7B3 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL KAPPA 
-1D7B4 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL LAMDA -1D7B5 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL MU -1D7B6 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL NU -1D7B7 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL XI -1D7B8 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL OMICRON -1D7B9 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL PI -1D7BA MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL RHO -1D7BB MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL FINAL SIGMA -1D7BC MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL SIGMA -1D7BD MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL TAU -1D7BE MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL UPSILON -1D7BF MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL PHI -1D7C0 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL CHI -1D7C1 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL PSI -1D7C2 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL OMEGA -1D7C3 MATHEMATICAL SANS-SERIF BOLD ITALIC PARTIAL DIFFERENTIAL -1D7C4 MATHEMATICAL SANS-SERIF BOLD ITALIC EPSILON SYMBOL -1D7C5 MATHEMATICAL SANS-SERIF BOLD ITALIC THETA SYMBOL -1D7C6 MATHEMATICAL SANS-SERIF BOLD ITALIC KAPPA SYMBOL -1D7C7 MATHEMATICAL SANS-SERIF BOLD ITALIC PHI SYMBOL -1D7C8 MATHEMATICAL SANS-SERIF BOLD ITALIC RHO SYMBOL -1D7C9 MATHEMATICAL SANS-SERIF BOLD ITALIC PI SYMBOL -1D7CA MATHEMATICAL BOLD CAPITAL DIGAMMA -1D7CB MATHEMATICAL BOLD SMALL DIGAMMA -1D7CE MATHEMATICAL BOLD DIGIT ZERO -1D7CF MATHEMATICAL BOLD DIGIT ONE -1D7D0 MATHEMATICAL BOLD DIGIT TWO -1D7D1 MATHEMATICAL BOLD DIGIT THREE -1D7D2 MATHEMATICAL BOLD DIGIT FOUR -1D7D3 MATHEMATICAL BOLD DIGIT FIVE -1D7D4 MATHEMATICAL BOLD DIGIT SIX -1D7D5 MATHEMATICAL BOLD DIGIT SEVEN -1D7D6 MATHEMATICAL BOLD DIGIT EIGHT -1D7D7 MATHEMATICAL BOLD DIGIT NINE -1D7D8 MATHEMATICAL DOUBLE-STRUCK DIGIT ZERO -1D7D9 MATHEMATICAL DOUBLE-STRUCK DIGIT ONE -1D7DA MATHEMATICAL DOUBLE-STRUCK DIGIT TWO -1D7DB MATHEMATICAL DOUBLE-STRUCK DIGIT THREE -1D7DC MATHEMATICAL DOUBLE-STRUCK DIGIT FOUR -1D7DD MATHEMATICAL DOUBLE-STRUCK DIGIT FIVE -1D7DE MATHEMATICAL DOUBLE-STRUCK DIGIT SIX -1D7DF MATHEMATICAL DOUBLE-STRUCK 
DIGIT SEVEN -1D7E0 MATHEMATICAL DOUBLE-STRUCK DIGIT EIGHT -1D7E1 MATHEMATICAL DOUBLE-STRUCK DIGIT NINE -1D7E2 MATHEMATICAL SANS-SERIF DIGIT ZERO -1D7E3 MATHEMATICAL SANS-SERIF DIGIT ONE -1D7E4 MATHEMATICAL SANS-SERIF DIGIT TWO -1D7E5 MATHEMATICAL SANS-SERIF DIGIT THREE -1D7E6 MATHEMATICAL SANS-SERIF DIGIT FOUR -1D7E7 MATHEMATICAL SANS-SERIF DIGIT FIVE -1D7E8 MATHEMATICAL SANS-SERIF DIGIT SIX -1D7E9 MATHEMATICAL SANS-SERIF DIGIT SEVEN -1D7EA MATHEMATICAL SANS-SERIF DIGIT EIGHT -1D7EB MATHEMATICAL SANS-SERIF DIGIT NINE -1D7EC MATHEMATICAL SANS-SERIF BOLD DIGIT ZERO -1D7ED MATHEMATICAL SANS-SERIF BOLD DIGIT ONE -1D7EE MATHEMATICAL SANS-SERIF BOLD DIGIT TWO -1D7EF MATHEMATICAL SANS-SERIF BOLD DIGIT THREE -1D7F0 MATHEMATICAL SANS-SERIF BOLD DIGIT FOUR -1D7F1 MATHEMATICAL SANS-SERIF BOLD DIGIT FIVE -1D7F2 MATHEMATICAL SANS-SERIF BOLD DIGIT SIX -1D7F3 MATHEMATICAL SANS-SERIF BOLD DIGIT SEVEN -1D7F4 MATHEMATICAL SANS-SERIF BOLD DIGIT EIGHT -1D7F5 MATHEMATICAL SANS-SERIF BOLD DIGIT NINE -1D7F6 MATHEMATICAL MONOSPACE DIGIT ZERO -1D7F7 MATHEMATICAL MONOSPACE DIGIT ONE -1D7F8 MATHEMATICAL MONOSPACE DIGIT TWO -1D7F9 MATHEMATICAL MONOSPACE DIGIT THREE -1D7FA MATHEMATICAL MONOSPACE DIGIT FOUR -1D7FB MATHEMATICAL MONOSPACE DIGIT FIVE -1D7FC MATHEMATICAL MONOSPACE DIGIT SIX -1D7FD MATHEMATICAL MONOSPACE DIGIT SEVEN -1D7FE MATHEMATICAL MONOSPACE DIGIT EIGHT -1D7FF MATHEMATICAL MONOSPACE DIGIT NINE -1F000 MAHJONG TILE EAST WIND -1F001 MAHJONG TILE SOUTH WIND -1F002 MAHJONG TILE WEST WIND -1F003 MAHJONG TILE NORTH WIND -1F004 MAHJONG TILE RED DRAGON -1F005 MAHJONG TILE GREEN DRAGON -1F006 MAHJONG TILE WHITE DRAGON -1F007 MAHJONG TILE ONE OF CHARACTERS -1F008 MAHJONG TILE TWO OF CHARACTERS -1F009 MAHJONG TILE THREE OF CHARACTERS -1F00A MAHJONG TILE FOUR OF CHARACTERS -1F00B MAHJONG TILE FIVE OF CHARACTERS -1F00C MAHJONG TILE SIX OF CHARACTERS -1F00D MAHJONG TILE SEVEN OF CHARACTERS -1F00E MAHJONG TILE EIGHT OF CHARACTERS -1F00F MAHJONG TILE NINE OF CHARACTERS -1F010 MAHJONG TILE ONE OF 
BAMBOOS -1F011 MAHJONG TILE TWO OF BAMBOOS -1F012 MAHJONG TILE THREE OF BAMBOOS -1F013 MAHJONG TILE FOUR OF BAMBOOS -1F014 MAHJONG TILE FIVE OF BAMBOOS -1F015 MAHJONG TILE SIX OF BAMBOOS -1F016 MAHJONG TILE SEVEN OF BAMBOOS -1F017 MAHJONG TILE EIGHT OF BAMBOOS -1F018 MAHJONG TILE NINE OF BAMBOOS -1F019 MAHJONG TILE ONE OF CIRCLES -1F01A MAHJONG TILE TWO OF CIRCLES -1F01B MAHJONG TILE THREE OF CIRCLES -1F01C MAHJONG TILE FOUR OF CIRCLES -1F01D MAHJONG TILE FIVE OF CIRCLES -1F01E MAHJONG TILE SIX OF CIRCLES -1F01F MAHJONG TILE SEVEN OF CIRCLES -1F020 MAHJONG TILE EIGHT OF CIRCLES -1F021 MAHJONG TILE NINE OF CIRCLES -1F022 MAHJONG TILE PLUM -1F023 MAHJONG TILE ORCHID -1F024 MAHJONG TILE BAMBOO -1F025 MAHJONG TILE CHRYSANTHEMUM -1F026 MAHJONG TILE SPRING -1F027 MAHJONG TILE SUMMER -1F028 MAHJONG TILE AUTUMN -1F029 MAHJONG TILE WINTER -1F02A MAHJONG TILE JOKER -1F02B MAHJONG TILE BACK -1F030 DOMINO TILE HORIZONTAL BACK -1F031 DOMINO TILE HORIZONTAL-00-00 -1F032 DOMINO TILE HORIZONTAL-00-01 -1F033 DOMINO TILE HORIZONTAL-00-02 -1F034 DOMINO TILE HORIZONTAL-00-03 -1F035 DOMINO TILE HORIZONTAL-00-04 -1F036 DOMINO TILE HORIZONTAL-00-05 -1F037 DOMINO TILE HORIZONTAL-00-06 -1F038 DOMINO TILE HORIZONTAL-01-00 -1F039 DOMINO TILE HORIZONTAL-01-01 -1F03A DOMINO TILE HORIZONTAL-01-02 -1F03B DOMINO TILE HORIZONTAL-01-03 -1F03C DOMINO TILE HORIZONTAL-01-04 -1F03D DOMINO TILE HORIZONTAL-01-05 -1F03E DOMINO TILE HORIZONTAL-01-06 -1F03F DOMINO TILE HORIZONTAL-02-00 -1F040 DOMINO TILE HORIZONTAL-02-01 -1F041 DOMINO TILE HORIZONTAL-02-02 -1F042 DOMINO TILE HORIZONTAL-02-03 -1F043 DOMINO TILE HORIZONTAL-02-04 -1F044 DOMINO TILE HORIZONTAL-02-05 -1F045 DOMINO TILE HORIZONTAL-02-06 -1F046 DOMINO TILE HORIZONTAL-03-00 -1F047 DOMINO TILE HORIZONTAL-03-01 -1F048 DOMINO TILE HORIZONTAL-03-02 -1F049 DOMINO TILE HORIZONTAL-03-03 -1F04A DOMINO TILE HORIZONTAL-03-04 -1F04B DOMINO TILE HORIZONTAL-03-05 -1F04C DOMINO TILE HORIZONTAL-03-06 -1F04D DOMINO TILE HORIZONTAL-04-00 -1F04E DOMINO TILE 
HORIZONTAL-04-01 -1F04F DOMINO TILE HORIZONTAL-04-02 -1F050 DOMINO TILE HORIZONTAL-04-03 -1F051 DOMINO TILE HORIZONTAL-04-04 -1F052 DOMINO TILE HORIZONTAL-04-05 -1F053 DOMINO TILE HORIZONTAL-04-06 -1F054 DOMINO TILE HORIZONTAL-05-00 -1F055 DOMINO TILE HORIZONTAL-05-01 -1F056 DOMINO TILE HORIZONTAL-05-02 -1F057 DOMINO TILE HORIZONTAL-05-03 -1F058 DOMINO TILE HORIZONTAL-05-04 -1F059 DOMINO TILE HORIZONTAL-05-05 -1F05A DOMINO TILE HORIZONTAL-05-06 -1F05B DOMINO TILE HORIZONTAL-06-00 -1F05C DOMINO TILE HORIZONTAL-06-01 -1F05D DOMINO TILE HORIZONTAL-06-02 -1F05E DOMINO TILE HORIZONTAL-06-03 -1F05F DOMINO TILE HORIZONTAL-06-04 -1F060 DOMINO TILE HORIZONTAL-06-05 -1F061 DOMINO TILE HORIZONTAL-06-06 -1F062 DOMINO TILE VERTICAL BACK -1F063 DOMINO TILE VERTICAL-00-00 -1F064 DOMINO TILE VERTICAL-00-01 -1F065 DOMINO TILE VERTICAL-00-02 -1F066 DOMINO TILE VERTICAL-00-03 -1F067 DOMINO TILE VERTICAL-00-04 -1F068 DOMINO TILE VERTICAL-00-05 -1F069 DOMINO TILE VERTICAL-00-06 -1F06A DOMINO TILE VERTICAL-01-00 -1F06B DOMINO TILE VERTICAL-01-01 -1F06C DOMINO TILE VERTICAL-01-02 -1F06D DOMINO TILE VERTICAL-01-03 -1F06E DOMINO TILE VERTICAL-01-04 -1F06F DOMINO TILE VERTICAL-01-05 -1F070 DOMINO TILE VERTICAL-01-06 -1F071 DOMINO TILE VERTICAL-02-00 -1F072 DOMINO TILE VERTICAL-02-01 -1F073 DOMINO TILE VERTICAL-02-02 -1F074 DOMINO TILE VERTICAL-02-03 -1F075 DOMINO TILE VERTICAL-02-04 -1F076 DOMINO TILE VERTICAL-02-05 -1F077 DOMINO TILE VERTICAL-02-06 -1F078 DOMINO TILE VERTICAL-03-00 -1F079 DOMINO TILE VERTICAL-03-01 -1F07A DOMINO TILE VERTICAL-03-02 -1F07B DOMINO TILE VERTICAL-03-03 -1F07C DOMINO TILE VERTICAL-03-04 -1F07D DOMINO TILE VERTICAL-03-05 -1F07E DOMINO TILE VERTICAL-03-06 -1F07F DOMINO TILE VERTICAL-04-00 -1F080 DOMINO TILE VERTICAL-04-01 -1F081 DOMINO TILE VERTICAL-04-02 -1F082 DOMINO TILE VERTICAL-04-03 -1F083 DOMINO TILE VERTICAL-04-04 -1F084 DOMINO TILE VERTICAL-04-05 -1F085 DOMINO TILE VERTICAL-04-06 -1F086 DOMINO TILE VERTICAL-05-00 -1F087 DOMINO TILE VERTICAL-05-01 -1F088 
DOMINO TILE VERTICAL-05-02 -1F089 DOMINO TILE VERTICAL-05-03 -1F08A DOMINO TILE VERTICAL-05-04 -1F08B DOMINO TILE VERTICAL-05-05 -1F08C DOMINO TILE VERTICAL-05-06 -1F08D DOMINO TILE VERTICAL-06-00 -1F08E DOMINO TILE VERTICAL-06-01 -1F08F DOMINO TILE VERTICAL-06-02 -1F090 DOMINO TILE VERTICAL-06-03 -1F091 DOMINO TILE VERTICAL-06-04 -1F092 DOMINO TILE VERTICAL-06-05 -1F093 DOMINO TILE VERTICAL-06-06 -1F100 DIGIT ZERO FULL STOP -1F101 DIGIT ZERO COMMA -1F102 DIGIT ONE COMMA -1F103 DIGIT TWO COMMA -1F104 DIGIT THREE COMMA -1F105 DIGIT FOUR COMMA -1F106 DIGIT FIVE COMMA -1F107 DIGIT SIX COMMA -1F108 DIGIT SEVEN COMMA -1F109 DIGIT EIGHT COMMA -1F10A DIGIT NINE COMMA -1F110 PARENTHESIZED LATIN CAPITAL LETTER A -1F111 PARENTHESIZED LATIN CAPITAL LETTER B -1F112 PARENTHESIZED LATIN CAPITAL LETTER C -1F113 PARENTHESIZED LATIN CAPITAL LETTER D -1F114 PARENTHESIZED LATIN CAPITAL LETTER E -1F115 PARENTHESIZED LATIN CAPITAL LETTER F -1F116 PARENTHESIZED LATIN CAPITAL LETTER G -1F117 PARENTHESIZED LATIN CAPITAL LETTER H -1F118 PARENTHESIZED LATIN CAPITAL LETTER I -1F119 PARENTHESIZED LATIN CAPITAL LETTER J -1F11A PARENTHESIZED LATIN CAPITAL LETTER K -1F11B PARENTHESIZED LATIN CAPITAL LETTER L -1F11C PARENTHESIZED LATIN CAPITAL LETTER M -1F11D PARENTHESIZED LATIN CAPITAL LETTER N -1F11E PARENTHESIZED LATIN CAPITAL LETTER O -1F11F PARENTHESIZED LATIN CAPITAL LETTER P -1F120 PARENTHESIZED LATIN CAPITAL LETTER Q -1F121 PARENTHESIZED LATIN CAPITAL LETTER R -1F122 PARENTHESIZED LATIN CAPITAL LETTER S -1F123 PARENTHESIZED LATIN CAPITAL LETTER T -1F124 PARENTHESIZED LATIN CAPITAL LETTER U -1F125 PARENTHESIZED LATIN CAPITAL LETTER V -1F126 PARENTHESIZED LATIN CAPITAL LETTER W -1F127 PARENTHESIZED LATIN CAPITAL LETTER X -1F128 PARENTHESIZED LATIN CAPITAL LETTER Y -1F129 PARENTHESIZED LATIN CAPITAL LETTER Z -1F12A TORTOISE SHELL BRACKETED LATIN CAPITAL LETTER S -1F12B CIRCLED ITALIC LATIN CAPITAL LETTER C -1F12C CIRCLED ITALIC LATIN CAPITAL LETTER R -1F12D CIRCLED CD -1F12E CIRCLED WZ 
-1F131 SQUARED LATIN CAPITAL LETTER B -1F13D SQUARED LATIN CAPITAL LETTER N -1F13F SQUARED LATIN CAPITAL LETTER P -1F142 SQUARED LATIN CAPITAL LETTER S -1F146 SQUARED LATIN CAPITAL LETTER W -1F14A SQUARED HV -1F14B SQUARED MV -1F14C SQUARED SD -1F14D SQUARED SS -1F14E SQUARED PPV -1F157 NEGATIVE CIRCLED LATIN CAPITAL LETTER H -1F15F NEGATIVE CIRCLED LATIN CAPITAL LETTER P -1F179 NEGATIVE SQUARED LATIN CAPITAL LETTER J -1F17B NEGATIVE SQUARED LATIN CAPITAL LETTER L -1F17C NEGATIVE SQUARED LATIN CAPITAL LETTER M -1F17F NEGATIVE SQUARED LATIN CAPITAL LETTER P -1F18A CROSSED NEGATIVE SQUARED LATIN CAPITAL LETTER P -1F18B NEGATIVE SQUARED IC -1F18C NEGATIVE SQUARED PA -1F18D NEGATIVE SQUARED SA -1F190 SQUARE DJ -1F200 SQUARE HIRAGANA HOKA -1F210 SQUARED CJK UNIFIED IDEOGRAPH-624B -1F211 SQUARED CJK UNIFIED IDEOGRAPH-5B57 -1F212 SQUARED CJK UNIFIED IDEOGRAPH-53CC -1F213 SQUARED KATAKANA DE -1F214 SQUARED CJK UNIFIED IDEOGRAPH-4E8C -1F215 SQUARED CJK UNIFIED IDEOGRAPH-591A -1F216 SQUARED CJK UNIFIED IDEOGRAPH-89E3 -1F217 SQUARED CJK UNIFIED IDEOGRAPH-5929 -1F218 SQUARED CJK UNIFIED IDEOGRAPH-4EA4 -1F219 SQUARED CJK UNIFIED IDEOGRAPH-6620 -1F21A SQUARED CJK UNIFIED IDEOGRAPH-7121 -1F21B SQUARED CJK UNIFIED IDEOGRAPH-6599 -1F21C SQUARED CJK UNIFIED IDEOGRAPH-524D -1F21D SQUARED CJK UNIFIED IDEOGRAPH-5F8C -1F21E SQUARED CJK UNIFIED IDEOGRAPH-518D -1F21F SQUARED CJK UNIFIED IDEOGRAPH-65B0 -1F220 SQUARED CJK UNIFIED IDEOGRAPH-521D -1F221 SQUARED CJK UNIFIED IDEOGRAPH-7D42 -1F222 SQUARED CJK UNIFIED IDEOGRAPH-751F -1F223 SQUARED CJK UNIFIED IDEOGRAPH-8CA9 -1F224 SQUARED CJK UNIFIED IDEOGRAPH-58F0 -1F225 SQUARED CJK UNIFIED IDEOGRAPH-5439 -1F226 SQUARED CJK UNIFIED IDEOGRAPH-6F14 -1F227 SQUARED CJK UNIFIED IDEOGRAPH-6295 -1F228 SQUARED CJK UNIFIED IDEOGRAPH-6355 -1F229 SQUARED CJK UNIFIED IDEOGRAPH-4E00 -1F22A SQUARED CJK UNIFIED IDEOGRAPH-4E09 -1F22B SQUARED CJK UNIFIED IDEOGRAPH-904A -1F22C SQUARED CJK UNIFIED IDEOGRAPH-5DE6 -1F22D SQUARED CJK UNIFIED IDEOGRAPH-4E2D -1F22E 
SQUARED CJK UNIFIED IDEOGRAPH-53F3 -1F22F SQUARED CJK UNIFIED IDEOGRAPH-6307 -1F230 SQUARED CJK UNIFIED IDEOGRAPH-8D70 -1F231 SQUARED CJK UNIFIED IDEOGRAPH-6253 -1F240 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-672C -1F241 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-4E09 -1F242 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-4E8C -1F243 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-5B89 -1F244 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-70B9 -1F245 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-6253 -1F246 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-76D7 -1F247 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-52DD -1F248 TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRAPH-6557 -20000 <CJK Ideograph Extension B, First> -2A6D6 <CJK Ideograph Extension B, Last> -2A700 <CJK Ideograph Extension C, First> -2B734 <CJK Ideograph Extension C, Last> -2F800 CJK COMPATIBILITY IDEOGRAPH-2F800 -2F801 CJK COMPATIBILITY IDEOGRAPH-2F801 -2F802 CJK COMPATIBILITY IDEOGRAPH-2F802 -2F803 CJK COMPATIBILITY IDEOGRAPH-2F803 -2F804 CJK COMPATIBILITY IDEOGRAPH-2F804 -2F805 CJK COMPATIBILITY IDEOGRAPH-2F805 -2F806 CJK COMPATIBILITY IDEOGRAPH-2F806 -2F807 CJK COMPATIBILITY IDEOGRAPH-2F807 -2F808 CJK COMPATIBILITY IDEOGRAPH-2F808 -2F809 CJK COMPATIBILITY IDEOGRAPH-2F809 -2F80A CJK COMPATIBILITY IDEOGRAPH-2F80A -2F80B CJK COMPATIBILITY IDEOGRAPH-2F80B -2F80C CJK COMPATIBILITY IDEOGRAPH-2F80C -2F80D CJK COMPATIBILITY IDEOGRAPH-2F80D -2F80E CJK COMPATIBILITY IDEOGRAPH-2F80E -2F80F CJK COMPATIBILITY IDEOGRAPH-2F80F -2F810 CJK COMPATIBILITY IDEOGRAPH-2F810 -2F811 CJK COMPATIBILITY IDEOGRAPH-2F811 -2F812 CJK COMPATIBILITY IDEOGRAPH-2F812 -2F813 CJK COMPATIBILITY IDEOGRAPH-2F813 -2F814 CJK COMPATIBILITY IDEOGRAPH-2F814 -2F815 CJK COMPATIBILITY IDEOGRAPH-2F815 -2F816 CJK COMPATIBILITY IDEOGRAPH-2F816 -2F817 CJK COMPATIBILITY IDEOGRAPH-2F817 -2F818 CJK COMPATIBILITY IDEOGRAPH-2F818 -2F819 CJK COMPATIBILITY IDEOGRAPH-2F819 -2F81A CJK COMPATIBILITY IDEOGRAPH-2F81A -2F81B CJK COMPATIBILITY 
IDEOGRAPH-2F81B -2F81C CJK COMPATIBILITY IDEOGRAPH-2F81C -2F81D CJK COMPATIBILITY IDEOGRAPH-2F81D -2F81E CJK COMPATIBILITY IDEOGRAPH-2F81E -2F81F CJK COMPATIBILITY IDEOGRAPH-2F81F -2F820 CJK COMPATIBILITY IDEOGRAPH-2F820 -2F821 CJK COMPATIBILITY IDEOGRAPH-2F821 -2F822 CJK COMPATIBILITY IDEOGRAPH-2F822 -2F823 CJK COMPATIBILITY IDEOGRAPH-2F823 -2F824 CJK COMPATIBILITY IDEOGRAPH-2F824 -2F825 CJK COMPATIBILITY IDEOGRAPH-2F825 -2F826 CJK COMPATIBILITY IDEOGRAPH-2F826 -2F827 CJK COMPATIBILITY IDEOGRAPH-2F827 -2F828 CJK COMPATIBILITY IDEOGRAPH-2F828 -2F829 CJK COMPATIBILITY IDEOGRAPH-2F829 -2F82A CJK COMPATIBILITY IDEOGRAPH-2F82A -2F82B CJK COMPATIBILITY IDEOGRAPH-2F82B -2F82C CJK COMPATIBILITY IDEOGRAPH-2F82C -2F82D CJK COMPATIBILITY IDEOGRAPH-2F82D -2F82E CJK COMPATIBILITY IDEOGRAPH-2F82E -2F82F CJK COMPATIBILITY IDEOGRAPH-2F82F -2F830 CJK COMPATIBILITY IDEOGRAPH-2F830 -2F831 CJK COMPATIBILITY IDEOGRAPH-2F831 -2F832 CJK COMPATIBILITY IDEOGRAPH-2F832 -2F833 CJK COMPATIBILITY IDEOGRAPH-2F833 -2F834 CJK COMPATIBILITY IDEOGRAPH-2F834 -2F835 CJK COMPATIBILITY IDEOGRAPH-2F835 -2F836 CJK COMPATIBILITY IDEOGRAPH-2F836 -2F837 CJK COMPATIBILITY IDEOGRAPH-2F837 -2F838 CJK COMPATIBILITY IDEOGRAPH-2F838 -2F839 CJK COMPATIBILITY IDEOGRAPH-2F839 -2F83A CJK COMPATIBILITY IDEOGRAPH-2F83A -2F83B CJK COMPATIBILITY IDEOGRAPH-2F83B -2F83C CJK COMPATIBILITY IDEOGRAPH-2F83C -2F83D CJK COMPATIBILITY IDEOGRAPH-2F83D -2F83E CJK COMPATIBILITY IDEOGRAPH-2F83E -2F83F CJK COMPATIBILITY IDEOGRAPH-2F83F -2F840 CJK COMPATIBILITY IDEOGRAPH-2F840 -2F841 CJK COMPATIBILITY IDEOGRAPH-2F841 -2F842 CJK COMPATIBILITY IDEOGRAPH-2F842 -2F843 CJK COMPATIBILITY IDEOGRAPH-2F843 -2F844 CJK COMPATIBILITY IDEOGRAPH-2F844 -2F845 CJK COMPATIBILITY IDEOGRAPH-2F845 -2F846 CJK COMPATIBILITY IDEOGRAPH-2F846 -2F847 CJK COMPATIBILITY IDEOGRAPH-2F847 -2F848 CJK COMPATIBILITY IDEOGRAPH-2F848 -2F849 CJK COMPATIBILITY IDEOGRAPH-2F849 -2F84A CJK COMPATIBILITY IDEOGRAPH-2F84A -2F84B CJK COMPATIBILITY IDEOGRAPH-2F84B -2F84C CJK 
COMPATIBILITY IDEOGRAPH-2F84C -2F84D CJK COMPATIBILITY IDEOGRAPH-2F84D -2F84E CJK COMPATIBILITY IDEOGRAPH-2F84E -2F84F CJK COMPATIBILITY IDEOGRAPH-2F84F -2F850 CJK COMPATIBILITY IDEOGRAPH-2F850 -2F851 CJK COMPATIBILITY IDEOGRAPH-2F851 -2F852 CJK COMPATIBILITY IDEOGRAPH-2F852 -2F853 CJK COMPATIBILITY IDEOGRAPH-2F853 -2F854 CJK COMPATIBILITY IDEOGRAPH-2F854 -2F855 CJK COMPATIBILITY IDEOGRAPH-2F855 -2F856 CJK COMPATIBILITY IDEOGRAPH-2F856 -2F857 CJK COMPATIBILITY IDEOGRAPH-2F857 -2F858 CJK COMPATIBILITY IDEOGRAPH-2F858 -2F859 CJK COMPATIBILITY IDEOGRAPH-2F859 -2F85A CJK COMPATIBILITY IDEOGRAPH-2F85A -2F85B CJK COMPATIBILITY IDEOGRAPH-2F85B -2F85C CJK COMPATIBILITY IDEOGRAPH-2F85C -2F85D CJK COMPATIBILITY IDEOGRAPH-2F85D -2F85E CJK COMPATIBILITY IDEOGRAPH-2F85E -2F85F CJK COMPATIBILITY IDEOGRAPH-2F85F -2F860 CJK COMPATIBILITY IDEOGRAPH-2F860 -2F861 CJK COMPATIBILITY IDEOGRAPH-2F861 -2F862 CJK COMPATIBILITY IDEOGRAPH-2F862 -2F863 CJK COMPATIBILITY IDEOGRAPH-2F863 -2F864 CJK COMPATIBILITY IDEOGRAPH-2F864 -2F865 CJK COMPATIBILITY IDEOGRAPH-2F865 -2F866 CJK COMPATIBILITY IDEOGRAPH-2F866 -2F867 CJK COMPATIBILITY IDEOGRAPH-2F867 -2F868 CJK COMPATIBILITY IDEOGRAPH-2F868 -2F869 CJK COMPATIBILITY IDEOGRAPH-2F869 -2F86A CJK COMPATIBILITY IDEOGRAPH-2F86A -2F86B CJK COMPATIBILITY IDEOGRAPH-2F86B -2F86C CJK COMPATIBILITY IDEOGRAPH-2F86C -2F86D CJK COMPATIBILITY IDEOGRAPH-2F86D -2F86E CJK COMPATIBILITY IDEOGRAPH-2F86E -2F86F CJK COMPATIBILITY IDEOGRAPH-2F86F -2F870 CJK COMPATIBILITY IDEOGRAPH-2F870 -2F871 CJK COMPATIBILITY IDEOGRAPH-2F871 -2F872 CJK COMPATIBILITY IDEOGRAPH-2F872 -2F873 CJK COMPATIBILITY IDEOGRAPH-2F873 -2F874 CJK COMPATIBILITY IDEOGRAPH-2F874 -2F875 CJK COMPATIBILITY IDEOGRAPH-2F875 -2F876 CJK COMPATIBILITY IDEOGRAPH-2F876 -2F877 CJK COMPATIBILITY IDEOGRAPH-2F877 -2F878 CJK COMPATIBILITY IDEOGRAPH-2F878 -2F879 CJK COMPATIBILITY IDEOGRAPH-2F879 -2F87A CJK COMPATIBILITY IDEOGRAPH-2F87A -2F87B CJK COMPATIBILITY IDEOGRAPH-2F87B -2F87C CJK COMPATIBILITY IDEOGRAPH-2F87C 
-2F87D CJK COMPATIBILITY IDEOGRAPH-2F87D -2F87E CJK COMPATIBILITY IDEOGRAPH-2F87E -2F87F CJK COMPATIBILITY IDEOGRAPH-2F87F -2F880 CJK COMPATIBILITY IDEOGRAPH-2F880 -2F881 CJK COMPATIBILITY IDEOGRAPH-2F881 -2F882 CJK COMPATIBILITY IDEOGRAPH-2F882 -2F883 CJK COMPATIBILITY IDEOGRAPH-2F883 -2F884 CJK COMPATIBILITY IDEOGRAPH-2F884 -2F885 CJK COMPATIBILITY IDEOGRAPH-2F885 -2F886 CJK COMPATIBILITY IDEOGRAPH-2F886 -2F887 CJK COMPATIBILITY IDEOGRAPH-2F887 -2F888 CJK COMPATIBILITY IDEOGRAPH-2F888 -2F889 CJK COMPATIBILITY IDEOGRAPH-2F889 -2F88A CJK COMPATIBILITY IDEOGRAPH-2F88A -2F88B CJK COMPATIBILITY IDEOGRAPH-2F88B -2F88C CJK COMPATIBILITY IDEOGRAPH-2F88C -2F88D CJK COMPATIBILITY IDEOGRAPH-2F88D -2F88E CJK COMPATIBILITY IDEOGRAPH-2F88E -2F88F CJK COMPATIBILITY IDEOGRAPH-2F88F -2F890 CJK COMPATIBILITY IDEOGRAPH-2F890 -2F891 CJK COMPATIBILITY IDEOGRAPH-2F891 -2F892 CJK COMPATIBILITY IDEOGRAPH-2F892 -2F893 CJK COMPATIBILITY IDEOGRAPH-2F893 -2F894 CJK COMPATIBILITY IDEOGRAPH-2F894 -2F895 CJK COMPATIBILITY IDEOGRAPH-2F895 -2F896 CJK COMPATIBILITY IDEOGRAPH-2F896 -2F897 CJK COMPATIBILITY IDEOGRAPH-2F897 -2F898 CJK COMPATIBILITY IDEOGRAPH-2F898 -2F899 CJK COMPATIBILITY IDEOGRAPH-2F899 -2F89A CJK COMPATIBILITY IDEOGRAPH-2F89A -2F89B CJK COMPATIBILITY IDEOGRAPH-2F89B -2F89C CJK COMPATIBILITY IDEOGRAPH-2F89C -2F89D CJK COMPATIBILITY IDEOGRAPH-2F89D -2F89E CJK COMPATIBILITY IDEOGRAPH-2F89E -2F89F CJK COMPATIBILITY IDEOGRAPH-2F89F -2F8A0 CJK COMPATIBILITY IDEOGRAPH-2F8A0 -2F8A1 CJK COMPATIBILITY IDEOGRAPH-2F8A1 -2F8A2 CJK COMPATIBILITY IDEOGRAPH-2F8A2 -2F8A3 CJK COMPATIBILITY IDEOGRAPH-2F8A3 -2F8A4 CJK COMPATIBILITY IDEOGRAPH-2F8A4 -2F8A5 CJK COMPATIBILITY IDEOGRAPH-2F8A5 -2F8A6 CJK COMPATIBILITY IDEOGRAPH-2F8A6 -2F8A7 CJK COMPATIBILITY IDEOGRAPH-2F8A7 -2F8A8 CJK COMPATIBILITY IDEOGRAPH-2F8A8 -2F8A9 CJK COMPATIBILITY IDEOGRAPH-2F8A9 -2F8AA CJK COMPATIBILITY IDEOGRAPH-2F8AA -2F8AB CJK COMPATIBILITY IDEOGRAPH-2F8AB -2F8AC CJK COMPATIBILITY IDEOGRAPH-2F8AC -2F8AD CJK COMPATIBILITY 
IDEOGRAPH-2F8AD -2F8AE CJK COMPATIBILITY IDEOGRAPH-2F8AE -2F8AF CJK COMPATIBILITY IDEOGRAPH-2F8AF -2F8B0 CJK COMPATIBILITY IDEOGRAPH-2F8B0 -2F8B1 CJK COMPATIBILITY IDEOGRAPH-2F8B1 -2F8B2 CJK COMPATIBILITY IDEOGRAPH-2F8B2 -2F8B3 CJK COMPATIBILITY IDEOGRAPH-2F8B3 -2F8B4 CJK COMPATIBILITY IDEOGRAPH-2F8B4 -2F8B5 CJK COMPATIBILITY IDEOGRAPH-2F8B5 -2F8B6 CJK COMPATIBILITY IDEOGRAPH-2F8B6 -2F8B7 CJK COMPATIBILITY IDEOGRAPH-2F8B7 -2F8B8 CJK COMPATIBILITY IDEOGRAPH-2F8B8 -2F8B9 CJK COMPATIBILITY IDEOGRAPH-2F8B9 -2F8BA CJK COMPATIBILITY IDEOGRAPH-2F8BA -2F8BB CJK COMPATIBILITY IDEOGRAPH-2F8BB -2F8BC CJK COMPATIBILITY IDEOGRAPH-2F8BC -2F8BD CJK COMPATIBILITY IDEOGRAPH-2F8BD -2F8BE CJK COMPATIBILITY IDEOGRAPH-2F8BE -2F8BF CJK COMPATIBILITY IDEOGRAPH-2F8BF -2F8C0 CJK COMPATIBILITY IDEOGRAPH-2F8C0 -2F8C1 CJK COMPATIBILITY IDEOGRAPH-2F8C1 -2F8C2 CJK COMPATIBILITY IDEOGRAPH-2F8C2 -2F8C3 CJK COMPATIBILITY IDEOGRAPH-2F8C3 -2F8C4 CJK COMPATIBILITY IDEOGRAPH-2F8C4 -2F8C5 CJK COMPATIBILITY IDEOGRAPH-2F8C5 -2F8C6 CJK COMPATIBILITY IDEOGRAPH-2F8C6 -2F8C7 CJK COMPATIBILITY IDEOGRAPH-2F8C7 -2F8C8 CJK COMPATIBILITY IDEOGRAPH-2F8C8 -2F8C9 CJK COMPATIBILITY IDEOGRAPH-2F8C9 -2F8CA CJK COMPATIBILITY IDEOGRAPH-2F8CA -2F8CB CJK COMPATIBILITY IDEOGRAPH-2F8CB -2F8CC CJK COMPATIBILITY IDEOGRAPH-2F8CC -2F8CD CJK COMPATIBILITY IDEOGRAPH-2F8CD -2F8CE CJK COMPATIBILITY IDEOGRAPH-2F8CE -2F8CF CJK COMPATIBILITY IDEOGRAPH-2F8CF -2F8D0 CJK COMPATIBILITY IDEOGRAPH-2F8D0 -2F8D1 CJK COMPATIBILITY IDEOGRAPH-2F8D1 -2F8D2 CJK COMPATIBILITY IDEOGRAPH-2F8D2 -2F8D3 CJK COMPATIBILITY IDEOGRAPH-2F8D3 -2F8D4 CJK COMPATIBILITY IDEOGRAPH-2F8D4 -2F8D5 CJK COMPATIBILITY IDEOGRAPH-2F8D5 -2F8D6 CJK COMPATIBILITY IDEOGRAPH-2F8D6 -2F8D7 CJK COMPATIBILITY IDEOGRAPH-2F8D7 -2F8D8 CJK COMPATIBILITY IDEOGRAPH-2F8D8 -2F8D9 CJK COMPATIBILITY IDEOGRAPH-2F8D9 -2F8DA CJK COMPATIBILITY IDEOGRAPH-2F8DA -2F8DB CJK COMPATIBILITY IDEOGRAPH-2F8DB -2F8DC CJK COMPATIBILITY IDEOGRAPH-2F8DC -2F8DD CJK COMPATIBILITY IDEOGRAPH-2F8DD -2F8DE CJK 
COMPATIBILITY IDEOGRAPH-2F8DE -2F8DF CJK COMPATIBILITY IDEOGRAPH-2F8DF -2F8E0 CJK COMPATIBILITY IDEOGRAPH-2F8E0 -2F8E1 CJK COMPATIBILITY IDEOGRAPH-2F8E1 -2F8E2 CJK COMPATIBILITY IDEOGRAPH-2F8E2 -2F8E3 CJK COMPATIBILITY IDEOGRAPH-2F8E3 -2F8E4 CJK COMPATIBILITY IDEOGRAPH-2F8E4 -2F8E5 CJK COMPATIBILITY IDEOGRAPH-2F8E5 -2F8E6 CJK COMPATIBILITY IDEOGRAPH-2F8E6 -2F8E7 CJK COMPATIBILITY IDEOGRAPH-2F8E7 -2F8E8 CJK COMPATIBILITY IDEOGRAPH-2F8E8 -2F8E9 CJK COMPATIBILITY IDEOGRAPH-2F8E9 -2F8EA CJK COMPATIBILITY IDEOGRAPH-2F8EA -2F8EB CJK COMPATIBILITY IDEOGRAPH-2F8EB -2F8EC CJK COMPATIBILITY IDEOGRAPH-2F8EC -2F8ED CJK COMPATIBILITY IDEOGRAPH-2F8ED -2F8EE CJK COMPATIBILITY IDEOGRAPH-2F8EE -2F8EF CJK COMPATIBILITY IDEOGRAPH-2F8EF -2F8F0 CJK COMPATIBILITY IDEOGRAPH-2F8F0 -2F8F1 CJK COMPATIBILITY IDEOGRAPH-2F8F1 -2F8F2 CJK COMPATIBILITY IDEOGRAPH-2F8F2 -2F8F3 CJK COMPATIBILITY IDEOGRAPH-2F8F3 -2F8F4 CJK COMPATIBILITY IDEOGRAPH-2F8F4 -2F8F5 CJK COMPATIBILITY IDEOGRAPH-2F8F5 -2F8F6 CJK COMPATIBILITY IDEOGRAPH-2F8F6 -2F8F7 CJK COMPATIBILITY IDEOGRAPH-2F8F7 -2F8F8 CJK COMPATIBILITY IDEOGRAPH-2F8F8 -2F8F9 CJK COMPATIBILITY IDEOGRAPH-2F8F9 -2F8FA CJK COMPATIBILITY IDEOGRAPH-2F8FA -2F8FB CJK COMPATIBILITY IDEOGRAPH-2F8FB -2F8FC CJK COMPATIBILITY IDEOGRAPH-2F8FC -2F8FD CJK COMPATIBILITY IDEOGRAPH-2F8FD -2F8FE CJK COMPATIBILITY IDEOGRAPH-2F8FE -2F8FF CJK COMPATIBILITY IDEOGRAPH-2F8FF -2F900 CJK COMPATIBILITY IDEOGRAPH-2F900 -2F901 CJK COMPATIBILITY IDEOGRAPH-2F901 -2F902 CJK COMPATIBILITY IDEOGRAPH-2F902 -2F903 CJK COMPATIBILITY IDEOGRAPH-2F903 -2F904 CJK COMPATIBILITY IDEOGRAPH-2F904 -2F905 CJK COMPATIBILITY IDEOGRAPH-2F905 -2F906 CJK COMPATIBILITY IDEOGRAPH-2F906 -2F907 CJK COMPATIBILITY IDEOGRAPH-2F907 -2F908 CJK COMPATIBILITY IDEOGRAPH-2F908 -2F909 CJK COMPATIBILITY IDEOGRAPH-2F909 -2F90A CJK COMPATIBILITY IDEOGRAPH-2F90A -2F90B CJK COMPATIBILITY IDEOGRAPH-2F90B -2F90C CJK COMPATIBILITY IDEOGRAPH-2F90C -2F90D CJK COMPATIBILITY IDEOGRAPH-2F90D -2F90E CJK COMPATIBILITY IDEOGRAPH-2F90E 
-2F90F CJK COMPATIBILITY IDEOGRAPH-2F90F -2F910 CJK COMPATIBILITY IDEOGRAPH-2F910 -2F911 CJK COMPATIBILITY IDEOGRAPH-2F911 -2F912 CJK COMPATIBILITY IDEOGRAPH-2F912 -2F913 CJK COMPATIBILITY IDEOGRAPH-2F913 -2F914 CJK COMPATIBILITY IDEOGRAPH-2F914 -2F915 CJK COMPATIBILITY IDEOGRAPH-2F915 -2F916 CJK COMPATIBILITY IDEOGRAPH-2F916 -2F917 CJK COMPATIBILITY IDEOGRAPH-2F917 -2F918 CJK COMPATIBILITY IDEOGRAPH-2F918 -2F919 CJK COMPATIBILITY IDEOGRAPH-2F919 -2F91A CJK COMPATIBILITY IDEOGRAPH-2F91A -2F91B CJK COMPATIBILITY IDEOGRAPH-2F91B -2F91C CJK COMPATIBILITY IDEOGRAPH-2F91C -2F91D CJK COMPATIBILITY IDEOGRAPH-2F91D -2F91E CJK COMPATIBILITY IDEOGRAPH-2F91E -2F91F CJK COMPATIBILITY IDEOGRAPH-2F91F -2F920 CJK COMPATIBILITY IDEOGRAPH-2F920 -2F921 CJK COMPATIBILITY IDEOGRAPH-2F921 -2F922 CJK COMPATIBILITY IDEOGRAPH-2F922 -2F923 CJK COMPATIBILITY IDEOGRAPH-2F923 -2F924 CJK COMPATIBILITY IDEOGRAPH-2F924 -2F925 CJK COMPATIBILITY IDEOGRAPH-2F925 -2F926 CJK COMPATIBILITY IDEOGRAPH-2F926 -2F927 CJK COMPATIBILITY IDEOGRAPH-2F927 -2F928 CJK COMPATIBILITY IDEOGRAPH-2F928 -2F929 CJK COMPATIBILITY IDEOGRAPH-2F929 -2F92A CJK COMPATIBILITY IDEOGRAPH-2F92A -2F92B CJK COMPATIBILITY IDEOGRAPH-2F92B -2F92C CJK COMPATIBILITY IDEOGRAPH-2F92C -2F92D CJK COMPATIBILITY IDEOGRAPH-2F92D -2F92E CJK COMPATIBILITY IDEOGRAPH-2F92E -2F92F CJK COMPATIBILITY IDEOGRAPH-2F92F -2F930 CJK COMPATIBILITY IDEOGRAPH-2F930 -2F931 CJK COMPATIBILITY IDEOGRAPH-2F931 -2F932 CJK COMPATIBILITY IDEOGRAPH-2F932 -2F933 CJK COMPATIBILITY IDEOGRAPH-2F933 -2F934 CJK COMPATIBILITY IDEOGRAPH-2F934 -2F935 CJK COMPATIBILITY IDEOGRAPH-2F935 -2F936 CJK COMPATIBILITY IDEOGRAPH-2F936 -2F937 CJK COMPATIBILITY IDEOGRAPH-2F937 -2F938 CJK COMPATIBILITY IDEOGRAPH-2F938 -2F939 CJK COMPATIBILITY IDEOGRAPH-2F939 -2F93A CJK COMPATIBILITY IDEOGRAPH-2F93A -2F93B CJK COMPATIBILITY IDEOGRAPH-2F93B -2F93C CJK COMPATIBILITY IDEOGRAPH-2F93C -2F93D CJK COMPATIBILITY IDEOGRAPH-2F93D -2F93E CJK COMPATIBILITY IDEOGRAPH-2F93E -2F93F CJK COMPATIBILITY 
IDEOGRAPH-2F93F -2F940 CJK COMPATIBILITY IDEOGRAPH-2F940 -2F941 CJK COMPATIBILITY IDEOGRAPH-2F941 -2F942 CJK COMPATIBILITY IDEOGRAPH-2F942 -2F943 CJK COMPATIBILITY IDEOGRAPH-2F943 -2F944 CJK COMPATIBILITY IDEOGRAPH-2F944 -2F945 CJK COMPATIBILITY IDEOGRAPH-2F945 -2F946 CJK COMPATIBILITY IDEOGRAPH-2F946 -2F947 CJK COMPATIBILITY IDEOGRAPH-2F947 -2F948 CJK COMPATIBILITY IDEOGRAPH-2F948 -2F949 CJK COMPATIBILITY IDEOGRAPH-2F949 -2F94A CJK COMPATIBILITY IDEOGRAPH-2F94A -2F94B CJK COMPATIBILITY IDEOGRAPH-2F94B -2F94C CJK COMPATIBILITY IDEOGRAPH-2F94C -2F94D CJK COMPATIBILITY IDEOGRAPH-2F94D -2F94E CJK COMPATIBILITY IDEOGRAPH-2F94E -2F94F CJK COMPATIBILITY IDEOGRAPH-2F94F -2F950 CJK COMPATIBILITY IDEOGRAPH-2F950 -2F951 CJK COMPATIBILITY IDEOGRAPH-2F951 -2F952 CJK COMPATIBILITY IDEOGRAPH-2F952 -2F953 CJK COMPATIBILITY IDEOGRAPH-2F953 -2F954 CJK COMPATIBILITY IDEOGRAPH-2F954 -2F955 CJK COMPATIBILITY IDEOGRAPH-2F955 -2F956 CJK COMPATIBILITY IDEOGRAPH-2F956 -2F957 CJK COMPATIBILITY IDEOGRAPH-2F957 -2F958 CJK COMPATIBILITY IDEOGRAPH-2F958 -2F959 CJK COMPATIBILITY IDEOGRAPH-2F959 -2F95A CJK COMPATIBILITY IDEOGRAPH-2F95A -2F95B CJK COMPATIBILITY IDEOGRAPH-2F95B -2F95C CJK COMPATIBILITY IDEOGRAPH-2F95C -2F95D CJK COMPATIBILITY IDEOGRAPH-2F95D -2F95E CJK COMPATIBILITY IDEOGRAPH-2F95E -2F95F CJK COMPATIBILITY IDEOGRAPH-2F95F -2F960 CJK COMPATIBILITY IDEOGRAPH-2F960 -2F961 CJK COMPATIBILITY IDEOGRAPH-2F961 -2F962 CJK COMPATIBILITY IDEOGRAPH-2F962 -2F963 CJK COMPATIBILITY IDEOGRAPH-2F963 -2F964 CJK COMPATIBILITY IDEOGRAPH-2F964 -2F965 CJK COMPATIBILITY IDEOGRAPH-2F965 -2F966 CJK COMPATIBILITY IDEOGRAPH-2F966 -2F967 CJK COMPATIBILITY IDEOGRAPH-2F967 -2F968 CJK COMPATIBILITY IDEOGRAPH-2F968 -2F969 CJK COMPATIBILITY IDEOGRAPH-2F969 -2F96A CJK COMPATIBILITY IDEOGRAPH-2F96A -2F96B CJK COMPATIBILITY IDEOGRAPH-2F96B -2F96C CJK COMPATIBILITY IDEOGRAPH-2F96C -2F96D CJK COMPATIBILITY IDEOGRAPH-2F96D -2F96E CJK COMPATIBILITY IDEOGRAPH-2F96E -2F96F CJK COMPATIBILITY IDEOGRAPH-2F96F -2F970 CJK 
COMPATIBILITY IDEOGRAPH-2F970 -2F971 CJK COMPATIBILITY IDEOGRAPH-2F971 -2F972 CJK COMPATIBILITY IDEOGRAPH-2F972 -2F973 CJK COMPATIBILITY IDEOGRAPH-2F973 -2F974 CJK COMPATIBILITY IDEOGRAPH-2F974 -2F975 CJK COMPATIBILITY IDEOGRAPH-2F975 -2F976 CJK COMPATIBILITY IDEOGRAPH-2F976 -2F977 CJK COMPATIBILITY IDEOGRAPH-2F977 -2F978 CJK COMPATIBILITY IDEOGRAPH-2F978 -2F979 CJK COMPATIBILITY IDEOGRAPH-2F979 -2F97A CJK COMPATIBILITY IDEOGRAPH-2F97A -2F97B CJK COMPATIBILITY IDEOGRAPH-2F97B -2F97C CJK COMPATIBILITY IDEOGRAPH-2F97C -2F97D CJK COMPATIBILITY IDEOGRAPH-2F97D -2F97E CJK COMPATIBILITY IDEOGRAPH-2F97E -2F97F CJK COMPATIBILITY IDEOGRAPH-2F97F -2F980 CJK COMPATIBILITY IDEOGRAPH-2F980 -2F981 CJK COMPATIBILITY IDEOGRAPH-2F981 -2F982 CJK COMPATIBILITY IDEOGRAPH-2F982 -2F983 CJK COMPATIBILITY IDEOGRAPH-2F983 -2F984 CJK COMPATIBILITY IDEOGRAPH-2F984 -2F985 CJK COMPATIBILITY IDEOGRAPH-2F985 -2F986 CJK COMPATIBILITY IDEOGRAPH-2F986 -2F987 CJK COMPATIBILITY IDEOGRAPH-2F987 -2F988 CJK COMPATIBILITY IDEOGRAPH-2F988 -2F989 CJK COMPATIBILITY IDEOGRAPH-2F989 -2F98A CJK COMPATIBILITY IDEOGRAPH-2F98A -2F98B CJK COMPATIBILITY IDEOGRAPH-2F98B -2F98C CJK COMPATIBILITY IDEOGRAPH-2F98C -2F98D CJK COMPATIBILITY IDEOGRAPH-2F98D -2F98E CJK COMPATIBILITY IDEOGRAPH-2F98E -2F98F CJK COMPATIBILITY IDEOGRAPH-2F98F -2F990 CJK COMPATIBILITY IDEOGRAPH-2F990 -2F991 CJK COMPATIBILITY IDEOGRAPH-2F991 -2F992 CJK COMPATIBILITY IDEOGRAPH-2F992 -2F993 CJK COMPATIBILITY IDEOGRAPH-2F993 -2F994 CJK COMPATIBILITY IDEOGRAPH-2F994 -2F995 CJK COMPATIBILITY IDEOGRAPH-2F995 -2F996 CJK COMPATIBILITY IDEOGRAPH-2F996 -2F997 CJK COMPATIBILITY IDEOGRAPH-2F997 -2F998 CJK COMPATIBILITY IDEOGRAPH-2F998 -2F999 CJK COMPATIBILITY IDEOGRAPH-2F999 -2F99A CJK COMPATIBILITY IDEOGRAPH-2F99A -2F99B CJK COMPATIBILITY IDEOGRAPH-2F99B -2F99C CJK COMPATIBILITY IDEOGRAPH-2F99C -2F99D CJK COMPATIBILITY IDEOGRAPH-2F99D -2F99E CJK COMPATIBILITY IDEOGRAPH-2F99E -2F99F CJK COMPATIBILITY IDEOGRAPH-2F99F -2F9A0 CJK COMPATIBILITY IDEOGRAPH-2F9A0 
-2F9A1 CJK COMPATIBILITY IDEOGRAPH-2F9A1 -2F9A2 CJK COMPATIBILITY IDEOGRAPH-2F9A2 -2F9A3 CJK COMPATIBILITY IDEOGRAPH-2F9A3 -2F9A4 CJK COMPATIBILITY IDEOGRAPH-2F9A4 -2F9A5 CJK COMPATIBILITY IDEOGRAPH-2F9A5 -2F9A6 CJK COMPATIBILITY IDEOGRAPH-2F9A6 -2F9A7 CJK COMPATIBILITY IDEOGRAPH-2F9A7 -2F9A8 CJK COMPATIBILITY IDEOGRAPH-2F9A8 -2F9A9 CJK COMPATIBILITY IDEOGRAPH-2F9A9 -2F9AA CJK COMPATIBILITY IDEOGRAPH-2F9AA -2F9AB CJK COMPATIBILITY IDEOGRAPH-2F9AB -2F9AC CJK COMPATIBILITY IDEOGRAPH-2F9AC -2F9AD CJK COMPATIBILITY IDEOGRAPH-2F9AD -2F9AE CJK COMPATIBILITY IDEOGRAPH-2F9AE -2F9AF CJK COMPATIBILITY IDEOGRAPH-2F9AF -2F9B0 CJK COMPATIBILITY IDEOGRAPH-2F9B0 -2F9B1 CJK COMPATIBILITY IDEOGRAPH-2F9B1 -2F9B2 CJK COMPATIBILITY IDEOGRAPH-2F9B2 -2F9B3 CJK COMPATIBILITY IDEOGRAPH-2F9B3 -2F9B4 CJK COMPATIBILITY IDEOGRAPH-2F9B4 -2F9B5 CJK COMPATIBILITY IDEOGRAPH-2F9B5 -2F9B6 CJK COMPATIBILITY IDEOGRAPH-2F9B6 -2F9B7 CJK COMPATIBILITY IDEOGRAPH-2F9B7 -2F9B8 CJK COMPATIBILITY IDEOGRAPH-2F9B8 -2F9B9 CJK COMPATIBILITY IDEOGRAPH-2F9B9 -2F9BA CJK COMPATIBILITY IDEOGRAPH-2F9BA -2F9BB CJK COMPATIBILITY IDEOGRAPH-2F9BB -2F9BC CJK COMPATIBILITY IDEOGRAPH-2F9BC -2F9BD CJK COMPATIBILITY IDEOGRAPH-2F9BD -2F9BE CJK COMPATIBILITY IDEOGRAPH-2F9BE -2F9BF CJK COMPATIBILITY IDEOGRAPH-2F9BF -2F9C0 CJK COMPATIBILITY IDEOGRAPH-2F9C0 -2F9C1 CJK COMPATIBILITY IDEOGRAPH-2F9C1 -2F9C2 CJK COMPATIBILITY IDEOGRAPH-2F9C2 -2F9C3 CJK COMPATIBILITY IDEOGRAPH-2F9C3 -2F9C4 CJK COMPATIBILITY IDEOGRAPH-2F9C4 -2F9C5 CJK COMPATIBILITY IDEOGRAPH-2F9C5 -2F9C6 CJK COMPATIBILITY IDEOGRAPH-2F9C6 -2F9C7 CJK COMPATIBILITY IDEOGRAPH-2F9C7 -2F9C8 CJK COMPATIBILITY IDEOGRAPH-2F9C8 -2F9C9 CJK COMPATIBILITY IDEOGRAPH-2F9C9 -2F9CA CJK COMPATIBILITY IDEOGRAPH-2F9CA -2F9CB CJK COMPATIBILITY IDEOGRAPH-2F9CB -2F9CC CJK COMPATIBILITY IDEOGRAPH-2F9CC -2F9CD CJK COMPATIBILITY IDEOGRAPH-2F9CD -2F9CE CJK COMPATIBILITY IDEOGRAPH-2F9CE -2F9CF CJK COMPATIBILITY IDEOGRAPH-2F9CF -2F9D0 CJK COMPATIBILITY IDEOGRAPH-2F9D0 -2F9D1 CJK COMPATIBILITY 
IDEOGRAPH-2F9D1 -2F9D2 CJK COMPATIBILITY IDEOGRAPH-2F9D2 -2F9D3 CJK COMPATIBILITY IDEOGRAPH-2F9D3 -2F9D4 CJK COMPATIBILITY IDEOGRAPH-2F9D4 -2F9D5 CJK COMPATIBILITY IDEOGRAPH-2F9D5 -2F9D6 CJK COMPATIBILITY IDEOGRAPH-2F9D6 -2F9D7 CJK COMPATIBILITY IDEOGRAPH-2F9D7 -2F9D8 CJK COMPATIBILITY IDEOGRAPH-2F9D8 -2F9D9 CJK COMPATIBILITY IDEOGRAPH-2F9D9 -2F9DA CJK COMPATIBILITY IDEOGRAPH-2F9DA -2F9DB CJK COMPATIBILITY IDEOGRAPH-2F9DB -2F9DC CJK COMPATIBILITY IDEOGRAPH-2F9DC -2F9DD CJK COMPATIBILITY IDEOGRAPH-2F9DD -2F9DE CJK COMPATIBILITY IDEOGRAPH-2F9DE -2F9DF CJK COMPATIBILITY IDEOGRAPH-2F9DF -2F9E0 CJK COMPATIBILITY IDEOGRAPH-2F9E0 -2F9E1 CJK COMPATIBILITY IDEOGRAPH-2F9E1 -2F9E2 CJK COMPATIBILITY IDEOGRAPH-2F9E2 -2F9E3 CJK COMPATIBILITY IDEOGRAPH-2F9E3 -2F9E4 CJK COMPATIBILITY IDEOGRAPH-2F9E4 -2F9E5 CJK COMPATIBILITY IDEOGRAPH-2F9E5 -2F9E6 CJK COMPATIBILITY IDEOGRAPH-2F9E6 -2F9E7 CJK COMPATIBILITY IDEOGRAPH-2F9E7 -2F9E8 CJK COMPATIBILITY IDEOGRAPH-2F9E8 -2F9E9 CJK COMPATIBILITY IDEOGRAPH-2F9E9 -2F9EA CJK COMPATIBILITY IDEOGRAPH-2F9EA -2F9EB CJK COMPATIBILITY IDEOGRAPH-2F9EB -2F9EC CJK COMPATIBILITY IDEOGRAPH-2F9EC -2F9ED CJK COMPATIBILITY IDEOGRAPH-2F9ED -2F9EE CJK COMPATIBILITY IDEOGRAPH-2F9EE -2F9EF CJK COMPATIBILITY IDEOGRAPH-2F9EF -2F9F0 CJK COMPATIBILITY IDEOGRAPH-2F9F0 -2F9F1 CJK COMPATIBILITY IDEOGRAPH-2F9F1 -2F9F2 CJK COMPATIBILITY IDEOGRAPH-2F9F2 -2F9F3 CJK COMPATIBILITY IDEOGRAPH-2F9F3 -2F9F4 CJK COMPATIBILITY IDEOGRAPH-2F9F4 -2F9F5 CJK COMPATIBILITY IDEOGRAPH-2F9F5 -2F9F6 CJK COMPATIBILITY IDEOGRAPH-2F9F6 -2F9F7 CJK COMPATIBILITY IDEOGRAPH-2F9F7 -2F9F8 CJK COMPATIBILITY IDEOGRAPH-2F9F8 -2F9F9 CJK COMPATIBILITY IDEOGRAPH-2F9F9 -2F9FA CJK COMPATIBILITY IDEOGRAPH-2F9FA -2F9FB CJK COMPATIBILITY IDEOGRAPH-2F9FB -2F9FC CJK COMPATIBILITY IDEOGRAPH-2F9FC -2F9FD CJK COMPATIBILITY IDEOGRAPH-2F9FD -2F9FE CJK COMPATIBILITY IDEOGRAPH-2F9FE -2F9FF CJK COMPATIBILITY IDEOGRAPH-2F9FF -2FA00 CJK COMPATIBILITY IDEOGRAPH-2FA00 -2FA01 CJK COMPATIBILITY IDEOGRAPH-2FA01 -2FA02 CJK 
COMPATIBILITY IDEOGRAPH-2FA02 -2FA03 CJK COMPATIBILITY IDEOGRAPH-2FA03 -2FA04 CJK COMPATIBILITY IDEOGRAPH-2FA04 -2FA05 CJK COMPATIBILITY IDEOGRAPH-2FA05 -2FA06 CJK COMPATIBILITY IDEOGRAPH-2FA06 -2FA07 CJK COMPATIBILITY IDEOGRAPH-2FA07 -2FA08 CJK COMPATIBILITY IDEOGRAPH-2FA08 -2FA09 CJK COMPATIBILITY IDEOGRAPH-2FA09 -2FA0A CJK COMPATIBILITY IDEOGRAPH-2FA0A -2FA0B CJK COMPATIBILITY IDEOGRAPH-2FA0B -2FA0C CJK COMPATIBILITY IDEOGRAPH-2FA0C -2FA0D CJK COMPATIBILITY IDEOGRAPH-2FA0D -2FA0E CJK COMPATIBILITY IDEOGRAPH-2FA0E -2FA0F CJK COMPATIBILITY IDEOGRAPH-2FA0F -2FA10 CJK COMPATIBILITY IDEOGRAPH-2FA10 -2FA11 CJK COMPATIBILITY IDEOGRAPH-2FA11 -2FA12 CJK COMPATIBILITY IDEOGRAPH-2FA12 -2FA13 CJK COMPATIBILITY IDEOGRAPH-2FA13 -2FA14 CJK COMPATIBILITY IDEOGRAPH-2FA14 -2FA15 CJK COMPATIBILITY IDEOGRAPH-2FA15 -2FA16 CJK COMPATIBILITY IDEOGRAPH-2FA16 -2FA17 CJK COMPATIBILITY IDEOGRAPH-2FA17 -2FA18 CJK COMPATIBILITY IDEOGRAPH-2FA18 -2FA19 CJK COMPATIBILITY IDEOGRAPH-2FA19 -2FA1A CJK COMPATIBILITY IDEOGRAPH-2FA1A -2FA1B CJK COMPATIBILITY IDEOGRAPH-2FA1B -2FA1C CJK COMPATIBILITY IDEOGRAPH-2FA1C -2FA1D CJK COMPATIBILITY IDEOGRAPH-2FA1D -E0001 LANGUAGE TAG -E0020 TAG SPACE -E0021 TAG EXCLAMATION MARK -E0022 TAG QUOTATION MARK -E0023 TAG NUMBER SIGN -E0024 TAG DOLLAR SIGN -E0025 TAG PERCENT SIGN -E0026 TAG AMPERSAND -E0027 TAG APOSTROPHE -E0028 TAG LEFT PARENTHESIS -E0029 TAG RIGHT PARENTHESIS -E002A TAG ASTERISK -E002B TAG PLUS SIGN -E002C TAG COMMA -E002D TAG HYPHEN-MINUS -E002E TAG FULL STOP -E002F TAG SOLIDUS -E0030 TAG DIGIT ZERO -E0031 TAG DIGIT ONE -E0032 TAG DIGIT TWO -E0033 TAG DIGIT THREE -E0034 TAG DIGIT FOUR -E0035 TAG DIGIT FIVE -E0036 TAG DIGIT SIX -E0037 TAG DIGIT SEVEN -E0038 TAG DIGIT EIGHT -E0039 TAG DIGIT NINE -E003A TAG COLON -E003B TAG SEMICOLON -E003C TAG LESS-THAN SIGN -E003D TAG EQUALS SIGN -E003E TAG GREATER-THAN SIGN -E003F TAG QUESTION MARK -E0040 TAG COMMERCIAL AT -E0041 TAG LATIN CAPITAL LETTER A -E0042 TAG LATIN CAPITAL LETTER B -E0043 TAG LATIN CAPITAL 
LETTER C -E0044 TAG LATIN CAPITAL LETTER D -E0045 TAG LATIN CAPITAL LETTER E -E0046 TAG LATIN CAPITAL LETTER F -E0047 TAG LATIN CAPITAL LETTER G -E0048 TAG LATIN CAPITAL LETTER H -E0049 TAG LATIN CAPITAL LETTER I -E004A TAG LATIN CAPITAL LETTER J -E004B TAG LATIN CAPITAL LETTER K -E004C TAG LATIN CAPITAL LETTER L -E004D TAG LATIN CAPITAL LETTER M -E004E TAG LATIN CAPITAL LETTER N -E004F TAG LATIN CAPITAL LETTER O -E0050 TAG LATIN CAPITAL LETTER P -E0051 TAG LATIN CAPITAL LETTER Q -E0052 TAG LATIN CAPITAL LETTER R -E0053 TAG LATIN CAPITAL LETTER S -E0054 TAG LATIN CAPITAL LETTER T -E0055 TAG LATIN CAPITAL LETTER U -E0056 TAG LATIN CAPITAL LETTER V -E0057 TAG LATIN CAPITAL LETTER W -E0058 TAG LATIN CAPITAL LETTER X -E0059 TAG LATIN CAPITAL LETTER Y -E005A TAG LATIN CAPITAL LETTER Z -E005B TAG LEFT SQUARE BRACKET -E005C TAG REVERSE SOLIDUS -E005D TAG RIGHT SQUARE BRACKET -E005E TAG CIRCUMFLEX ACCENT -E005F TAG LOW LINE -E0060 TAG GRAVE ACCENT -E0061 TAG LATIN SMALL LETTER A -E0062 TAG LATIN SMALL LETTER B -E0063 TAG LATIN SMALL LETTER C -E0064 TAG LATIN SMALL LETTER D -E0065 TAG LATIN SMALL LETTER E -E0066 TAG LATIN SMALL LETTER F -E0067 TAG LATIN SMALL LETTER G -E0068 TAG LATIN SMALL LETTER H -E0069 TAG LATIN SMALL LETTER I -E006A TAG LATIN SMALL LETTER J -E006B TAG LATIN SMALL LETTER K -E006C TAG LATIN SMALL LETTER L -E006D TAG LATIN SMALL LETTER M -E006E TAG LATIN SMALL LETTER N -E006F TAG LATIN SMALL LETTER O -E0070 TAG LATIN SMALL LETTER P -E0071 TAG LATIN SMALL LETTER Q -E0072 TAG LATIN SMALL LETTER R -E0073 TAG LATIN SMALL LETTER S -E0074 TAG LATIN SMALL LETTER T -E0075 TAG LATIN SMALL LETTER U -E0076 TAG LATIN SMALL LETTER V -E0077 TAG LATIN SMALL LETTER W -E0078 TAG LATIN SMALL LETTER X -E0079 TAG LATIN SMALL LETTER Y -E007A TAG LATIN SMALL LETTER Z -E007B TAG LEFT CURLY BRACKET -E007C TAG VERTICAL LINE -E007D TAG RIGHT CURLY BRACKET -E007E TAG TILDE -E007F CANCEL TAG -E0100 VARIATION SELECTOR-17 -E0101 VARIATION SELECTOR-18 -E0102 VARIATION SELECTOR-19 
-E0103 VARIATION SELECTOR-20 -E0104 VARIATION SELECTOR-21 -E0105 VARIATION SELECTOR-22 -E0106 VARIATION SELECTOR-23 -E0107 VARIATION SELECTOR-24 -E0108 VARIATION SELECTOR-25 -E0109 VARIATION SELECTOR-26 -E010A VARIATION SELECTOR-27 -E010B VARIATION SELECTOR-28 -E010C VARIATION SELECTOR-29 -E010D VARIATION SELECTOR-30 -E010E VARIATION SELECTOR-31 -E010F VARIATION SELECTOR-32 -E0110 VARIATION SELECTOR-33 -E0111 VARIATION SELECTOR-34 -E0112 VARIATION SELECTOR-35 -E0113 VARIATION SELECTOR-36 -E0114 VARIATION SELECTOR-37 -E0115 VARIATION SELECTOR-38 -E0116 VARIATION SELECTOR-39 -E0117 VARIATION SELECTOR-40 -E0118 VARIATION SELECTOR-41 -E0119 VARIATION SELECTOR-42 -E011A VARIATION SELECTOR-43 -E011B VARIATION SELECTOR-44 -E011C VARIATION SELECTOR-45 -E011D VARIATION SELECTOR-46 -E011E VARIATION SELECTOR-47 -E011F VARIATION SELECTOR-48 -E0120 VARIATION SELECTOR-49 -E0121 VARIATION SELECTOR-50 -E0122 VARIATION SELECTOR-51 -E0123 VARIATION SELECTOR-52 -E0124 VARIATION SELECTOR-53 -E0125 VARIATION SELECTOR-54 -E0126 VARIATION SELECTOR-55 -E0127 VARIATION SELECTOR-56 -E0128 VARIATION SELECTOR-57 -E0129 VARIATION SELECTOR-58 -E012A VARIATION SELECTOR-59 -E012B VARIATION SELECTOR-60 -E012C VARIATION SELECTOR-61 -E012D VARIATION SELECTOR-62 -E012E VARIATION SELECTOR-63 -E012F VARIATION SELECTOR-64 -E0130 VARIATION SELECTOR-65 -E0131 VARIATION SELECTOR-66 -E0132 VARIATION SELECTOR-67 -E0133 VARIATION SELECTOR-68 -E0134 VARIATION SELECTOR-69 -E0135 VARIATION SELECTOR-70 -E0136 VARIATION SELECTOR-71 -E0137 VARIATION SELECTOR-72 -E0138 VARIATION SELECTOR-73 -E0139 VARIATION SELECTOR-74 -E013A VARIATION SELECTOR-75 -E013B VARIATION SELECTOR-76 -E013C VARIATION SELECTOR-77 -E013D VARIATION SELECTOR-78 -E013E VARIATION SELECTOR-79 -E013F VARIATION SELECTOR-80 -E0140 VARIATION SELECTOR-81 -E0141 VARIATION SELECTOR-82 -E0142 VARIATION SELECTOR-83 -E0143 VARIATION SELECTOR-84 -E0144 VARIATION SELECTOR-85 -E0145 VARIATION SELECTOR-86 -E0146 VARIATION SELECTOR-87 -E0147 VARIATION 
SELECTOR-88 -E0148 VARIATION SELECTOR-89 -E0149 VARIATION SELECTOR-90 -E014A VARIATION SELECTOR-91 -E014B VARIATION SELECTOR-92 -E014C VARIATION SELECTOR-93 -E014D VARIATION SELECTOR-94 -E014E VARIATION SELECTOR-95 -E014F VARIATION SELECTOR-96 -E0150 VARIATION SELECTOR-97 -E0151 VARIATION SELECTOR-98 -E0152 VARIATION SELECTOR-99 -E0153 VARIATION SELECTOR-100 -E0154 VARIATION SELECTOR-101 -E0155 VARIATION SELECTOR-102 -E0156 VARIATION SELECTOR-103 -E0157 VARIATION SELECTOR-104 -E0158 VARIATION SELECTOR-105 -E0159 VARIATION SELECTOR-106 -E015A VARIATION SELECTOR-107 -E015B VARIATION SELECTOR-108 -E015C VARIATION SELECTOR-109 -E015D VARIATION SELECTOR-110 -E015E VARIATION SELECTOR-111 -E015F VARIATION SELECTOR-112 -E0160 VARIATION SELECTOR-113 -E0161 VARIATION SELECTOR-114 -E0162 VARIATION SELECTOR-115 -E0163 VARIATION SELECTOR-116 -E0164 VARIATION SELECTOR-117 -E0165 VARIATION SELECTOR-118 -E0166 VARIATION SELECTOR-119 -E0167 VARIATION SELECTOR-120 -E0168 VARIATION SELECTOR-121 -E0169 VARIATION SELECTOR-122 -E016A VARIATION SELECTOR-123 -E016B VARIATION SELECTOR-124 -E016C VARIATION SELECTOR-125 -E016D VARIATION SELECTOR-126 -E016E VARIATION SELECTOR-127 -E016F VARIATION SELECTOR-128 -E0170 VARIATION SELECTOR-129 -E0171 VARIATION SELECTOR-130 -E0172 VARIATION SELECTOR-131 -E0173 VARIATION SELECTOR-132 -E0174 VARIATION SELECTOR-133 -E0175 VARIATION SELECTOR-134 -E0176 VARIATION SELECTOR-135 -E0177 VARIATION SELECTOR-136 -E0178 VARIATION SELECTOR-137 -E0179 VARIATION SELECTOR-138 -E017A VARIATION SELECTOR-139 -E017B VARIATION SELECTOR-140 -E017C VARIATION SELECTOR-141 -E017D VARIATION SELECTOR-142 -E017E VARIATION SELECTOR-143 -E017F VARIATION SELECTOR-144 -E0180 VARIATION SELECTOR-145 -E0181 VARIATION SELECTOR-146 -E0182 VARIATION SELECTOR-147 -E0183 VARIATION SELECTOR-148 -E0184 VARIATION SELECTOR-149 -E0185 VARIATION SELECTOR-150 -E0186 VARIATION SELECTOR-151 -E0187 VARIATION SELECTOR-152 -E0188 VARIATION SELECTOR-153 -E0189 VARIATION SELECTOR-154 -E018A VARIATION 
SELECTOR-155 -E018B VARIATION SELECTOR-156 -E018C VARIATION SELECTOR-157 -E018D VARIATION SELECTOR-158 -E018E VARIATION SELECTOR-159 -E018F VARIATION SELECTOR-160 -E0190 VARIATION SELECTOR-161 -E0191 VARIATION SELECTOR-162 -E0192 VARIATION SELECTOR-163 -E0193 VARIATION SELECTOR-164 -E0194 VARIATION SELECTOR-165 -E0195 VARIATION SELECTOR-166 -E0196 VARIATION SELECTOR-167 -E0197 VARIATION SELECTOR-168 -E0198 VARIATION SELECTOR-169 -E0199 VARIATION SELECTOR-170 -E019A VARIATION SELECTOR-171 -E019B VARIATION SELECTOR-172 -E019C VARIATION SELECTOR-173 -E019D VARIATION SELECTOR-174 -E019E VARIATION SELECTOR-175 -E019F VARIATION SELECTOR-176 -E01A0 VARIATION SELECTOR-177 -E01A1 VARIATION SELECTOR-178 -E01A2 VARIATION SELECTOR-179 -E01A3 VARIATION SELECTOR-180 -E01A4 VARIATION SELECTOR-181 -E01A5 VARIATION SELECTOR-182 -E01A6 VARIATION SELECTOR-183 -E01A7 VARIATION SELECTOR-184 -E01A8 VARIATION SELECTOR-185 -E01A9 VARIATION SELECTOR-186 -E01AA VARIATION SELECTOR-187 -E01AB VARIATION SELECTOR-188 -E01AC VARIATION SELECTOR-189 -E01AD VARIATION SELECTOR-190 -E01AE VARIATION SELECTOR-191 -E01AF VARIATION SELECTOR-192 -E01B0 VARIATION SELECTOR-193 -E01B1 VARIATION SELECTOR-194 -E01B2 VARIATION SELECTOR-195 -E01B3 VARIATION SELECTOR-196 -E01B4 VARIATION SELECTOR-197 -E01B5 VARIATION SELECTOR-198 -E01B6 VARIATION SELECTOR-199 -E01B7 VARIATION SELECTOR-200 -E01B8 VARIATION SELECTOR-201 -E01B9 VARIATION SELECTOR-202 -E01BA VARIATION SELECTOR-203 -E01BB VARIATION SELECTOR-204 -E01BC VARIATION SELECTOR-205 -E01BD VARIATION SELECTOR-206 -E01BE VARIATION SELECTOR-207 -E01BF VARIATION SELECTOR-208 -E01C0 VARIATION SELECTOR-209 -E01C1 VARIATION SELECTOR-210 -E01C2 VARIATION SELECTOR-211 -E01C3 VARIATION SELECTOR-212 -E01C4 VARIATION SELECTOR-213 -E01C5 VARIATION SELECTOR-214 -E01C6 VARIATION SELECTOR-215 -E01C7 VARIATION SELECTOR-216 -E01C8 VARIATION SELECTOR-217 -E01C9 VARIATION SELECTOR-218 -E01CA VARIATION SELECTOR-219 -E01CB VARIATION SELECTOR-220 -E01CC VARIATION SELECTOR-221 -E01CD 
VARIATION SELECTOR-222 -E01CE VARIATION SELECTOR-223 -E01CF VARIATION SELECTOR-224 -E01D0 VARIATION SELECTOR-225 -E01D1 VARIATION SELECTOR-226 -E01D2 VARIATION SELECTOR-227 -E01D3 VARIATION SELECTOR-228 -E01D4 VARIATION SELECTOR-229 -E01D5 VARIATION SELECTOR-230 -E01D6 VARIATION SELECTOR-231 -E01D7 VARIATION SELECTOR-232 -E01D8 VARIATION SELECTOR-233 -E01D9 VARIATION SELECTOR-234 -E01DA VARIATION SELECTOR-235 -E01DB VARIATION SELECTOR-236 -E01DC VARIATION SELECTOR-237 -E01DD VARIATION SELECTOR-238 -E01DE VARIATION SELECTOR-239 -E01DF VARIATION SELECTOR-240 -E01E0 VARIATION SELECTOR-241 -E01E1 VARIATION SELECTOR-242 -E01E2 VARIATION SELECTOR-243 -E01E3 VARIATION SELECTOR-244 -E01E4 VARIATION SELECTOR-245 -E01E5 VARIATION SELECTOR-246 -E01E6 VARIATION SELECTOR-247 -E01E7 VARIATION SELECTOR-248 -E01E8 VARIATION SELECTOR-249 -E01E9 VARIATION SELECTOR-250 -E01EA VARIATION SELECTOR-251 -E01EB VARIATION SELECTOR-252 -E01EC VARIATION SELECTOR-253 -E01ED VARIATION SELECTOR-254 -E01EE VARIATION SELECTOR-255 -E01EF VARIATION SELECTOR-256 -F0000 <Plane 15 Private Use, First> -FFFFD <Plane 15 Private Use, Last> -100000 <Plane 16 Private Use, First> -10FFFD <Plane 16 Private Use, Last> -""" - -def _makeunicodes(): +def _makeunicodes(f): import re - firstRE = re.compile("<(.*?), First>") - firstREmatch = firstRE.match - lastRE = re.compile("<(.*?), Last>") - lastREmatch = lastRE.match - - lines = _unicode.splitlines() - while not lines[-1]: - del lines[-1] # empty string - + lines = iter(f.readlines()) unicodes = {} - i = 0 - lenLines = len(lines) - while i < lenLines: - line = lines[i] - num, name = line.split('\t') + for line in lines: + if not line: continue + num, name = line.split(';')[:2] + if name[0] == '<': continue # "<control>", etc. 
num = int(num, 16) - if firstREmatch(name) is not None: - i = i + 1 - line = lines[i] - numLast, nameLast = line.split('\t') - m = lastREmatch(nameLast) - assert m is not None - name = m.group(1) - numLast = int(numLast, 16) - for num in range(num, numLast + 1): - unicodes[num] = name - else: - unicodes[num] = name - i = i + 1 + unicodes[num] = name return unicodes -class _Unicode: +class _UnicodeCustom(object): - def __init__(self): - self.codes = _makeunicodes() + def __init__(self, f): + if isinstance(f, basestring): + f = open(f) + self.codes = _makeunicodes(f) def __getitem__(self, charCode): try: @@ -21877,5 +27,17 @@ except KeyError: return "????" +class _UnicodeBuiltin(object): + + def __getitem__(self, charCode): + import unicodedata + try: + return unicodedata.name(unichr(charCode)) + except ValueError: + return "????" + +Unicode = _UnicodeBuiltin() -Unicode = _Unicode() +def setUnicodeData(f): + global Unicode + Unicode = _UnicodeCustom(f) diff -Nru fonttools-2.4/Lib/sstruct.py fonttools-3.0/Lib/sstruct.py --- fonttools-2.4/Lib/sstruct.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/sstruct.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,204 +1,7 @@ -"""sstruct.py -- SuperStruct +# Added here for backward compatibility -Higher level layer on top of the struct module, enabling to -bind names to struct elements. The interface is similar to -struct, except the objects passed and returned are not tuples -(or argument lists), but dictionaries or instances. +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * -Just like struct, we use format strings to describe a data -structure, except we use one line per element. Lines are -separated by newlines or semi-colons. Each line contains -either one of the special struct characters ('@', '=', '<', -'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f'). 
-Repetitions, like the struct module offers them are not useful -in this context, except for fixed length strings (eg. 'myInt:5h' -is not allowed but 'myString:5s' is). The 'x' format character -(pad byte) is treated as 'special', since it is by definition -anonymous. Extra whitespace is allowed everywhere. - -The sstruct module offers one feature that the "normal" struct -module doesn't: support for fixed point numbers. These are spelled -as "n.mF", where n is the number of bits before the point, and m -the number of bits after the point. Fixed point numbers get -converted to floats. - -pack(format, object): - 'object' is either a dictionary or an instance (or actually - anything that has a __dict__ attribute). If it is a dictionary, - its keys are used for names. If it is an instance, it's - attributes are used to grab struct elements from. Returns - a string containing the data. - -unpack(format, data, object=None) - If 'object' is omitted (or None), a new dictionary will be - returned. If 'object' is a dictionary, it will be used to add - struct elements to. If it is an instance (or in fact anything - that has a __dict__ attribute), an attribute will be added for - each struct element. In the latter two cases, 'object' itself - is returned. - -unpack2(format, data, object=None) - Convenience function. Same as unpack, except data may be longer - than needed. The returned value is a tuple: (object, leftoverdata). - -calcsize(format) - like struct.calcsize(), but uses our own format strings: - it returns the size of the data in bytes. -""" - -# XXX I would like to support pascal strings, too, but I'm not -# sure if that's wise. Would be nice if struct supported them -# "properly", but that would certainly break calcsize()... 
- -__version__ = "1.2" -__copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>" - -import struct -import re -import types - - -error = "sstruct.error" - -def pack(format, object): - formatstring, names, fixes = getformat(format) - elements = [] - if type(object) is not types.DictType: - object = object.__dict__ - for name in names: - value = object[name] - if fixes.has_key(name): - # fixed point conversion - value = int(round(value*fixes[name])) - elements.append(value) - data = apply(struct.pack, (formatstring,) + tuple(elements)) - return data - -def unpack(format, data, object=None): - if object is None: - object = {} - formatstring, names, fixes = getformat(format) - if type(object) is types.DictType: - dict = object - else: - dict = object.__dict__ - elements = struct.unpack(formatstring, data) - for i in range(len(names)): - name = names[i] - value = elements[i] - if fixes.has_key(name): - # fixed point conversion - value = value / fixes[name] - dict[name] = value - return object - -def unpack2(format, data, object=None): - length = calcsize(format) - return unpack(format, data[:length], object), data[length:] - -def calcsize(format): - formatstring, names, fixes = getformat(format) - return struct.calcsize(formatstring) - - -# matches "name:formatchar" (whitespace is allowed) -_elementRE = re.compile( - "\s*" # whitespace - "([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier) - "\s*:\s*" # whitespace : whitespace - "([cbBhHiIlLfd]|[0-9]+[ps]|" # formatchar... 
- "([0-9]+)\.([0-9]+)(F))" # ...formatchar - "\s*" # whitespace - "(#.*)?$" # [comment] + end of string - ) - -# matches the special struct format chars and 'x' (pad byte) -_extraRE = re.compile("\s*([x@=<>!])\s*(#.*)?$") - -# matches an "empty" string, possibly containing whitespace and/or a comment -_emptyRE = re.compile("\s*(#.*)?$") - -_fixedpointmappings = { - 8: "b", - 16: "h", - 32: "l"} - -_formatcache = {} - -def getformat(format): - try: - formatstring, names, fixes = _formatcache[format] - except KeyError: - lines = re.split("[\n;]", format) - formatstring = "" - names = [] - fixes = {} - for line in lines: - if _emptyRE.match(line): - continue - m = _extraRE.match(line) - if m: - formatchar = m.group(1) - if formatchar <> 'x' and formatstring: - raise error, "a special format char must be first" - else: - m = _elementRE.match(line) - if not m: - raise error, "syntax error in format: '%s'" % line - name = m.group(1) - names.append(name) - formatchar = m.group(2) - if m.group(3): - # fixed point - before = int(m.group(3)) - after = int(m.group(4)) - bits = before + after - if bits not in [8, 16, 32]: - raise error, "fixed point must be 8, 16 or 32 bits long" - formatchar = _fixedpointmappings[bits] - assert m.group(5) == "F" - fixes[name] = float(1 << after) - formatstring = formatstring + formatchar - _formatcache[format] = formatstring, names, fixes - return formatstring, names, fixes - -def _test(): - format = """ - # comments are allowed - > # big endian (see documentation for struct) - # empty lines are allowed: - - ashort: h - along: l - abyte: b # a byte - achar: c - astr: 5s - afloat: f; adouble: d # multiple "statements" are allowed - afixed: 16.16F - """ - - print 'size:', calcsize(format) - - class foo: - pass - - i = foo() - - i.ashort = 0x7fff - i.along = 0x7fffffff - i.abyte = 0x7f - i.achar = "a" - i.astr = "12345" - i.afloat = 0.5 - i.adouble = 0.5 - i.afixed = 1.5 - - data = pack(format, i) - print 'data:', `data` - print unpack(format, 
data) - i2 = foo() - unpack(format, data, i2) - print vars(i2) - -if __name__ == "__main__": - _test() +from fontTools.misc.sstruct import * +from fontTools.misc.sstruct import __doc__ diff -Nru fonttools-2.4/Lib/xmlWriter.py fonttools-3.0/Lib/xmlWriter.py --- fonttools-2.4/Lib/xmlWriter.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Lib/xmlWriter.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,176 +1,7 @@ -"""xmlWriter.py -- Simple XML authoring class""" +# Added back here for backward compatibility -import string -import struct -import os - -INDENT = " " - - -class XMLWriter: - - def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="ISO-8859-1"): - if not hasattr(fileOrPath, "write"): - self.file = open(fileOrPath, "w") - else: - # assume writable file object - self.file = fileOrPath - self.indentwhite = indentwhite - self.indentlevel = 0 - self.stack = [] - self.needindent = 1 - self.idlefunc = idlefunc - self.idlecounter = 0 - if encoding: - self.writeraw('<?xml version="1.0" encoding="%s"?>' % encoding) - else: - self.writeraw('<?xml version="1.0"?>') - self.newline() - - def close(self): - self.file.close() - - def write(self, data): - self.writeraw(escape(data)) - - def write_noindent(self, data): - self.file.write(escape(data)) - - def write8bit(self, data): - self.writeraw(escape8bit(data)) - - def write16bit(self, data): - self.writeraw(escape16bit(data)) - - def writeraw(self, data): - if self.needindent: - self.file.write(self.indentlevel * self.indentwhite) - self.needindent = 0 - self.file.write(data) - - def newline(self): - self.file.write("\n") - self.needindent = 1 - idlecounter = self.idlecounter - if not idlecounter % 100 and self.idlefunc is not None: - self.idlefunc() - self.idlecounter = idlecounter + 1 - - def comment(self, data): - data = escape(data) - lines = string.split(data, "\n") - self.writeraw("<!-- " + lines[0]) - for line in lines[1:]: - self.newline() - self.writeraw(" " + line) - self.writeraw(" 
-->") - - def simpletag(self, _TAG_, *args, **kwargs): - attrdata = apply(self.stringifyattrs, args, kwargs) - data = "<%s%s/>" % (_TAG_, attrdata) - self.writeraw(data) - - def begintag(self, _TAG_, *args, **kwargs): - attrdata = apply(self.stringifyattrs, args, kwargs) - data = "<%s%s>" % (_TAG_, attrdata) - self.writeraw(data) - self.stack.append(_TAG_) - self.indent() - - def endtag(self, _TAG_): - assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag" - del self.stack[-1] - self.dedent() - data = "</%s>" % _TAG_ - self.writeraw(data) - - def dumphex(self, data): - linelength = 16 - hexlinelength = linelength * 2 - chunksize = 8 - for i in range(0, len(data), linelength): - hexline = hexStr(data[i:i+linelength]) - line = "" - white = "" - for j in range(0, hexlinelength, chunksize): - line = line + white + hexline[j:j+chunksize] - white = " " - self.writeraw(line) - self.newline() - - def indent(self): - self.indentlevel = self.indentlevel + 1 - - def dedent(self): - assert self.indentlevel > 0 - self.indentlevel = self.indentlevel - 1 - - def stringifyattrs(self, *args, **kwargs): - if kwargs: - assert not args - attributes = kwargs.items() - attributes.sort() - elif args: - assert len(args) == 1 - attributes = args[0] - else: - return "" - data = "" - for attr, value in attributes: - data = data + ' %s="%s"' % (attr, escapeattr(str(value))) - return data - - -def escape(data): - data = string.replace(data, "&", "&amp;") - data = string.replace(data, "<", "&lt;") - return data - -def escapeattr(data): - data = string.replace(data, "&", "&amp;") - data = string.replace(data, "<", "&lt;") - data = string.replace(data, '"', "&quot;") - return data - -def escape8bit(data): - def escapechar(c): - n = ord(c) - if c in "<&": - if c == "&": - return "&amp;" - else: - return "&lt;" - elif 32 <= n <= 127: - return c - else: - return "&#" + `n` + ";" - return string.join(map(escapechar, data), "") - -needswap = struct.pack("h", 1) == "\001\000" - -def 
escape16bit(data): - import array - a = array.array("H") - a.fromstring(data) - if needswap: - a.byteswap() - def escapenum(n, amp=ord("&"), lt=ord("<")): - if n == amp: - return "&amp;" - elif n == lt: - return "&lt;" - elif 32 <= n <= 127: - return chr(n) - else: - return "&#" + `n` + ";" - return string.join(map(escapenum, a), "") - - -def hexStr(s): - h = string.hexdigits - r = '' - for c in s: - i = ord(c) - r = r + h[(i >> 4) & 0xF] + h[i & 0xF] - return r +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.xmlWriter import * +from fontTools.misc.xmlWriter import __doc__ diff -Nru fonttools-2.4/Mac/README.txt fonttools-3.0/Mac/README.txt --- fonttools-2.4/Mac/README.txt 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Mac/README.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -The stuff in this folder is old and rusty, don't pay too much attention to it... diff -Nru fonttools-2.4/Mac/TTX.py fonttools-3.0/Mac/TTX.py --- fonttools-2.4/Mac/TTX.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Mac/TTX.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,322 +0,0 @@ -"""Main TTX application, Mac-only""" - - -#make sure we don't lose events to SIOUX -import MacOS -MacOS.EnableAppswitch(-1) - -def SetWatchCursor(): - import Qd, QuickDraw - Qd.SetCursor(Qd.GetCursor(QuickDraw.watchCursor).data) - -def SetArrowCursor(): - import Qd - Qd.SetCursor(Qd.qd.arrow) - -SetWatchCursor() - -# a few constants -LOGFILENAME = "TTX errors" -PREFSFILENAME = "TTX preferences" -DEFAULTXMLOUTPUT = ":XML output" -DEFAULTTTOUTPUT = ":TrueType output" - - -import FrameWork -import MiniAEFrame, AppleEvents -import EasyDialogs -import Res -import macfs -import os -import sys, time -import re, string -import traceback -from fontTools import ttLib, version -from fontTools.ttLib import xmlImport -from fontTools.ttLib.macUtils import ProgressBar - -abouttext = """\ -TTX - The free TrueType to XML to TrueType 
converter -(version %s) -Copyright 1999-2001, Just van Rossum (Letterror) -just@letterror.com""" % version - - -class TTX(FrameWork.Application, MiniAEFrame.AEServer): - - def __init__(self): - FrameWork.Application.__init__(self) - MiniAEFrame.AEServer.__init__(self) - self.installaehandler( - AppleEvents.kCoreEventClass, AppleEvents.kAEOpenApplication, self.do_nothing) - self.installaehandler( - AppleEvents.kCoreEventClass, AppleEvents.kAEPrintDocuments, self.do_nothing) - self.installaehandler( - AppleEvents.kCoreEventClass, AppleEvents.kAEOpenDocuments, self.handle_opendocumentsevent) - self.installaehandler( - AppleEvents.kCoreEventClass, AppleEvents.kAEQuitApplication, self.handle_quitevent) - - def idle(self, event): - SetArrowCursor() - - def makeusermenus(self): - m = FrameWork.Menu(self.menubar, "File") - FrameWork.MenuItem(m, "Open...", "O", self.domenu_open) - FrameWork.Separator(m) - FrameWork.MenuItem(m, "Quit", "Q", self._quit) - - def do_about(self, *args): - EasyDialogs.Message(abouttext) - - def handle_quitevent(self, *args, **kwargs): - self._quit() - - def domenu_open(self, *args): - fss, ok = macfs.StandardGetFile() - if ok: - self.opendocument(fss.as_pathname()) - - def handle_opendocumentsevent(self, docs, **kwargs): - if type(docs) <> type([]): - docs = [docs] - for doc in docs: - fss, a = doc.Resolve() - path = fss.as_pathname() - self.opendocument(path) - - def opendocument(self, path): - filename = os.path.basename(path) - filetype = guessfiletype(path) - handler = getattr(self, "handle_%s_file" % filetype) - handler(path) - - def handle_xml_file(self, path): - prefs = getprefs() - makesuitcase = int(prefs.get("makesuitcases", 0)) - dstfolder = prefs.get("ttoutput", DEFAULTTTOUTPUT) - if not os.path.exists(dstfolder): - os.mkdir(dstfolder) - srcfilename = dstfilename = os.path.basename(path) - if dstfilename[-4:] in (".ttx", ".xml"): - dstfilename = dstfilename[:-4] - if dstfilename[-4:] not in (".TTF", ".ttf"): - dstfilename = 
dstfilename + ".TTF" - dst = os.path.join(dstfolder, dstfilename) - - if makesuitcase: - try: - # see if the destination file is writable, - # otherwise we'll get an error waaay at the end of - # the parse procedure - testref = Res.FSpOpenResFile(macfs.FSSpec(dst), 3) # read-write - except Res.Error, why: - if why[0] <> -43: # file not found - EasyDialogs.Message("Can't create '%s'; file already open" % dst) - return - else: - Res.CloseResFile(testref) - else: - try: - f = open(dst, "wb") - except IOError, why: - EasyDialogs.Message("Can't create '%s'; file already open" % dst) - return - else: - f.close() - pb = ProgressBar("Reading TTX file '%s'..." % srcfilename) - try: - tt = ttLib.TTFont() - tt.importXML(path, pb) - pb.setlabel("Compiling and saving...") - tt.save(dst, makesuitcase) - finally: - pb.close() - - def handle_datafork_file(self, path): - prefs = getprefs() - dstfolder = prefs.get("xmloutput", DEFAULTXMLOUTPUT) - if not os.path.exists(dstfolder): - os.mkdir(dstfolder) - filename = os.path.basename(path) - pb = ProgressBar("Dumping '%s' to XML..." % filename) - if filename[-4:] in (".TTF", ".ttf"): - filename = filename[:-4] - filename = filename + ".ttx" - dst = os.path.join(dstfolder, filename) - try: - tt = ttLib.TTFont(path) - tt.saveXML(dst, pb) - finally: - pb.close() - - def handle_resource_file(self, path): - prefs = getprefs() - dstfolder = prefs.get("xmloutput", DEFAULTXMLOUTPUT) - if not os.path.exists(dstfolder): - os.mkdir(dstfolder) - filename = os.path.basename(path) - fss = macfs.FSSpec(path) - try: - resref = Res.FSpOpenResFile(fss, 1) # read-only - except: - return "unknown" - Res.UseResFile(resref) - pb = None - try: - n = Res.Count1Resources("sfnt") - for i in range(1, n+1): - res = Res.Get1IndResource('sfnt', i) - resid, restype, resname = res.GetResInfo() - if not resname: - resname = filename + `i` - pb = ProgressBar("Dumping '%s' to XML..." 
% resname) - dst = os.path.join(dstfolder, resname + ".ttx") - try: - tt = ttLib.TTFont(path, i) - tt.saveXML(dst, pb) - finally: - pb.close() - finally: - Res.CloseResFile(resref) - - def handle_python_file(self, path): - pass - #print "python", path - - def handle_unknown_file(self, path): - EasyDialogs.Message("Cannot open '%s': unknown file kind" % os.path.basename(path)) - - def do_nothing(self, *args, **kwargs): - pass - - def mainloop(self, mask=FrameWork.everyEvent, wait=0): - self.quitting = 0 - while not self.quitting: - try: - self.do1event(mask, wait) - except self.__class__: - # D'OH! FrameWork tries to quit us on cmd-.! - pass - except KeyboardInterrupt: - pass - except ttLib.xmlImport.xml_parse_error, why: - EasyDialogs.Message( - "An error occurred while parsing the XML file:\n" + why) - except: - exc = traceback.format_exception(sys.exc_type, sys.exc_value, None)[0] - exc = string.strip(exc) - EasyDialogs.Message("An error occurred!\n%s\n[see the logfile '%s' for details]" % - (exc, LOGFILENAME)) - traceback.print_exc() - - def do_kHighLevelEvent(self, event): - import AE - AE.AEProcessAppleEvent(event) - - - -def guessfiletype(path): - #if path[-3:] == ".py": - # return "python" - f = open(path, "rb") - data = f.read(21) - f.close() - if data[:5] == "<?xml": - return "xml" - elif data[:4] in ("\000\001\000\000", "OTTO", "true"): - return "datafork" - else: - # assume res fork font - fss = macfs.FSSpec(path) - try: - resref = Res.FSpOpenResFile(fss, 1) # read-only - except: - return "unknown" - Res.UseResFile(resref) - i = Res.Count1Resources("sfnt") - Res.CloseResFile(resref) - if i > 0: - return "resource" - return "unknown" - - -default_prefs = """\ -xmloutput: ":XML output" -ttoutput: ":TrueType output" -makesuitcases: 1 -""" - -def getprefs(path=PREFSFILENAME): - if not os.path.exists(path): - f = open(path, "w") - f.write(default_prefs) - f.close() - f = open(path) - lines = f.readlines() - prefs = {} - for line in lines: - if line[-1:] == 
"\n": - line = line[:-1] - try: - name, value = re.split(":", line, 1) - prefs[string.strip(name)] = eval(value) - except: - pass - return prefs - - -class dummy_stdin: - def readline(self): - return "" -sys.stdin = dummy_stdin() - -# redirect all output to a log file -sys.stdout = sys.stderr = open(LOGFILENAME, "w", 0) # unbuffered -print "Starting TTX at " + time.ctime(time.time()) - -# fire it up! -ttx = TTX() -ttx.mainloop() - - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# clues for BuildApplication/MacFreeze. -# -# These modules somehow get imported, but we don't want/have them: -# -# macfreeze: exclude msvcrt -# macfreeze: exclude W -# macfreeze: exclude SOCKS -# macfreeze: exclude TERMIOS -# macfreeze: exclude termios -# macfreeze: exclude icglue -# macfreeze: exclude ce -# -# these modules are imported dynamically, so MacFreeze won't see them: -# -# macfreeze: include fontTools.ttLib.tables._c_m_a_p -# macfreeze: include fontTools.ttLib.tables._c_v_t -# macfreeze: include fontTools.ttLib.tables._f_p_g_m -# macfreeze: include fontTools.ttLib.tables._g_a_s_p -# macfreeze: include fontTools.ttLib.tables._g_l_y_f -# macfreeze: include fontTools.ttLib.tables._h_d_m_x -# macfreeze: include fontTools.ttLib.tables._h_e_a_d -# macfreeze: include fontTools.ttLib.tables._h_h_e_a -# macfreeze: include fontTools.ttLib.tables._h_m_t_x -# macfreeze: include fontTools.ttLib.tables._k_e_r_n -# macfreeze: include fontTools.ttLib.tables._l_o_c_a -# macfreeze: include fontTools.ttLib.tables._m_a_x_p -# macfreeze: include fontTools.ttLib.tables._n_a_m_e -# macfreeze: include fontTools.ttLib.tables._p_o_s_t -# macfreeze: include fontTools.ttLib.tables._p_r_e_p -# macfreeze: include fontTools.ttLib.tables._v_h_e_a -# macfreeze: include fontTools.ttLib.tables._v_m_t_x -# macfreeze: include fontTools.ttLib.tables.L_T_S_H_ -# macfreeze: include fontTools.ttLib.tables.O_S_2f_2 -# macfreeze: include fontTools.ttLib.tables.T_S_I__0 -# macfreeze: include 
fontTools.ttLib.tables.T_S_I__1 -# macfreeze: include fontTools.ttLib.tables.T_S_I__2 -# macfreeze: include fontTools.ttLib.tables.T_S_I__3 -# macfreeze: include fontTools.ttLib.tables.T_S_I__5 -# macfreeze: include fontTools.ttLib.tables.C_F_F_ - diff -Nru fonttools-2.4/Mac/TTX.rsrc.hqx fonttools-3.0/Mac/TTX.rsrc.hqx --- fonttools-2.4/Mac/TTX.rsrc.hqx 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Mac/TTX.rsrc.hqx 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -(This file must be converted with BinHex 4.0) -:#&48@#jbFh*M!(*cFQ058d9%!3!!!!!!!!!-5eK3!!!!!!%!!!!+kJ!!#HS!!!& -KJ'-!!%[qIYf!IJ!)5rjqr6KJ!!")!!!8J*m!!$Kr!!!)9&4B,R*cFQ0c,d0[FQp -eG'PZCA0c8hPcC'9bC@eLC3),FR0bBe*6483"!!!f!-%!!!!!!!!!!!!!!!!!!!! -!!!#dKq%9!!!!!!!!$%B!!$L&!'*,rr2T,!-!!%##!!`iB!!!5!!!J)!G!!JS!!! -!3B)!1)!G!"3S!!!!3B)!*)"LLqJi!!!!N!!$!!#!I3!)JCd!&%J*2df!33!81m- -!!$J!!!#3!"d!##`Hrrp!JJ!8J'+,f)"M!!",rRi*5!!!+#`H!!""JJ!31(i!!%[ -pE68!!!!39%9B9&)UBfJ!U$!a1$%!!!!!!!`%!!!!!!!!!3%"!!!!!!!"!!!!!!G -"8&"-!!!!!!!#!2!!!!!!!!!!!!!!!!!!!!$r%4%4%4%4%4%4%4%4%4%Jrr)5%K) -5%K)5%K)5%K)5X2rr)5%K)5%K)5%K)5%K)E$rrr)5%K)5%K)5%K)5%K+`rrrr)5% -K)5%K)5%K)5%KX2rrrr)5'lX5%K)5%K)5%V$rrrrr)5rrS5%K)5rk)5'`rrrrmK+ -[rr)5%K,rra)5X2rrrb%Krrrl)5'rrrmK)E$rrr)5'rrrqK)Errrk%K+`rrmK)5V -krrmK[rrrqL%KX2rb%K)DSUrrS[qUUU)5%V$r)5%K)5'rrrrk)5%K)5'`m4)5%K) -5(rrrSK)5%K)5X!%K)5%K)5VrqL%K)5%K)E!"%K)5%K)DrrX5%K)5%K+`!5%K)5% -K,rrk)5%Km5%KX!%5%K)5%[rrra)5%[m5%V!")5'a)5VkrrqUUb(rm5'`!4)DqK+ -[SUrrrrm5rrm5X!%K,rq[qb'rrrrl)IrrmE!"%Krrrl)5[rrrmK,rrrq`!5%[rrS -K)5Vrrk%Krrrrm!%5(rqL%K)5rrS5%[rrrrm")5'l)5%K)5Ul)5(rrrr`!4)5%K) -5%K)5%K)5rrrr!!%K)5%K)5%K)5%K)Irrm!!"%K)5%K)5%K)5%K,rr`!!!5%K)5% -K)5%K)5%Krr!!!!+lZlZlZlZlZlZlZrm!!!!!!!!!!!!!!!!!!!$`!!!!!!!%!2m -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!rrm,#`X,#`X,#`X,#`X,#`X -,#`X,#`X,#`X,#`X,%3$rrrm4%4%4%4%4%4%4%4%4%4%4%4%4%4%4%4%4%4&I!2r -rrrm4%4%4%4%4%4%4%4%4%4%4%4%4%4%4%4%4%9m!rrrrrrm4%4%4%4%4%4%4%4% -4%4%4%4%4%4%4%4%4A`$rrrrrrrm4%4%4%4%4%4%4%4%4%4%4%4%4%4%4%4&I!2r 
-rrrrrrrm4%4%4AepI04%4%4%4%4%4068e%4%4%9m!rrrrrrrrrrm4%6Arrrq*%4% -4%4%4%6AJp)N4%4%4A`$rrrrrrrrr%4%4rIrrrrme%4%4%4%ehrrri"%4%4&I!2r -rrrrrra%4%6Arrrrrrem4%4%4Arlrrrrr%4%4%9m!rrrrrrm4%4%4ArrrrrrrV4% -4%9rrrrrrrrd4%4%4A`$rrrrr%4%4%4'*rrhrrrrJ04&Ii2rrrrrrV4%4%4&I!2r -rra%4%4%4%Df*0Dhrrrq*%IlrVB1*rBNe%4%4%9m!rrm4%4%4%4%41c84Arrrrrr -qrkde%4%4%4%4%4%4A`$r#a%4%4%4%4%4%4%er[rrrrq*04%4%4%4%4%4%4&I!!! -,%4%4%4%4%4%4%4'YrrrrV684%4%4%4%4%4%4%9m!!!X4%4%4%4%4%4%4%BRrrrp -I%4%4%4%4%4%4%4%4A`!!#a%4%4%4%4%4%4%er[rrriN4%4%4%4(r%4%4%4&I!!! -,%4%4%4%4%4%40IlrrrrrrM84%4%4%Irr%4%4%9m!!!X4%4%eAc84%4'Yrkhqrrr -rLB1YAa%4rrrr%4%4A`!!#a%4%BRdL4%eLIq$%BRrrrrrrrrq04(rrrrr%4&I!!! -,%4%4r[rqVIlrAa%4Arrrrrrrrem4%Irrrrrr%9m!!!X4%6Arrrrrrem4%4&Irrr -rrrrI04%4rrrrrrrrA`!!#a%40Irrrrq*%4%4%6@Yrrrrri-4%4(rrrrrrrrr!!! -,%4%er[rrL4%4%4%4%6[rrrq*04%4%Irrrrrrrrrr!!X4%4%eAeme%4%4%4%4%6Z -$Ac34%4%4rrrrrrrrr`!!#a%4%4%4%4%4%4%4%4%4%4%4%4%4%4(rrrrrrrm!!!! -,%4%4%4%4%4%4%4%4%4%4%4%4%4%4%Irrrrrr!!!!!!X4%4%4%4%4%4%4%4%4%4% -4%4%4%4%4rrrrr`!!!!!!#a%4%4%4%4%4%4%4%4%4%4%4%4%4%4(rrrm!!!!!!!! -4AepIAepIAepIAepIAepIAepIAepIArrr!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -!!!!!!!!!r`!!!!!!!!!!!!%!J!!!!2rrrrlJ!!!#m!!!![J!!!,m!!!#rK`!![m -H!B,q2J2#r$m(`[Kr$m,`Iar#i'qrJX!2m!,!"q!#3!I!!N!(J!*!"i##3!r!`N) -Ic1*(1rcb4r2mqNIMq2j(`IMq4i(`rd-!i2j!!!$m3!!!q%!!!2"!!!$JIrrr`!! 
-!!)#!!!!!rrrrr[rrrrlrrrrqrrrrr[rrrrlrrrrqrrrrr[rrrrlrrrrqrrrrr[r -rrrlrrrrqrrrrr[rrrrjrrrrqIrrrrRrrrrjrrrrqIrrrrRrrrrjrrrrqIrrrrRr -rrrjrrrrrIrrrrRrrrrarrrriIrrrm(rrrq"rrrr!!!!!J!!!!%$rrm!"i!(h'IF -jlhR,HB2"JiQAVEh[[Hqjci!1J!crq2rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr -rrrlrr2ri!!!!J2%4%4%4%4%5rb%K)5%K)5[rmK)5%K)5'rrr+rSK,r%VrrmIra, -rmK[rmErr,rra+rm5U[rrUU)Em5%KrrSK)5X5%K,rmK,b'a%V+[rkXImV%[rrVrr -brrX4rrX[rr(rra,rXK,r%[rr%5%K)5%Krr!5%K)5%K,r!#ZlZlZlZr!!!!!"!2m -,#`X,#`X,#`X,#`X,#a(rra%4%4%4%4%4%4%4%4&Irrrr%4%4%4%4%4%4%4%4Arr -rrrm40Iq*%4%4Ar34%9rrrrrr09rrra%4r[rr%4&Irrrr%9rrrrmei2rrra%4Arr -r%4'YLIrrrrqYrBN4%9rr%4%4%4(qrrq*%4%4%4&I#a%4%4%4r[rr%4%4ra%4A`X -409m4VIrrriQY%Irr%9m,%Ilrr[pIrrrrrcArrrpI#a(rrrpI%BRrrpm4rrrrr`X -4r[pI%4%lrrme%Irrrrm,%4%4%4%4%4%4%4(rrrm!#a%4%4%4%4%4%4%4rrm!!"& -IAepIAepIAepIArm!!!!!!!!(+LSU+J!"!!!!!#48G&KU!!!!!8C548B!!3!!!)! -!!3#"5801)`!"!!!!J!!"!!!!!!!+81!!HK)!!"EMB!!!!!T3i!#lI!!!I2`!!!! -!#P$J!,Ym!!!@if!!!!!Z!3G!!!!!"Na88Lp54L"")'CbC@8J6'9dG'9bFQpb,e* -[BQp'EfFJF(*[C(9MG!!!!%X""d!!!!!&-5i`B6Bq-5i`B6FJ,5"MEh"jFQPRD(3 -J-6Nj15db-$!`)%TeFh3JGQ&Z)&*[Fh0eE5`J6'9dG'9bFQpb,e*[BQp'EfF!!!% -!!!!+kJ!!#HS!!!&K"Z5f9%Y)!!!!(!&5!!a(9EG*!!!!DP"[F(3!!!"f3Nj%6!! -!!)*8G&KU!!!!MNC548B!!3#DD@0X0!!!!,**3diM!!!![QPMFb-!!!$+D@0c0!! 
-!!0CTBh-i!!!!iRCPFR-!!3$Z8dPD43!#!3CTBf`i!!!"+LJ"rrm!!!!!!!!!!!$ -Prrm!!!!8!!!!!!#!rrm!!!NA!!!!!!!!!!!!!!!N!!!!!!#!rrm!!!!T!!!!!!# -"rrm!!!N-!!!!!!#!rrm!!!!d!!!!!!#!rrm!!!Bm!!!!!!#!rrm!!!G!!!!!!!# -!rrm!!!H%!!!!!!#!rrm!!!J)!!!!!!!"rrm!!!QE"Z5e,!!#rrm!!!PT"Z5e-2r -rrrm!!!Nr"Z5e*!!"rrm!!!P0"Z5e+!!!rrm!!!PE"Z5e)!#!rrm!!!)i!!!!!!j -2GfjPFL"bCA0[GA*MCHPB: diff -Nru fonttools-2.4/Makefile fonttools-3.0/Makefile --- fonttools-2.4/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Makefile 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,14 @@ +all: + ./setup.py bdist + +dist: + ./setup.py sdist + +install: + ./setup.py install + +install-user: + ./setup.py install --user + +check: all + ./run-tests.sh diff -Nru fonttools-2.4/MANIFEST.in fonttools-3.0/MANIFEST.in --- fonttools-2.4/MANIFEST.in 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/MANIFEST.in 2015-08-31 17:57:15.000000000 +0000 @@ -5,14 +5,9 @@ include Doc/*.txt include Doc/*.html include MetaTools/*.py -include MetaTools/*.diff -include Mac/TTX.py -include Mac/README.txt -include Mac/TTX.rsrc.hqx include Windows/mcmillan.bat include Windows/ttx.ico include Windows/README.TXT include Windows/fonttools-win-setup.iss include Windows/fonttools-win-setup.txt include Lib/fontTools/ttLib/tables/table_API_readme.txt -include Src/eexecOp/README.txt diff -Nru fonttools-2.4/MetaTools/buildChangeLog.py fonttools-3.0/MetaTools/buildChangeLog.py --- fonttools-2.4/MetaTools/buildChangeLog.py 2013-06-22 14:25:29.000000000 +0000 +++ fonttools-3.0/MetaTools/buildChangeLog.py 2015-08-31 17:57:15.000000000 +0000 @@ -6,5 +6,5 @@ os.path.join(os.getcwd(), sys.argv[0])))) os.chdir(fontToolsDir) -os.system("svn2cl -o Doc/ChangeLog https://svn.code.sf.net/p/fonttools/code/trunk") -print "done." 
+os.system("git2cl > Doc/ChangeLog") +print("done.") diff -Nru fonttools-2.4/MetaTools/build_otData.py fonttools-3.0/MetaTools/build_otData.py --- fonttools-2.4/MetaTools/build_otData.py 2013-06-22 14:25:28.000000000 +0000 +++ fonttools-3.0/MetaTools/build_otData.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,158 +0,0 @@ -#! /usr/bin/env python - - -"""This script builds the Lib/fontTools/ttLib/tables/otData.py file -from the OpenType HTML documentation. However, it depends on a slightly -patched version the the HTML, as there are some inconsistencies in the -markup and the naming of certain fields. See doco.diff for differences, -but this is probably against a slightly older version of the documentation -than what is currently online. The documentation was taken from this URL: - http://www.microsoft.com/typography/otspec/default.htm -""" - - -from sgmllib import SGMLParser - - -class HTMLParser(SGMLParser): - - def __init__(self): - SGMLParser.__init__(self) - self.data = None - self.currenttable = None - self.lastcaption = None - - def handle_data(self, data): - if self.data is not None: - self.data.append(data) - - def start_i(self, attrs): - if self.currenttable is None: - self.data = [] - def end_i(self): - if self.currenttable is None: - self.lastcaption = " ".join(self.data) - self.data = None - - def start_b(self, attrs): - if self.currenttable is None: - self.data = [] - def end_b(self): - if self.currenttable is None: - self.lastcaption = " ".join(self.data) - self.data = None - - def start_table(self, attrs): - attrs = dict(attrs) - if attrs.get('width') in ('455', '460'): - #print "---", attrs - self.currenttable = [] - else: - self.currenttable = None - def end_table(self): - if self.currenttable is not None and self.lastcaption is not None: - if self.currenttable[0] == ['Type', 'Name', 'Description'] or \ - self.currenttable[0] == ['Value', 'Type', 'Description']: - caption = self.lastcaption.split() - name = caption[0] - if name == "LookupType" or name 
== "LookupFlag": - self.currenttable = None - return - elif name == "Device": - if "Tables" in caption: - # XXX skip this one - self.currenttable = None - return - buildTable(name, self.currenttable[1:], self.lastcaption) - self.currenttable = None - - def start_tr(self, attrs): - if self.currenttable is not None: - self.currenttable.append([]) - def end_tr(self): - pass - - def start_td(self, attrs): - self.data = [] - def end_td(self): - if self.currenttable is not None and self.data is not None: - self.currenttable[-1].append(" ".join(self.data)) - self.data = None - - -globalDups = {} -localDups = {} -not3 = [] - -def buildTable(name, table, caption): - if globalDups.has_key(name): - globalDups[name].append(caption) - else: - globalDups[name] = [caption] - print "\t(%s, [" % repr(name) - allFields = {} - for row in table: - row = [" ".join(x.split()) for x in row] - if len(row) <> 3: - not3.append(row) - row = makeRow(row) - fieldName = row[1] - if allFields.has_key(fieldName): - key = (name, fieldName) - localDups[key] = 1 - allFields[fieldName] = 1 - print "\t\t%s," % (tuple(row),) - print "\t])," - print - - -def makeRow(rawRow): - tp, name = rawRow[:2] - name = name.strip() - rest = tuple(rawRow[2:]) - if '[' in name: - name, repeat = name.split("[") - name = name.strip() - assert repeat[-1] == "]" - repeat = repeat[:-1].split() - if repeat[1:]: - repeatOffset = int("".join(repeat[1:])) - else: - repeatOffset = 0 - if not repeat: - repeat = "" - else: - repeat = repeat[0] - else: - repeat = None - repeatOffset = None - row = (tp, name, repeat, repeatOffset) + rest - return row - - -if __name__ == "__main__": - import sys, os - if "-" not in sys.argv: - sys.stdout = open("otData.py", "w") - print "otData = [" - for file in ["chapter2.htm", "gpos.htm", "gsub.htm", "gdef.htm", "base.htm", "jstf.htm"]: - name = os.path.splitext(file)[0] - if name == "chapter2": - name = "common" - print - print "\t#" - print "\t# %s (generated from %s)" % (name, file) - print 
"\t#" - print - p = HTMLParser() - p.feed(open(file).read()) - p.close() - print "]" - print - for k, v in globalDups.items(): - if len(v) > 1: - print "# XXX duplicate table name:", k, v - for (name, fieldName), v in localDups.items(): - print "# XXX duplicate field name '%s' in table '%s'" % (fieldName, name) - for n in not3: - print "#XXX", not3 diff -Nru fonttools-2.4/MetaTools/buildTableList.py fonttools-3.0/MetaTools/buildTableList.py --- fonttools-2.4/MetaTools/buildTableList.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/MetaTools/buildTableList.py 2015-08-31 17:57:15.000000000 +0000 @@ -31,13 +31,27 @@ file = open(os.path.join(tablesDir, "__init__.py"), "w") -file.write("# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.\n") -file.write("def _moduleFinderHint():\n") -file.write('\t"""Dummy function to let modulefinder know what tables may be\n') -file.write('\tdynamically imported. Generated by MetaTools/buildTableList.py.\n') -file.write('\t"""\n') +file.write(''' +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +# DON'T EDIT! This file is generated by MetaTools/buildTableList.py. +def _moduleFinderHint(): + """Dummy function to let modulefinder know what tables may be + dynamically imported. Generated by MetaTools/buildTableList.py. + + >>> _moduleFinderHint() + """ +''') + for module in modules: - file.write("\timport %s\n" % module) + file.write("\tfrom . 
import %s\n" % module) + +file.write(''' +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) +''') file.close() diff -Nru fonttools-2.4/MetaTools/doco.diff fonttools-3.0/MetaTools/doco.diff --- fonttools-2.4/MetaTools/doco.diff 2013-06-22 14:25:28.000000000 +0000 +++ fonttools-3.0/MetaTools/doco.diff 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ ---- htmlorig/gpos.htm Fri Apr 5 23:55:58 2002 -+++ htmlbak/gpos.htm Tue May 7 09:53:30 2002 -@@ -270,7 +270,7 @@ - - <P>Example 2 at the end of this chapter shows a SinglePosFormat1 subtable used to adjust the placement of subscript glyphs. - --<P>SinglePosFormat1 subtable: Single positioning value -+<P><I>SinglePosFormat1 subtable: Single positioning value</I> - <P> - - <TABLE BGCOLOR="#F0F0F0" WIDTH=460 BORDER=0 CELLPADDING=3> -@@ -312,7 +312,7 @@ - - <P>Example 3 at the end of this chapter shows how to adjust the spacing of three dash glyphs with a SinglePosFormat2 subtable. - --<P>SinglePosFormat2 subtable: Array of positioning values -+<P><I>SinglePosFormat2 subtable: Array of positioning values</I> - <P> - - <TABLE BGCOLOR="#F0F0F0" WIDTH=460 BORDER=0 CELLPADDING=3> -@@ -392,8 +392,8 @@ - <TR><TD CLASS=tab VALIGN=TOP>uint16</TD> - <TD CLASS=tab VALIGN=TOP>PairSetCount</TD> - <TD CLASS=tab>Number of PairSet tables</TD></TR> --<TR><TD CLASS=tab VALIGN=TOP>ValueRecord</TD> --<TD CLASS=tab VALIGN=TOP>PairSet<BR>[Offset]</TD> -+<TR><TD CLASS=tab VALIGN=TOP>Offset</TD> -+<TD CLASS=tab VALIGN=TOP>PairSet<BR>[PairSetCount]</TD> - <TD CLASS=tab>Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index</TD></TR> - </TABLE> - -@@ -855,7 +855,8 @@ - <TD CLASS=tab>Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>uint16</TD> - <TD CLASS=tab VALIGN=TOP>ClassCount</TD> --<TD CLASS=tab>Number of Combining Mark classes defined<TR> -+<TD CLASS=tab>Number of Combining Mark classes 
defined</TD> -+<TR> - <TD CLASS=tab VALIGN=TOP>Offset</TD> - <TD CLASS=tab VALIGN=TOP>Mark1Array</TD> - <TD CLASS=tab VALIGN=TOP>Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable</TD></TR> -@@ -1386,19 +1387,19 @@ - <TD CLASS=tab VALIGN=TOP>BacktrackGlyphCount</TD> - <TD CLASS=tab>Number of glyphs in the backtracking sequence</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>Offset</TD> --<TD CLASS=tab VALIGN=TOP>Coverage[BacktrackGlyphCount]</TD> -+<TD CLASS=tab VALIGN=TOP>BacktrackCoverage[BacktrackGlyphCount]</TD> - <TD CLASS=tab>Array of offsets to coverage tables in backtracking sequence, in glyph sequence order</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>uint16</TD> - <TD CLASS=tab VALIGN=TOP>InputGlyphCount</TD> - <TD CLASS=tab>Number of glyphs in input sequence</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>Offset</TD> --<TD CLASS=tab VALIGN=TOP>Coverage[InputGlyphCount]</TD> -+<TD CLASS=tab VALIGN=TOP>InputCoverage[InputGlyphCount]</TD> - <TD CLASS=tab>Array of offsets to coverage tables in input sequence, in glyph sequence order</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>uint16</TD> - <TD CLASS=tab VALIGN=TOP>LookaheadGlyphCount</TD> - <TD CLASS=tab>Number of glyphs in lookahead sequence</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>Offset</TD> --<TD CLASS=tab VALIGN=TOP>Coverage[LookaheadGlyphCount]</TD> -+<TD CLASS=tab VALIGN=TOP>LookaheadCoverage[LookaheadGlyphCount]</TD> - <TD CLASS=tab>Array of offsets to coverage tables in lookahead sequence, in glyph sequence order</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>uint16</TD> - <TD CLASS=tab VALIGN=TOP>PosCount</TD> -diff -u htmlorig/gsub.htm htmlbak/gsub.htm ---- htmlorig/gsub.htm Fri Apr 5 23:55:58 2002 -+++ htmlbak/gsub.htm Tue May 7 09:53:17 2002 -@@ -758,7 +758,7 @@ - - <A HREF="#EX9"><P>Example 9</A> at the end of this chapter substitutes swash glyphs for two out of three glyphs in a sequence. 
- --<P><BR><I>ChainContextSubstFormat3 subtable: Coverage-based context glyph substitution</I><P> -+<P><BR><I>ContextSubstFormat3 subtable: Coverage-based context glyph substitution</I><P> - - <TABLE BGCOLOR="#F0F0F0" WIDTH=460 BORDER=0 CELLPADDING=3> - <TR> -@@ -880,7 +880,7 @@ - <TD CLASS=tab VALIGN=TOP>LookaheadGlyphCount</TD> - <TD CLASS=tab>Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>GlyphID</TD> --<TD CLASS=tab VALIGN=TOP>LookAhead<BR>[LookAheadGlyphCount]</TD> -+<TD CLASS=tab VALIGN=TOP>Lookahead<BR>[LookAheadGlyphCount]</TD> - <TD CLASS=tab VALIGN=TOP>Array of lookahead GlyphID's (to be matched after the input sequence)</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>uint16</TD> - <TD CLASS=tab VALIGN=TOP>SubstCount</TD> -@@ -1023,19 +1023,19 @@ - <TD CLASS=tab VALIGN=TOP>BacktrackGlyphCount</TD> - <TD CLASS=tab>Number of glyphs in the backtracking sequence</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>Offset</TD> --<TD CLASS=tab VALIGN=TOP>Coverage[BacktrackGlyphCount]</TD> -+<TD CLASS=tab VALIGN=TOP>BacktrackCoverage[BacktrackGlyphCount]</TD> - <TD CLASS=tab>Array of offsets to coverage tables in backtracking sequence, in glyph sequence order</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>uint16</TD> - <TD CLASS=tab VALIGN=TOP>InputGlyphCount</TD> - <TD CLASS=tab>Number of glyphs in input sequence</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>Offset</TD> --<TD CLASS=tab VALIGN=TOP>Coverage[InputGlyphCount]</TD> -+<TD CLASS=tab VALIGN=TOP>InputCoverage[InputGlyphCount]</TD> - <TD CLASS=tab>Array of offsets to coverage tables in input sequence, in glyph sequence order</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>uint16</TD> - <TD CLASS=tab VALIGN=TOP>LookaheadGlyphCount</TD> - <TD CLASS=tab>Number of glyphs in lookahead sequence</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>Offset</TD> --<TD CLASS=tab VALIGN=TOP>Coverage[LookaheadGlyphCount]</TD> -+<TD CLASS=tab 
VALIGN=TOP>LookaheadCoverage[LookaheadGlyphCount]</TD> - <TD CLASS=tab>Array of offsets to coverage tables in lookahead sequence, in glyph sequence order</TD></TR> - <TR><TD CLASS=tab VALIGN=TOP>uint16</TD> - <TD CLASS=tab VALIGN=TOP>SubstCount</TD> diff -Nru fonttools-2.4/MetaTools/roundTrip.py fonttools-3.0/MetaTools/roundTrip.py --- fonttools-2.4/MetaTools/roundTrip.py 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/MetaTools/roundTrip.py 2015-08-31 17:57:15.000000000 +0000 @@ -25,7 +25,7 @@ def usage(): - print __doc__ + print(__doc__) sys.exit(2) @@ -46,7 +46,7 @@ diffcmd = 'diff -U2 -I ".*modified value\|checkSumAdjustment.*" "%s" "%s"' % (xmlFile1, xmlFile2) output = os.popen(diffcmd, "r", 1) lines = [] - while 1: + while True: line = output.readline() if not line: break @@ -58,7 +58,7 @@ report.write("-------------------------------------------------------------\n") report.writelines(lines) else: - print "(TTX files are the same)" + print("(TTX files are the same)") finally: for tmpFile in (xmlFile1, ttFile2, xmlFile2): if os.path.exists(tmpFile): @@ -80,10 +80,10 @@ try: roundTrip(ttFile, options, report) except KeyboardInterrupt: - print "(Cancelled)" + print("(Cancelled)") break except: - print "*** round tripping aborted ***" + print("*** round tripping aborted ***") traceback.print_exc() report.write("=============================================================\n") report.write(" An exception occurred while round tripping") diff -Nru fonttools-2.4/PKG-INFO fonttools-3.0/PKG-INFO --- fonttools-2.4/PKG-INFO 2013-06-22 14:29:57.000000000 +0000 +++ fonttools-3.0/PKG-INFO 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -Metadata-Version: 1.1 -Name: fonttools -Version: 2.4 -Summary: Tools to manipulate font files -Home-page: http://fonttools.sourceforge.net/ -Author: Just van Rossum -Author-email: just@letterror.com -License: OpenSource, BSD-style -Description: FontTools/TTX is a library to manipulate font files from Python. 
- It supports reading and writing of TrueType/OpenType fonts, reading - and writing of AFM files, reading (and partially writing) of PS Type 1 - fonts. The package also contains a tool called "TTX" which converts - TrueType/OpenType fonts to and from an XML-based format. - -Platform: Any -Classifier: Development Status :: 4 - Beta -Classifier: Environment :: Console -Classifier: Environment :: Other Environment -Classifier: Intended Audience :: Developers -Classifier: Intended Audience :: End Users/Desktop -Classifier: License :: OSI Approved :: BSD License -Classifier: Natural Language :: English -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Topic :: Multimedia :: Graphics -Classifier: Topic :: Multimedia :: Graphics :: Graphics Conversion diff -Nru fonttools-2.4/README.md fonttools-3.0/README.md --- fonttools-2.4/README.md 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/README.md 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,43 @@ +[![Build Status](https://travis-ci.org/behdad/fonttools.svg)](https://travis-ci.org/behdad/fonttools) +[![Health](https://landscape.io/github/behdad/fonttools/master/landscape.svg?style=flat)](https://landscape.io/github/behdad/fonttools/master) +[![Coverage Status](https://img.shields.io/coveralls/behdad/fonttools.svg)](https://coveralls.io/r/behdad/fonttools) + +### What it is ? + +Quoting from [TTX/FontTools Sourceforge Project](http://sourceforge.net/projects/fonttools/) +> a tool to convert OpenType and TrueType fonts to and from XML. FontTools is a library for manipulating fonts, written in Python. It supports TrueType, OpenType, AFM and to an extent Type 1 and some Mac-specific formats. + +### Quick start + +```python setup.py install``` + +From your command line type the above command to get fontools installed on your system. FontTools requires Python 2.7, or Python 3.3 or later. 
+ +### Installation + +See [install.txt](https://github.com/behdad/fonttools/blob/master/Doc/install.txt) in the 'Doc' subdirectory for instructions on how to build and install TTX/FontTools from the sources. + + +### Documentation + +#### What is TTX ? + +See [documentation.html](https://rawgit.com/behdad/fonttools/master/Doc/documentation.html) in the "Doc" subdirectory for TTX usage instructions and information about the TTX file format. + +#### History + +The fontTools project was started by Just van Rossum in 1999, and was maintained as an open source project at <http://sourceforge.net/projects/fonttools/>. In 2008, Paul Wise (pabs3) began helping Just with stability maintenance. In 2013 Behdad Esfahbod began a friendly fork, thoroughly reviewing the codebase and making changes at <https://github.com/behdad/fonttools> to add new features and support for new font formats. + +### Community + +* https://groups.google.com/d/forum/fonttools + +### License + +See "LICENSE.txt" for licensing information. + + + +Have fun! + +Just van Rossum <just@letterror.com> diff -Nru fonttools-2.4/README.txt fonttools-3.0/README.txt --- fonttools-2.4/README.txt 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/README.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -See the file "documentation.html" in the "Doc" subdirectory for TTX -usage instructions and information about the TTX file format. - -See the file "install.txt" in the "Doc" subdirectory for instructions -how to build and install TTX/FontTools from the sources. - -Quick start: run python setup.py install from the command line. - -See the file "LICENSE.txt" for licensing info. - -Have fun! 
- -Just van Rossum <just@letterror.com> diff -Nru fonttools-2.4/requirements.txt fonttools-3.0/requirements.txt --- fonttools-2.4/requirements.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/requirements.txt 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1 @@ +git+https://github.com/google/brotli@v0.1.0#egg=Brotli \ No newline at end of file diff -Nru fonttools-2.4/run-tests.sh fonttools-3.0/run-tests.sh --- fonttools-2.4/run-tests.sh 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/run-tests.sh 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,50 @@ +#!/bin/sh + +# Choose python version +if test "x$1" = x-3; then + PYTHON=python3 + shift +elif test "x$1" = x-2; then + PYTHON=python2 + shift +fi +test "x$PYTHON" = x && PYTHON=python +echo "$(which $PYTHON) --version" +$PYTHON --version 2>&1 +echo + +# Setup environment +DIR=`dirname "$0"` +cd "$DIR/Lib" +PYTHONPATH=".:$PYTHONPATH" +export PYTHONPATH + +# Find tests +FILTER= +for arg in "$@"; do + test "x$FILTER" != x && FILTER="$FILTER|" + FILTER="$FILTER$arg" +done + +test "x$FILTER" = "x" && FILTER=. +TESTS=`grep -r --include='*.py' -l -e doctest -e unittest * | grep -E "$FILTER"` + +ret=0 +FAILS= +for test in $TESTS; do + echo "Running tests in $test" + test=`echo "$test" | sed 's@[/\\]@.@g;s@[.]py$@@'` + if ! $PYTHON -m $test -v; then + ret=$((ret+1)) + FAILS="$FAILS +$test" + fi +done + echo + echo "SUMMARY:" +if test $ret = 0; then + echo "All tests passed." +else + echo "$ret source file(s) had tests failing:$FAILS" >&2 +fi +exit $ret diff -Nru fonttools-2.4/setup.py fonttools-3.0/setup.py --- fonttools-2.4/setup.py 2013-06-22 14:25:29.000000000 +0000 +++ fonttools-3.0/setup.py 2015-08-31 17:57:15.000000000 +0000 @@ -1,58 +1,50 @@ #! 
/usr/bin/env python +from __future__ import print_function import os, sys -from distutils.core import setup, Extension -from distutils.command.build_ext import build_ext +# if setuptools is not installed, fall back to distutils try: - # load py2exe distutils extension, if available - import py2exe + from setuptools import setup except ImportError: - pass - -try: - import numpy -except ImportError: - print "*** Warning: FontTools needs the numpy library, see:" - print " http://numpy.scipy.org/" + from distutils.core import setup + distutils_scripts = [ + "Tools/ttx", "Tools/pyftsubset", "Tools/pyftinspect", "Tools/pyftmerge"] +else: + distutils_scripts = [] try: import xml.parsers.expat except ImportError: - print "*** Warning: FontTools needs PyXML, see:" - print " http://sourceforge.net/projects/pyxml/" + print("*** Warning: FontTools needs PyXML, see:") + print(" http://sourceforge.net/projects/pyxml/") -class build_ext_optional(build_ext): - """build_ext command which doesn't abort when it fails.""" - def build_extension(self, ext): - # Skip extensions which cannot be built - try: - build_ext.build_extension(self, ext) - except: - self.announce( - '*** WARNING: Building of extension "%s" ' - 'failed: %s' % - (ext.name, sys.exc_info()[1])) - - -if sys.version_info > (2, 3, 0, 'alpha', 1): - # Trove classifiers for PyPI - classifiers = {"classifiers": [ - "Development Status :: 4 - Beta", - "Environment :: Console", - "Environment :: Other Environment", - "Intended Audience :: Developers", - "Intended Audience :: End Users/Desktop", - "License :: OSI Approved :: BSD License", - "Natural Language :: English", - "Operating System :: OS Independent", - "Programming Language :: Python", - "Topic :: Multimedia :: Graphics", - "Topic :: Multimedia :: Graphics :: Graphics Conversion", - ]} -else: - classifiers = {} +# Force distutils to use py_compile.compile() function with 'doraise' argument +# set to True, in order to raise an exception on compilation errors +import 
py_compile +orig_py_compile = py_compile.compile + +def doraise_py_compile(file, cfile=None, dfile=None, doraise=False): + orig_py_compile(file, cfile=cfile, dfile=dfile, doraise=True) + +py_compile.compile = doraise_py_compile + + +# Trove classifiers for PyPI +classifiers = {"classifiers": [ + "Development Status :: 4 - Beta", + "Environment :: Console", + "Environment :: Other Environment", + "Intended Audience :: Developers", + "Intended Audience :: End Users/Desktop", + "License :: OSI Approved :: BSD License", + "Natural Language :: English", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Topic :: Multimedia :: Graphics", + "Topic :: Multimedia :: Graphics :: Graphics Conversion", +]} long_description = """\ FontTools/TTX is a library to manipulate font files from Python. @@ -64,42 +56,37 @@ setup( name = "fonttools", - version = "2.4", + version = "3.0", description = "Tools to manipulate font files", author = "Just van Rossum", author_email = "just@letterror.com", - maintainer = "Just van Rossum", - maintainer_email = "just@letterror.com", - url = "http://fonttools.sourceforge.net/", + maintainer = "Behdad Esfahbod", + maintainer_email = "behdad@behdad.org", + url = "http://github.com/behdad/fonttools", license = "OpenSource, BSD-style", platforms = ["Any"], long_description = long_description, packages = [ - "", "fontTools", "fontTools.encodings", "fontTools.misc", "fontTools.pens", "fontTools.ttLib", "fontTools.ttLib.tables", - "fontTools.ttLib.test", ], + py_modules = ['sstruct', 'xmlWriter'], package_dir = {'': 'Lib'}, extra_path = 'FontTools', - ext_modules = [ - Extension( - "fontTools.misc.eexecOp", - ["Src/eexecOp/eexecOpmodule.c"], - include_dirs=[], - define_macros=[], - library_dirs=[], - libraries=[], - ) - ], - scripts = ["Tools/ttx"], - console = ["Tools/ttx"], - cmdclass = {"build_ext": build_ext_optional}, data_files = [('share/man/man1', ["Doc/ttx.1"])], + scripts = distutils_scripts, + entry_points = { + 
'console_scripts': [ + "ttx = fontTools.ttx:main", + "pyftsubset = fontTools.subset:main", + "pyftmerge = fontTools.merge:main", + "pyftinspect = fontTools.inspect:main" + ] + }, **classifiers ) diff -Nru fonttools-2.4/Snippets/cmap-format.py fonttools-3.0/Snippets/cmap-format.py --- fonttools-2.4/Snippets/cmap-format.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/cmap-format.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,40 @@ +#! /usr/bin/env python + +# Sample script to convert legacy cmap subtables to format-4 +# subtables. Note that this is rarely what one needs. You +# probably need to just drop the legacy subtables if the font +# already has a format-4 subtable. +# +# Other times, you would need to convert a non-Unicode cmap +# legacy subtable to a Unicode one. In those cases, use the +# getEncoding() of subtable and use that encoding to map the +# characters to Unicode... TODO: Extend this script to do that. + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables._c_m_a_p import CmapSubtable +import sys + +if len(sys.argv) != 3: + print("usage: cmap-format.py fontfile.ttf outfile.ttf") + sys.exit(1) +fontfile = sys.argv[1] +outfile = sys.argv[2] +font = TTFont(fontfile) + +cmap = font['cmap'] +outtables = [] +for table in cmap.tables: + if table.format in [4, 12, 13, 14]: + outtables.append(table) + # Convert ot format4 + newtable = CmapSubtable.newSubtable(4) + newtable.platformID = table.platformID + newtable.platEncID = table.platEncID + newtable.language = table.language + newtable.cmap = table.cmap + outtables.append(newtable) +cmap.tables = outtables + +font.save(outfile) diff -Nru fonttools-2.4/Snippets/fontTools/afmLib.py fonttools-3.0/Snippets/fontTools/afmLib.py --- fonttools-2.4/Snippets/fontTools/afmLib.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/afmLib.py 2015-08-31 
17:57:15.000000000 +0000 @@ -0,0 +1,376 @@ +"""Module for reading and writing AFM files.""" + +# XXX reads AFM's generated by Fog, not tested with much else. +# It does not implement the full spec (Adobe Technote 5004, Adobe Font Metrics +# File Format Specification). Still, it should read most "common" AFM files. + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import re + +# every single line starts with a "word" +identifierRE = re.compile("^([A-Za-z]+).*") + +# regular expression to parse char lines +charRE = re.compile( + "(-?\d+)" # charnum + "\s*;\s*WX\s+" # ; WX + "(-?\d+)" # width + "\s*;\s*N\s+" # ; N + "([.A-Za-z0-9_]+)" # charname + "\s*;\s*B\s+" # ; B + "(-?\d+)" # left + "\s+" + "(-?\d+)" # bottom + "\s+" + "(-?\d+)" # right + "\s+" + "(-?\d+)" # top + "\s*;\s*" # ; + ) + +# regular expression to parse kerning lines +kernRE = re.compile( + "([.A-Za-z0-9_]+)" # leftchar + "\s+" + "([.A-Za-z0-9_]+)" # rightchar + "\s+" + "(-?\d+)" # value + "\s*" + ) + +# regular expressions to parse composite info lines of the form: +# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ; +compositeRE = re.compile( + "([.A-Za-z0-9_]+)" # char name + "\s+" + "(\d+)" # number of parts + "\s*;\s*" + ) +componentRE = re.compile( + "PCC\s+" # PPC + "([.A-Za-z0-9_]+)" # base char name + "\s+" + "(-?\d+)" # x offset + "\s+" + "(-?\d+)" # y offset + "\s*;\s*" + ) + +preferredAttributeOrder = [ + "FontName", + "FullName", + "FamilyName", + "Weight", + "ItalicAngle", + "IsFixedPitch", + "FontBBox", + "UnderlinePosition", + "UnderlineThickness", + "Version", + "Notice", + "EncodingScheme", + "CapHeight", + "XHeight", + "Ascender", + "Descender", +] + + +class error(Exception): + pass + + +class AFM(object): + + _attrs = None + + _keywords = ['StartFontMetrics', + 'EndFontMetrics', + 'StartCharMetrics', + 'EndCharMetrics', + 'StartKernData', + 'StartKernPairs', + 'EndKernPairs', + 'EndKernData', + 'StartComposites', + 'EndComposites', + ] 
+ + def __init__(self, path=None): + self._attrs = {} + self._chars = {} + self._kerning = {} + self._index = {} + self._comments = [] + self._composites = {} + if path is not None: + self.read(path) + + def read(self, path): + lines = readlines(path) + for line in lines: + if not line.strip(): + continue + m = identifierRE.match(line) + if m is None: + raise error("syntax error in AFM file: " + repr(line)) + + pos = m.regs[1][1] + word = line[:pos] + rest = line[pos:].strip() + if word in self._keywords: + continue + if word == "C": + self.parsechar(rest) + elif word == "KPX": + self.parsekernpair(rest) + elif word == "CC": + self.parsecomposite(rest) + else: + self.parseattr(word, rest) + + def parsechar(self, rest): + m = charRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + things = [] + for fr, to in m.regs[1:]: + things.append(rest[fr:to]) + charname = things[2] + del things[2] + charnum, width, l, b, r, t = (int(thing) for thing in things) + self._chars[charname] = charnum, width, (l, b, r, t) + + def parsekernpair(self, rest): + m = kernRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + things = [] + for fr, to in m.regs[1:]: + things.append(rest[fr:to]) + leftchar, rightchar, value = things + value = int(value) + self._kerning[(leftchar, rightchar)] = value + + def parseattr(self, word, rest): + if word == "FontBBox": + l, b, r, t = [int(thing) for thing in rest.split()] + self._attrs[word] = l, b, r, t + elif word == "Comment": + self._comments.append(rest) + else: + try: + value = int(rest) + except (ValueError, OverflowError): + self._attrs[word] = rest + else: + self._attrs[word] = value + + def parsecomposite(self, rest): + m = compositeRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + charname = m.group(1) + ncomponents = int(m.group(2)) + rest = rest[m.regs[0][1]:] + components = [] + while True: + m = componentRE.match(rest) + if m 
is None: + raise error("syntax error in AFM file: " + repr(rest)) + basechar = m.group(1) + xoffset = int(m.group(2)) + yoffset = int(m.group(3)) + components.append((basechar, xoffset, yoffset)) + rest = rest[m.regs[0][1]:] + if not rest: + break + assert len(components) == ncomponents + self._composites[charname] = components + + def write(self, path, sep='\r'): + import time + lines = [ "StartFontMetrics 2.0", + "Comment Generated by afmLib; at %s" % ( + time.strftime("%m/%d/%Y %H:%M:%S", + time.localtime(time.time())))] + + # write comments, assuming (possibly wrongly!) they should + # all appear at the top + for comment in self._comments: + lines.append("Comment " + comment) + + # write attributes, first the ones we know about, in + # a preferred order + attrs = self._attrs + for attr in preferredAttributeOrder: + if attr in attrs: + value = attrs[attr] + if attr == "FontBBox": + value = "%s %s %s %s" % value + lines.append(attr + " " + str(value)) + # then write the attributes we don't know about, + # in alphabetical order + items = sorted(attrs.items()) + for attr, value in items: + if attr in preferredAttributeOrder: + continue + lines.append(attr + " " + str(value)) + + # write char metrics + lines.append("StartCharMetrics " + repr(len(self._chars))) + items = [(charnum, (charname, width, box)) for charname, (charnum, width, box) in self._chars.items()] + + def myKey(a): + """Custom key function to make sure unencoded chars (-1) + end up at the end of the list after sorting.""" + if a[0] == -1: + a = (0xffff,) + a[1:] # 0xffff is an arbitrary large number + return a + items.sort(key=myKey) + + for charnum, (charname, width, (l, b, r, t)) in items: + lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" % + (charnum, width, charname, l, b, r, t)) + lines.append("EndCharMetrics") + + # write kerning info + lines.append("StartKernData") + lines.append("StartKernPairs " + repr(len(self._kerning))) + items = sorted(self._kerning.items()) + for (leftchar, 
rightchar), value in items: + lines.append("KPX %s %s %d" % (leftchar, rightchar, value)) + lines.append("EndKernPairs") + lines.append("EndKernData") + + if self._composites: + composites = sorted(self._composites.items()) + lines.append("StartComposites %s" % len(self._composites)) + for charname, components in composites: + line = "CC %s %s ;" % (charname, len(components)) + for basechar, xoffset, yoffset in components: + line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset) + lines.append(line) + lines.append("EndComposites") + + lines.append("EndFontMetrics") + + writelines(path, lines, sep) + + def has_kernpair(self, pair): + return pair in self._kerning + + def kernpairs(self): + return list(self._kerning.keys()) + + def has_char(self, char): + return char in self._chars + + def chars(self): + return list(self._chars.keys()) + + def comments(self): + return self._comments + + def addComment(self, comment): + self._comments.append(comment) + + def addComposite(self, glyphName, components): + self._composites[glyphName] = components + + def __getattr__(self, attr): + if attr in self._attrs: + return self._attrs[attr] + else: + raise AttributeError(attr) + + def __setattr__(self, attr, value): + # all attrs *not* starting with "_" are consider to be AFM keywords + if attr[:1] == "_": + self.__dict__[attr] = value + else: + self._attrs[attr] = value + + def __delattr__(self, attr): + # all attrs *not* starting with "_" are consider to be AFM keywords + if attr[:1] == "_": + try: + del self.__dict__[attr] + except KeyError: + raise AttributeError(attr) + else: + try: + del self._attrs[attr] + except KeyError: + raise AttributeError(attr) + + def __getitem__(self, key): + if isinstance(key, tuple): + # key is a tuple, return the kernpair + return self._kerning[key] + else: + # return the metrics instead + return self._chars[key] + + def __setitem__(self, key, value): + if isinstance(key, tuple): + # key is a tuple, set kernpair + self._kerning[key] = 
value + else: + # set char metrics + self._chars[key] = value + + def __delitem__(self, key): + if isinstance(key, tuple): + # key is a tuple, del kernpair + del self._kerning[key] + else: + # del char metrics + del self._chars[key] + + def __repr__(self): + if hasattr(self, "FullName"): + return '<AFM object for %s>' % self.FullName + else: + return '<AFM object at %x>' % id(self) + + +def readlines(path): + f = open(path, 'rb') + data = f.read() + f.close() + # read any text file, regardless whether it's formatted for Mac, Unix or Dos + sep = "" + if '\r' in data: + sep = sep + '\r' # mac or dos + if '\n' in data: + sep = sep + '\n' # unix or dos + return data.split(sep) + +def writelines(path, lines, sep='\r'): + f = open(path, 'wb') + for line in lines: + f.write(line + sep) + f.close() + + +if __name__ == "__main__": + import EasyDialogs + path = EasyDialogs.AskFileForOpen() + if path: + afm = AFM(path) + char = 'A' + if afm.has_char(char): + print(afm[char]) # print charnum, width and boundingbox + pair = ('A', 'V') + if afm.has_kernpair(pair): + print(afm[pair]) # print kerning value for pair + print(afm.Version) # various other afm entries have become attributes + print(afm.Weight) + # afm.comments() returns a list of all Comment lines found in the AFM + print(afm.comments()) + #print afm.chars() + #print afm.kernpairs() + print(afm) + afm.write(path + ".muck") diff -Nru fonttools-2.4/Snippets/fontTools/agl.py fonttools-3.0/Snippets/fontTools/agl.py --- fonttools-2.4/Snippets/fontTools/agl.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/agl.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,738 @@ +# The table below is taken from +# http://www.adobe.com/devnet/opentype/archives/aglfn.txt + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +_aglText = """\ +# ----------------------------------------------------------- +# Copyright 2003, 2005-2008, 2010 Adobe Systems Incorporated. 
+# All rights reserved. +# +# Redistribution and use in source and binary forms, with or +# without modification, are permitted provided that the +# following conditions are met: +# +# Redistributions of source code must retain the above +# copyright notice, this list of conditions and the following +# disclaimer. +# +# Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials +# provided with the distribution. +# +# Neither the name of Adobe Systems Incorporated nor the names +# of its contributors may be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# ----------------------------------------------------------- +# Name: Adobe Glyph List For New Fonts +# Table version: 1.7 +# Date: November 6, 2008 +# URL: http://sourceforge.net/adobe/aglfn/ +# +# Description: +# +# AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph +# names that are recommended for new fonts, which are compatible with +# the AGL (Adobe Glyph List) Specification, and which should be used +# as described in Section 6 of that document. AGLFN comprises the set +# of glyph names from AGL that map via the AGL Specification rules to +# the semantically correct UV (Unicode Value). For example, "Asmall" +# is omitted because AGL maps this glyph name to the PUA (Private Use +# Area) value U+F761, rather than to the UV that maps from the glyph +# name "A." Also omitted is "ffi," because AGL maps this to the +# Alphabetic Presentation Forms value U+FB03, rather than decomposing +# it into the following sequence of three UVs: U+0066, U+0066, and +# U+0069. The name "arrowvertex" has been omitted because this glyph +# now has a real UV, and AGL is now incorrect in mapping it to the PUA +# value U+F8E6. If you do not find an appropriate name for your glyph +# in this list, then please refer to Section 6 of the AGL +# Specification. +# +# Format: three semicolon-delimited fields: +# (1) Standard UV or CUS UV--four uppercase hexadecimal digits +# (2) Glyph name--upper/lowercase letters and digits +# (3) Character names: Unicode character names for standard UVs, and +# descriptive names for CUS UVs--uppercase letters, hyphen, and +# space +# +# The records are sorted by glyph name in increasing ASCII order, +# entries with the same glyph name are sorted in decreasing priority +# order, the UVs and Unicode character names are provided for +# convenience, lines starting with "#" are comments, and blank lines +# should be ignored. 
+# +# Revision History: +# +# 1.7 [6 November 2008] +# - Reverted to the original 1.4 and earlier mappings for Delta, +# Omega, and mu. +# - Removed mappings for "afii" names. These should now be assigned +# "uni" names. +# - Removed mappings for "commaaccent" names. These should now be +# assigned "uni" names. +# +# 1.6 [30 January 2006] +# - Completed work intended in 1.5. +# +# 1.5 [23 November 2005] +# - Removed duplicated block at end of file. +# - Changed mappings: +# 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA +# 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA +# 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU +# - Corrected statement above about why "ffi" is omitted. +# +# 1.4 [24 September 2003] +# - Changed version to 1.4, to avoid confusion with the AGL 1.3. +# - Fixed spelling errors in the header. +# - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode +# value in some fonts. +# +# 1.1 [17 April 2003] +# - Renamed [Tt]cedilla back to [Tt]commaaccent. +# +# 1.0 [31 January 2003] +# - Original version. 
+# - Derived from the AGLv1.2 by: +# removing the PUA area codes; +# removing duplicate Unicode mappings; and +# renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla" +# +0041;A;LATIN CAPITAL LETTER A +00C6;AE;LATIN CAPITAL LETTER AE +01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE +00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE +0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE +00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX +00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS +00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE +0391;Alpha;GREEK CAPITAL LETTER ALPHA +0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS +0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON +0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK +00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE +01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE +00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE +0042;B;LATIN CAPITAL LETTER B +0392;Beta;GREEK CAPITAL LETTER BETA +0043;C;LATIN CAPITAL LETTER C +0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE +010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON +00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA +0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX +010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE +03A7;Chi;GREEK CAPITAL LETTER CHI +0044;D;LATIN CAPITAL LETTER D +010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON +0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE +2206;Delta;INCREMENT +0045;E;LATIN CAPITAL LETTER E +00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE +0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE +011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON +00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX +00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS +0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE +00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE +0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON +014A;Eng;LATIN CAPITAL LETTER ENG +0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK +0395;Epsilon;GREEK CAPITAL LETTER 
EPSILON +0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS +0397;Eta;GREEK CAPITAL LETTER ETA +0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS +00D0;Eth;LATIN CAPITAL LETTER ETH +20AC;Euro;EURO SIGN +0046;F;LATIN CAPITAL LETTER F +0047;G;LATIN CAPITAL LETTER G +0393;Gamma;GREEK CAPITAL LETTER GAMMA +011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE +01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON +011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX +0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE +0048;H;LATIN CAPITAL LETTER H +25CF;H18533;BLACK CIRCLE +25AA;H18543;BLACK SMALL SQUARE +25AB;H18551;WHITE SMALL SQUARE +25A1;H22073;WHITE SQUARE +0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE +0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX +0049;I;LATIN CAPITAL LETTER I +0132;IJ;LATIN CAPITAL LIGATURE IJ +00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE +012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE +00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX +00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS +0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE +2111;Ifraktur;BLACK-LETTER CAPITAL I +00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE +012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON +012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK +0399;Iota;GREEK CAPITAL LETTER IOTA +03AA;Iotadieresis;GREEK CAPITAL LETTER IOTA WITH DIALYTIKA +038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS +0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE +004A;J;LATIN CAPITAL LETTER J +0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX +004B;K;LATIN CAPITAL LETTER K +039A;Kappa;GREEK CAPITAL LETTER KAPPA +004C;L;LATIN CAPITAL LETTER L +0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE +039B;Lambda;GREEK CAPITAL LETTER LAMDA +013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON +013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT +0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE +004D;M;LATIN CAPITAL LETTER M +039C;Mu;GREEK CAPITAL LETTER MU +004E;N;LATIN CAPITAL LETTER N 
+0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE +0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON +00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE +039D;Nu;GREEK CAPITAL LETTER NU +004F;O;LATIN CAPITAL LETTER O +0152;OE;LATIN CAPITAL LIGATURE OE +00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE +014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE +00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX +00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS +00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE +01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN +0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE +014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON +2126;Omega;OHM SIGN +038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS +039F;Omicron;GREEK CAPITAL LETTER OMICRON +038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS +00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE +01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE +00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE +0050;P;LATIN CAPITAL LETTER P +03A6;Phi;GREEK CAPITAL LETTER PHI +03A0;Pi;GREEK CAPITAL LETTER PI +03A8;Psi;GREEK CAPITAL LETTER PSI +0051;Q;LATIN CAPITAL LETTER Q +0052;R;LATIN CAPITAL LETTER R +0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE +0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON +211C;Rfraktur;BLACK-LETTER CAPITAL R +03A1;Rho;GREEK CAPITAL LETTER RHO +0053;S;LATIN CAPITAL LETTER S +250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT +2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT +2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT +2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT +253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL +252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL +2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL +251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT +2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT +2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL +2502;SF110000;BOX DRAWINGS LIGHT VERTICAL +2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 
+2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE +2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE +2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE +2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT +2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL +2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT +255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT +255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE +255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE +255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE +255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE +255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT +2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT +2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL +2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL +2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT +2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL +256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL +2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE +2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE +2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE +2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE +2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE +2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE +2552;SF510000;BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE +2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE +256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE +256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE +015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE +0160;Scaron;LATIN CAPITAL LETTER S WITH CARON +015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA +015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX +03A3;Sigma;GREEK CAPITAL LETTER SIGMA +0054;T;LATIN CAPITAL LETTER T +03A4;Tau;GREEK CAPITAL LETTER TAU +0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE +0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON +0398;Theta;GREEK CAPITAL LETTER THETA 
+00DE;Thorn;LATIN CAPITAL LETTER THORN +0055;U;LATIN CAPITAL LETTER U +00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE +016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE +00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX +00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS +00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE +01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN +0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE +016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON +0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK +03A5;Upsilon;GREEK CAPITAL LETTER UPSILON +03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL +03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA +038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS +016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE +0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE +0056;V;LATIN CAPITAL LETTER V +0057;W;LATIN CAPITAL LETTER W +1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE +0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX +1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS +1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE +0058;X;LATIN CAPITAL LETTER X +039E;Xi;GREEK CAPITAL LETTER XI +0059;Y;LATIN CAPITAL LETTER Y +00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE +0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX +0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS +1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE +005A;Z;LATIN CAPITAL LETTER Z +0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE +017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON +017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE +0396;Zeta;GREEK CAPITAL LETTER ZETA +0061;a;LATIN SMALL LETTER A +00E1;aacute;LATIN SMALL LETTER A WITH ACUTE +0103;abreve;LATIN SMALL LETTER A WITH BREVE +00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX +00B4;acute;ACUTE ACCENT +0301;acutecomb;COMBINING ACUTE ACCENT +00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS +00E6;ae;LATIN SMALL LETTER AE +01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE 
+00E0;agrave;LATIN SMALL LETTER A WITH GRAVE +2135;aleph;ALEF SYMBOL +03B1;alpha;GREEK SMALL LETTER ALPHA +03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS +0101;amacron;LATIN SMALL LETTER A WITH MACRON +0026;ampersand;AMPERSAND +2220;angle;ANGLE +2329;angleleft;LEFT-POINTING ANGLE BRACKET +232A;angleright;RIGHT-POINTING ANGLE BRACKET +0387;anoteleia;GREEK ANO TELEIA +0105;aogonek;LATIN SMALL LETTER A WITH OGONEK +2248;approxequal;ALMOST EQUAL TO +00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE +01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE +2194;arrowboth;LEFT RIGHT ARROW +21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW +21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW +21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW +21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW +21D1;arrowdblup;UPWARDS DOUBLE ARROW +2193;arrowdown;DOWNWARDS ARROW +2190;arrowleft;LEFTWARDS ARROW +2192;arrowright;RIGHTWARDS ARROW +2191;arrowup;UPWARDS ARROW +2195;arrowupdn;UP DOWN ARROW +21A8;arrowupdnbse;UP DOWN ARROW WITH BASE +005E;asciicircum;CIRCUMFLEX ACCENT +007E;asciitilde;TILDE +002A;asterisk;ASTERISK +2217;asteriskmath;ASTERISK OPERATOR +0040;at;COMMERCIAL AT +00E3;atilde;LATIN SMALL LETTER A WITH TILDE +0062;b;LATIN SMALL LETTER B +005C;backslash;REVERSE SOLIDUS +007C;bar;VERTICAL LINE +03B2;beta;GREEK SMALL LETTER BETA +2588;block;FULL BLOCK +007B;braceleft;LEFT CURLY BRACKET +007D;braceright;RIGHT CURLY BRACKET +005B;bracketleft;LEFT SQUARE BRACKET +005D;bracketright;RIGHT SQUARE BRACKET +02D8;breve;BREVE +00A6;brokenbar;BROKEN BAR +2022;bullet;BULLET +0063;c;LATIN SMALL LETTER C +0107;cacute;LATIN SMALL LETTER C WITH ACUTE +02C7;caron;CARON +21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS +010D;ccaron;LATIN SMALL LETTER C WITH CARON +00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA +0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX +010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE +00B8;cedilla;CEDILLA +00A2;cent;CENT SIGN +03C7;chi;GREEK SMALL LETTER CHI 
+25CB;circle;WHITE CIRCLE +2297;circlemultiply;CIRCLED TIMES +2295;circleplus;CIRCLED PLUS +02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT +2663;club;BLACK CLUB SUIT +003A;colon;COLON +20A1;colonmonetary;COLON SIGN +002C;comma;COMMA +2245;congruent;APPROXIMATELY EQUAL TO +00A9;copyright;COPYRIGHT SIGN +00A4;currency;CURRENCY SIGN +0064;d;LATIN SMALL LETTER D +2020;dagger;DAGGER +2021;daggerdbl;DOUBLE DAGGER +010F;dcaron;LATIN SMALL LETTER D WITH CARON +0111;dcroat;LATIN SMALL LETTER D WITH STROKE +00B0;degree;DEGREE SIGN +03B4;delta;GREEK SMALL LETTER DELTA +2666;diamond;BLACK DIAMOND SUIT +00A8;dieresis;DIAERESIS +0385;dieresistonos;GREEK DIALYTIKA TONOS +00F7;divide;DIVISION SIGN +2593;dkshade;DARK SHADE +2584;dnblock;LOWER HALF BLOCK +0024;dollar;DOLLAR SIGN +20AB;dong;DONG SIGN +02D9;dotaccent;DOT ABOVE +0323;dotbelowcomb;COMBINING DOT BELOW +0131;dotlessi;LATIN SMALL LETTER DOTLESS I +22C5;dotmath;DOT OPERATOR +0065;e;LATIN SMALL LETTER E +00E9;eacute;LATIN SMALL LETTER E WITH ACUTE +0115;ebreve;LATIN SMALL LETTER E WITH BREVE +011B;ecaron;LATIN SMALL LETTER E WITH CARON +00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX +00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS +0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE +00E8;egrave;LATIN SMALL LETTER E WITH GRAVE +0038;eight;DIGIT EIGHT +2208;element;ELEMENT OF +2026;ellipsis;HORIZONTAL ELLIPSIS +0113;emacron;LATIN SMALL LETTER E WITH MACRON +2014;emdash;EM DASH +2205;emptyset;EMPTY SET +2013;endash;EN DASH +014B;eng;LATIN SMALL LETTER ENG +0119;eogonek;LATIN SMALL LETTER E WITH OGONEK +03B5;epsilon;GREEK SMALL LETTER EPSILON +03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS +003D;equal;EQUALS SIGN +2261;equivalence;IDENTICAL TO +212E;estimated;ESTIMATED SYMBOL +03B7;eta;GREEK SMALL LETTER ETA +03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS +00F0;eth;LATIN SMALL LETTER ETH +0021;exclam;EXCLAMATION MARK +203C;exclamdbl;DOUBLE EXCLAMATION MARK +00A1;exclamdown;INVERTED EXCLAMATION MARK 
+2203;existential;THERE EXISTS +0066;f;LATIN SMALL LETTER F +2640;female;FEMALE SIGN +2012;figuredash;FIGURE DASH +25A0;filledbox;BLACK SQUARE +25AC;filledrect;BLACK RECTANGLE +0035;five;DIGIT FIVE +215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS +0192;florin;LATIN SMALL LETTER F WITH HOOK +0034;four;DIGIT FOUR +2044;fraction;FRACTION SLASH +20A3;franc;FRENCH FRANC SIGN +0067;g;LATIN SMALL LETTER G +03B3;gamma;GREEK SMALL LETTER GAMMA +011F;gbreve;LATIN SMALL LETTER G WITH BREVE +01E7;gcaron;LATIN SMALL LETTER G WITH CARON +011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX +0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE +00DF;germandbls;LATIN SMALL LETTER SHARP S +2207;gradient;NABLA +0060;grave;GRAVE ACCENT +0300;gravecomb;COMBINING GRAVE ACCENT +003E;greater;GREATER-THAN SIGN +2265;greaterequal;GREATER-THAN OR EQUAL TO +00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK +00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK +2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK +203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK +0068;h;LATIN SMALL LETTER H +0127;hbar;LATIN SMALL LETTER H WITH STROKE +0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX +2665;heart;BLACK HEART SUIT +0309;hookabovecomb;COMBINING HOOK ABOVE +2302;house;HOUSE +02DD;hungarumlaut;DOUBLE ACUTE ACCENT +002D;hyphen;HYPHEN-MINUS +0069;i;LATIN SMALL LETTER I +00ED;iacute;LATIN SMALL LETTER I WITH ACUTE +012D;ibreve;LATIN SMALL LETTER I WITH BREVE +00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX +00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS +00EC;igrave;LATIN SMALL LETTER I WITH GRAVE +0133;ij;LATIN SMALL LIGATURE IJ +012B;imacron;LATIN SMALL LETTER I WITH MACRON +221E;infinity;INFINITY +222B;integral;INTEGRAL +2321;integralbt;BOTTOM HALF INTEGRAL +2320;integraltp;TOP HALF INTEGRAL +2229;intersection;INTERSECTION +25D8;invbullet;INVERSE BULLET +25D9;invcircle;INVERSE WHITE CIRCLE +263B;invsmileface;BLACK SMILING FACE 
+012F;iogonek;LATIN SMALL LETTER I WITH OGONEK +03B9;iota;GREEK SMALL LETTER IOTA +03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA +0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS +03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS +0129;itilde;LATIN SMALL LETTER I WITH TILDE +006A;j;LATIN SMALL LETTER J +0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX +006B;k;LATIN SMALL LETTER K +03BA;kappa;GREEK SMALL LETTER KAPPA +0138;kgreenlandic;LATIN SMALL LETTER KRA +006C;l;LATIN SMALL LETTER L +013A;lacute;LATIN SMALL LETTER L WITH ACUTE +03BB;lambda;GREEK SMALL LETTER LAMDA +013E;lcaron;LATIN SMALL LETTER L WITH CARON +0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT +003C;less;LESS-THAN SIGN +2264;lessequal;LESS-THAN OR EQUAL TO +258C;lfblock;LEFT HALF BLOCK +20A4;lira;LIRA SIGN +2227;logicaland;LOGICAL AND +00AC;logicalnot;NOT SIGN +2228;logicalor;LOGICAL OR +017F;longs;LATIN SMALL LETTER LONG S +25CA;lozenge;LOZENGE +0142;lslash;LATIN SMALL LETTER L WITH STROKE +2591;ltshade;LIGHT SHADE +006D;m;LATIN SMALL LETTER M +00AF;macron;MACRON +2642;male;MALE SIGN +2212;minus;MINUS SIGN +2032;minute;PRIME +00B5;mu;MICRO SIGN +00D7;multiply;MULTIPLICATION SIGN +266A;musicalnote;EIGHTH NOTE +266B;musicalnotedbl;BEAMED EIGHTH NOTES +006E;n;LATIN SMALL LETTER N +0144;nacute;LATIN SMALL LETTER N WITH ACUTE +0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE +0148;ncaron;LATIN SMALL LETTER N WITH CARON +0039;nine;DIGIT NINE +2209;notelement;NOT AN ELEMENT OF +2260;notequal;NOT EQUAL TO +2284;notsubset;NOT A SUBSET OF +00F1;ntilde;LATIN SMALL LETTER N WITH TILDE +03BD;nu;GREEK SMALL LETTER NU +0023;numbersign;NUMBER SIGN +006F;o;LATIN SMALL LETTER O +00F3;oacute;LATIN SMALL LETTER O WITH ACUTE +014F;obreve;LATIN SMALL LETTER O WITH BREVE +00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX +00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS +0153;oe;LATIN SMALL LIGATURE OE +02DB;ogonek;OGONEK +00F2;ograve;LATIN SMALL LETTER O WITH GRAVE 
+01A1;ohorn;LATIN SMALL LETTER O WITH HORN +0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE +014D;omacron;LATIN SMALL LETTER O WITH MACRON +03C9;omega;GREEK SMALL LETTER OMEGA +03D6;omega1;GREEK PI SYMBOL +03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS +03BF;omicron;GREEK SMALL LETTER OMICRON +03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS +0031;one;DIGIT ONE +2024;onedotenleader;ONE DOT LEADER +215B;oneeighth;VULGAR FRACTION ONE EIGHTH +00BD;onehalf;VULGAR FRACTION ONE HALF +00BC;onequarter;VULGAR FRACTION ONE QUARTER +2153;onethird;VULGAR FRACTION ONE THIRD +25E6;openbullet;WHITE BULLET +00AA;ordfeminine;FEMININE ORDINAL INDICATOR +00BA;ordmasculine;MASCULINE ORDINAL INDICATOR +221F;orthogonal;RIGHT ANGLE +00F8;oslash;LATIN SMALL LETTER O WITH STROKE +01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE +00F5;otilde;LATIN SMALL LETTER O WITH TILDE +0070;p;LATIN SMALL LETTER P +00B6;paragraph;PILCROW SIGN +0028;parenleft;LEFT PARENTHESIS +0029;parenright;RIGHT PARENTHESIS +2202;partialdiff;PARTIAL DIFFERENTIAL +0025;percent;PERCENT SIGN +002E;period;FULL STOP +00B7;periodcentered;MIDDLE DOT +22A5;perpendicular;UP TACK +2030;perthousand;PER MILLE SIGN +20A7;peseta;PESETA SIGN +03C6;phi;GREEK SMALL LETTER PHI +03D5;phi1;GREEK PHI SYMBOL +03C0;pi;GREEK SMALL LETTER PI +002B;plus;PLUS SIGN +00B1;plusminus;PLUS-MINUS SIGN +211E;prescription;PRESCRIPTION TAKE +220F;product;N-ARY PRODUCT +2282;propersubset;SUBSET OF +2283;propersuperset;SUPERSET OF +221D;proportional;PROPORTIONAL TO +03C8;psi;GREEK SMALL LETTER PSI +0071;q;LATIN SMALL LETTER Q +003F;question;QUESTION MARK +00BF;questiondown;INVERTED QUESTION MARK +0022;quotedbl;QUOTATION MARK +201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK +201C;quotedblleft;LEFT DOUBLE QUOTATION MARK +201D;quotedblright;RIGHT DOUBLE QUOTATION MARK +2018;quoteleft;LEFT SINGLE QUOTATION MARK +201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK +2019;quoteright;RIGHT SINGLE QUOTATION MARK 
+201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK +0027;quotesingle;APOSTROPHE +0072;r;LATIN SMALL LETTER R +0155;racute;LATIN SMALL LETTER R WITH ACUTE +221A;radical;SQUARE ROOT +0159;rcaron;LATIN SMALL LETTER R WITH CARON +2286;reflexsubset;SUBSET OF OR EQUAL TO +2287;reflexsuperset;SUPERSET OF OR EQUAL TO +00AE;registered;REGISTERED SIGN +2310;revlogicalnot;REVERSED NOT SIGN +03C1;rho;GREEK SMALL LETTER RHO +02DA;ring;RING ABOVE +2590;rtblock;RIGHT HALF BLOCK +0073;s;LATIN SMALL LETTER S +015B;sacute;LATIN SMALL LETTER S WITH ACUTE +0161;scaron;LATIN SMALL LETTER S WITH CARON +015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA +015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX +2033;second;DOUBLE PRIME +00A7;section;SECTION SIGN +003B;semicolon;SEMICOLON +0037;seven;DIGIT SEVEN +215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS +2592;shade;MEDIUM SHADE +03C3;sigma;GREEK SMALL LETTER SIGMA +03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA +223C;similar;TILDE OPERATOR +0036;six;DIGIT SIX +002F;slash;SOLIDUS +263A;smileface;WHITE SMILING FACE +0020;space;SPACE +2660;spade;BLACK SPADE SUIT +00A3;sterling;POUND SIGN +220B;suchthat;CONTAINS AS MEMBER +2211;summation;N-ARY SUMMATION +263C;sun;WHITE SUN WITH RAYS +0074;t;LATIN SMALL LETTER T +03C4;tau;GREEK SMALL LETTER TAU +0167;tbar;LATIN SMALL LETTER T WITH STROKE +0165;tcaron;LATIN SMALL LETTER T WITH CARON +2234;therefore;THEREFORE +03B8;theta;GREEK SMALL LETTER THETA +03D1;theta1;GREEK THETA SYMBOL +00FE;thorn;LATIN SMALL LETTER THORN +0033;three;DIGIT THREE +215C;threeeighths;VULGAR FRACTION THREE EIGHTHS +00BE;threequarters;VULGAR FRACTION THREE QUARTERS +02DC;tilde;SMALL TILDE +0303;tildecomb;COMBINING TILDE +0384;tonos;GREEK TONOS +2122;trademark;TRADE MARK SIGN +25BC;triagdn;BLACK DOWN-POINTING TRIANGLE +25C4;triaglf;BLACK LEFT-POINTING POINTER +25BA;triagrt;BLACK RIGHT-POINTING POINTER +25B2;triagup;BLACK UP-POINTING TRIANGLE +0032;two;DIGIT TWO +2025;twodotenleader;TWO DOT LEADER +2154;twothirds;VULGAR FRACTION TWO 
THIRDS +0075;u;LATIN SMALL LETTER U +00FA;uacute;LATIN SMALL LETTER U WITH ACUTE +016D;ubreve;LATIN SMALL LETTER U WITH BREVE +00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX +00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS +00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE +01B0;uhorn;LATIN SMALL LETTER U WITH HORN +0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE +016B;umacron;LATIN SMALL LETTER U WITH MACRON +005F;underscore;LOW LINE +2017;underscoredbl;DOUBLE LOW LINE +222A;union;UNION +2200;universal;FOR ALL +0173;uogonek;LATIN SMALL LETTER U WITH OGONEK +2580;upblock;UPPER HALF BLOCK +03C5;upsilon;GREEK SMALL LETTER UPSILON +03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA +03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS +03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS +016F;uring;LATIN SMALL LETTER U WITH RING ABOVE +0169;utilde;LATIN SMALL LETTER U WITH TILDE +0076;v;LATIN SMALL LETTER V +0077;w;LATIN SMALL LETTER W +1E83;wacute;LATIN SMALL LETTER W WITH ACUTE +0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX +1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS +2118;weierstrass;SCRIPT CAPITAL P +1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE +0078;x;LATIN SMALL LETTER X +03BE;xi;GREEK SMALL LETTER XI +0079;y;LATIN SMALL LETTER Y +00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE +0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX +00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS +00A5;yen;YEN SIGN +1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE +007A;z;LATIN SMALL LETTER Z +017A;zacute;LATIN SMALL LETTER Z WITH ACUTE +017E;zcaron;LATIN SMALL LETTER Z WITH CARON +017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE +0030;zero;DIGIT ZERO +03B6;zeta;GREEK SMALL LETTER ZETA +#END +""" + + +class AGLError(Exception): + pass + +AGL2UV = {} +UV2AGL = {} + +def _builddicts(): + import re + + lines = _aglText.splitlines() + + parseAGL_RE = re.compile("([0-9A-F]{4});([A-Za-z_0-9.]+);.*?$") + + for line in 
lines: + if not line or line[:1] == '#': + continue + m = parseAGL_RE.match(line) + if not m: + raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20])) + unicode = m.group(1) + assert len(unicode) == 4 + unicode = int(unicode, 16) + glyphName = m.group(2) + if glyphName in AGL2UV: + # the above table contains identical duplicates + assert AGL2UV[glyphName] == unicode + else: + AGL2UV[glyphName] = unicode + UV2AGL[unicode] = glyphName + +_builddicts() diff -Nru fonttools-2.4/Snippets/fontTools/cffLib.py fonttools-3.0/Snippets/fontTools/cffLib.py --- fonttools-2.4/Snippets/fontTools/cffLib.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/cffLib.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1810 @@ +"""cffLib.py -- read/write tools for Adobe CFF fonts.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc import psCharStrings +from fontTools.misc.textTools import safeEval +import struct + +DEBUG = 0 + + +cffHeaderFormat = """ + major: B + minor: B + hdrSize: B + offSize: B +""" + +class CFFFontSet(object): + + def __init__(self): + pass + + def decompile(self, file, otFont): + sstruct.unpack(cffHeaderFormat, file.read(4), self) + assert self.major == 1 and self.minor == 0, \ + "unknown CFF format: %d.%d" % (self.major, self.minor) + + file.seek(self.hdrSize) + self.fontNames = list(Index(file)) + self.topDictIndex = TopDictIndex(file) + self.strings = IndexedStrings(file) + self.GlobalSubrs = GlobalSubrsIndex(file) + self.topDictIndex.strings = self.strings + self.topDictIndex.GlobalSubrs = self.GlobalSubrs + + def __len__(self): + return len(self.fontNames) + + def keys(self): + return list(self.fontNames) + + def values(self): + return self.topDictIndex + + def __getitem__(self, name): + try: + index = self.fontNames.index(name) + except ValueError: + raise KeyError(name) + return self.topDictIndex[index] + + def 
compile(self, file, otFont): + strings = IndexedStrings() + writer = CFFWriter() + writer.add(sstruct.pack(cffHeaderFormat, self)) + fontNames = Index() + for name in self.fontNames: + fontNames.append(name) + writer.add(fontNames.getCompiler(strings, None)) + topCompiler = self.topDictIndex.getCompiler(strings, None) + writer.add(topCompiler) + writer.add(strings.getCompiler()) + writer.add(self.GlobalSubrs.getCompiler(strings, None)) + + for topDict in self.topDictIndex: + if not hasattr(topDict, "charset") or topDict.charset is None: + charset = otFont.getGlyphOrder() + topDict.charset = charset + + for child in topCompiler.getChildren(strings): + writer.add(child) + + writer.toFile(file) + + def toXML(self, xmlWriter, progress=None): + for fontName in self.fontNames: + xmlWriter.begintag("CFFFont", name=tostr(fontName)) + xmlWriter.newline() + font = self[fontName] + font.toXML(xmlWriter, progress) + xmlWriter.endtag("CFFFont") + xmlWriter.newline() + xmlWriter.newline() + xmlWriter.begintag("GlobalSubrs") + xmlWriter.newline() + self.GlobalSubrs.toXML(xmlWriter, progress) + xmlWriter.endtag("GlobalSubrs") + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + self.major = 1 + self.minor = 0 + self.hdrSize = 4 + self.offSize = 4 # XXX ?? 
+ if name == "CFFFont": + if not hasattr(self, "fontNames"): + self.fontNames = [] + self.topDictIndex = TopDictIndex() + fontName = attrs["name"] + topDict = TopDict(GlobalSubrs=self.GlobalSubrs) + topDict.charset = None # gets filled in later + self.fontNames.append(fontName) + self.topDictIndex.append(topDict) + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + topDict.fromXML(name, attrs, content) + elif name == "GlobalSubrs": + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + subr = psCharStrings.T2CharString() + subr.fromXML(name, attrs, content) + self.GlobalSubrs.append(subr) + + +class CFFWriter(object): + + def __init__(self): + self.data = [] + + def add(self, table): + self.data.append(table) + + def toFile(self, file): + lastPosList = None + count = 1 + while True: + if DEBUG: + print("CFFWriter.toFile() iteration:", count) + count = count + 1 + pos = 0 + posList = [pos] + for item in self.data: + if hasattr(item, "getDataLength"): + endPos = pos + item.getDataLength() + else: + endPos = pos + len(item) + if hasattr(item, "setPos"): + item.setPos(pos, endPos) + pos = endPos + posList.append(pos) + if posList == lastPosList: + break + lastPosList = posList + if DEBUG: + print("CFFWriter.toFile() writing to file.") + begin = file.tell() + posList = [0] + for item in self.data: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + posList.append(file.tell() - begin) + assert posList == lastPosList + + +def calcOffSize(largestOffset): + if largestOffset < 0x100: + offSize = 1 + elif largestOffset < 0x10000: + offSize = 2 + elif largestOffset < 0x1000000: + offSize = 3 + else: + offSize = 4 + return offSize + + +class IndexCompiler(object): + + def __init__(self, items, strings, parent): + self.items = self.getItems(items, strings) + self.parent = parent + + def getItems(self, items, strings): + return items + + def 
getOffsets(self): + pos = 1 + offsets = [pos] + for item in self.items: + if hasattr(item, "getDataLength"): + pos = pos + item.getDataLength() + else: + pos = pos + len(item) + offsets.append(pos) + return offsets + + def getDataLength(self): + lastOffset = self.getOffsets()[-1] + offSize = calcOffSize(lastOffset) + dataLength = ( + 2 + # count + 1 + # offSize + (len(self.items) + 1) * offSize + # the offsets + lastOffset - 1 # size of object data + ) + return dataLength + + def toFile(self, file): + offsets = self.getOffsets() + writeCard16(file, len(self.items)) + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) == -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(tobytes(item, encoding="latin1")) + + +class IndexedStringsCompiler(IndexCompiler): + + def getItems(self, items, strings): + return items.strings + + +class TopDictIndexCompiler(IndexCompiler): + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for topDict in self.items: + children.extend(topDict.getChildren(strings)) + return children + + +class FDArrayIndexCompiler(IndexCompiler): + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for fontDict in self.items: + children.extend(fontDict.getChildren(strings)) + return children + + def toFile(self, file): + offsets = self.getOffsets() + writeCard16(file, len(self.items)) + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) 
== -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + + def setPos(self, pos, endPos): + self.parent.rawDict["FDArray"] = pos + + +class GlobalSubrsCompiler(IndexCompiler): + def getItems(self, items, strings): + out = [] + for cs in items: + cs.compile() + out.append(cs.bytecode) + return out + +class SubrsCompiler(GlobalSubrsCompiler): + def setPos(self, pos, endPos): + offset = pos - self.parent.pos + self.parent.rawDict["Subrs"] = offset + +class CharStringsCompiler(GlobalSubrsCompiler): + def setPos(self, pos, endPos): + self.parent.rawDict["CharStrings"] = pos + + +class Index(object): + + """This class represents what the CFF spec calls an INDEX.""" + + compilerClass = IndexCompiler + + def __init__(self, file=None): + self.items = [] + name = self.__class__.__name__ + if file is None: + return + if DEBUG: + print("loading %s at %s" % (name, file.tell())) + self.file = file + count = readCard16(file) + if count == 0: + return + self.items = [None] * count + offSize = readCard8(file) + if DEBUG: + print(" index count: %s offSize: %s" % (count, offSize)) + assert offSize <= 4, "offSize too large: %s" % offSize + self.offsets = offsets = [] + pad = b'\0' * (4 - offSize) + for index in range(count+1): + chunk = file.read(offSize) + chunk = pad + chunk + offset, = struct.unpack(">L", chunk) + offsets.append(int(offset)) + self.offsetBase = file.tell() - 1 + file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot + if DEBUG: + print(" end of %s at %s" % (name, file.tell())) + + def __len__(self): + return len(self.items) + + def __getitem__(self, index): + item = self.items[index] + if item is not None: + return item + offset = self.offsets[index] + self.offsetBase + size = self.offsets[index+1] - self.offsets[index] + file = self.file + file.seek(offset) + data = file.read(size) + assert len(data) == size + item = self.produceItem(index, data, file, offset, 
size) + self.items[index] = item + return item + + def produceItem(self, index, data, file, offset, size): + return data + + def append(self, item): + self.items.append(item) + + def getCompiler(self, strings, parent): + return self.compilerClass(self, strings, parent) + + +class GlobalSubrsIndex(Index): + + compilerClass = GlobalSubrsCompiler + + def __init__(self, file=None, globalSubrs=None, private=None, fdSelect=None, fdArray=None): + Index.__init__(self, file) + self.globalSubrs = globalSubrs + self.private = private + if fdSelect: + self.fdSelect = fdSelect + if fdArray: + self.fdArray = fdArray + + def produceItem(self, index, data, file, offset, size): + if self.private is not None: + private = self.private + elif hasattr(self, 'fdArray') and self.fdArray is not None: + private = self.fdArray[self.fdSelect[index]].Private + else: + private = None + return psCharStrings.T2CharString(data, private=private, globalSubrs=self.globalSubrs) + + def toXML(self, xmlWriter, progress): + xmlWriter.comment("The 'index' attribute is only for humans; it is ignored when parsed.") + xmlWriter.newline() + for i in range(len(self)): + subr = self[i] + if subr.needsDecompilation(): + xmlWriter.begintag("CharString", index=i, raw=1) + else: + xmlWriter.begintag("CharString", index=i) + xmlWriter.newline() + subr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + if name != "CharString": + return + subr = psCharStrings.T2CharString() + subr.fromXML(name, attrs, content) + self.append(subr) + + def getItemAndSelector(self, index): + sel = None + if hasattr(self, 'fdSelect'): + sel = self.fdSelect[index] + return self[index], sel + + +class SubrsIndex(GlobalSubrsIndex): + compilerClass = SubrsCompiler + + +class TopDictIndex(Index): + + compilerClass = TopDictIndexCompiler + + def produceItem(self, index, data, file, offset, size): + top = TopDict(self.strings, file, offset, self.GlobalSubrs) + 
top.decompile(data) + return top + + def toXML(self, xmlWriter, progress): + for i in range(len(self)): + xmlWriter.begintag("FontDict", index=i) + xmlWriter.newline() + self[i].toXML(xmlWriter, progress) + xmlWriter.endtag("FontDict") + xmlWriter.newline() + + +class FDArrayIndex(TopDictIndex): + + compilerClass = FDArrayIndexCompiler + + def fromXML(self, name, attrs, content): + if name != "FontDict": + return + fontDict = FontDict() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + fontDict.fromXML(name, attrs, content) + self.append(fontDict) + + +class FDSelect: + def __init__(self, file=None, numGlyphs=None, format=None): + if file: + # read data in from file + self.format = readCard8(file) + if self.format == 0: + from array import array + self.gidArray = array("B", file.read(numGlyphs)).tolist() + elif self.format == 3: + gidArray = [None] * numGlyphs + nRanges = readCard16(file) + fd = None + prev = None + for i in range(nRanges): + first = readCard16(file) + if prev is not None: + for glyphID in range(prev, first): + gidArray[glyphID] = fd + prev = first + fd = readCard8(file) + if prev is not None: + first = readCard16(file) + for glyphID in range(prev, first): + gidArray[glyphID] = fd + self.gidArray = gidArray + else: + assert False, "unsupported FDSelect format: %s" % format + else: + # reading from XML. Make empty gidArray,, and leave format as passed in. + # format is None will result in the smallest representation being used. 
+ self.format = format + self.gidArray = [] + + def __len__(self): + return len(self.gidArray) + + def __getitem__(self, index): + return self.gidArray[index] + + def __setitem__(self, index, fdSelectValue): + self.gidArray[index] = fdSelectValue + + def append(self, fdSelectValue): + self.gidArray.append(fdSelectValue) + + +class CharStrings(object): + + def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray): + if file is not None: + self.charStringsIndex = SubrsIndex(file, globalSubrs, private, fdSelect, fdArray) + self.charStrings = charStrings = {} + for i in range(len(charset)): + charStrings[charset[i]] = i + self.charStringsAreIndexed = 1 + else: + self.charStrings = {} + self.charStringsAreIndexed = 0 + self.globalSubrs = globalSubrs + self.private = private + if fdSelect is not None: + self.fdSelect = fdSelect + if fdArray is not None: + self.fdArray = fdArray + + def keys(self): + return list(self.charStrings.keys()) + + def values(self): + if self.charStringsAreIndexed: + return self.charStringsIndex + else: + return list(self.charStrings.values()) + + def has_key(self, name): + return name in self.charStrings + + __contains__ = has_key + + def __len__(self): + return len(self.charStrings) + + def __getitem__(self, name): + charString = self.charStrings[name] + if self.charStringsAreIndexed: + charString = self.charStringsIndex[charString] + return charString + + def __setitem__(self, name, charString): + if self.charStringsAreIndexed: + index = self.charStrings[name] + self.charStringsIndex[index] = charString + else: + self.charStrings[name] = charString + + def getItemAndSelector(self, name): + if self.charStringsAreIndexed: + index = self.charStrings[name] + return self.charStringsIndex.getItemAndSelector(index) + else: + if hasattr(self, 'fdSelect'): + sel = self.fdSelect[index] # index is not defined at this point. Read R. ? 
+ else: + raise KeyError("fdSelect array not yet defined.") + return self.charStrings[name], sel + + def toXML(self, xmlWriter, progress): + names = sorted(self.keys()) + i = 0 + step = 10 + numGlyphs = len(names) + for name in names: + charStr, fdSelectIndex = self.getItemAndSelector(name) + if charStr.needsDecompilation(): + raw = [("raw", 1)] + else: + raw = [] + if fdSelectIndex is None: + xmlWriter.begintag("CharString", [('name', name)] + raw) + else: + xmlWriter.begintag("CharString", + [('name', name), ('fdSelectIndex', fdSelectIndex)] + raw) + xmlWriter.newline() + charStr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + if not i % step and progress is not None: + progress.setLabel("Dumping 'CFF ' table... (%s)" % name) + progress.increment(step / numGlyphs) + i = i + 1 + + def fromXML(self, name, attrs, content): + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + if name != "CharString": + continue + fdID = -1 + if hasattr(self, "fdArray"): + fdID = safeEval(attrs["fdSelectIndex"]) + private = self.fdArray[fdID].Private + else: + private = self.private + + glyphName = attrs["name"] + charString = psCharStrings.T2CharString( + private=private, + globalSubrs=self.globalSubrs) + charString.fromXML(name, attrs, content) + if fdID >= 0: + charString.fdSelectIndex = fdID + self[glyphName] = charString + + +def readCard8(file): + return byteord(file.read(1)) + +def readCard16(file): + value, = struct.unpack(">H", file.read(2)) + return value + +def writeCard8(file, value): + file.write(bytechr(value)) + +def writeCard16(file, value): + file.write(struct.pack(">H", value)) + +def packCard8(value): + return bytechr(value) + +def packCard16(value): + return struct.pack(">H", value) + +def buildOperatorDict(table): + d = {} + for op, name, arg, default, conv in table: + d[op] = (name, arg) + return d + +def buildOpcodeDict(table): + d = {} + for op, name, arg, default, conv in table: 
+ if isinstance(op, tuple): + op = bytechr(op[0]) + bytechr(op[1]) + else: + op = bytechr(op) + d[name] = (op, arg) + return d + +def buildOrder(table): + l = [] + for op, name, arg, default, conv in table: + l.append(name) + return l + +def buildDefaults(table): + d = {} + for op, name, arg, default, conv in table: + if default is not None: + d[name] = default + return d + +def buildConverters(table): + d = {} + for op, name, arg, default, conv in table: + d[name] = conv + return d + + +class SimpleConverter(object): + def read(self, parent, value): + return value + def write(self, parent, value): + return value + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + return attrs["value"] + +class ASCIIConverter(SimpleConverter): + def read(self, parent, value): + return tostr(value, encoding='ascii') + def write(self, parent, value): + return tobytes(value, encoding='ascii') + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=tounicode(value, encoding="ascii")) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("ascii")) + +class Latin1Converter(SimpleConverter): + def read(self, parent, value): + return tostr(value, encoding='latin1') + def write(self, parent, value): + return tobytes(value, encoding='latin1') + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=tounicode(value, encoding="latin1")) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("latin1")) + + +def parseNum(s): + try: + value = int(s) + except: + value = float(s) + return value + +class NumberConverter(SimpleConverter): + def xmlRead(self, name, attrs, content, parent): + return parseNum(attrs["value"]) + +class ArrayConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, 
name, value, progress): + value = " ".join(map(str, value)) + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + values = attrs["value"].split() + return [parseNum(value) for value in values] + +class TableConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.begintag(name) + xmlWriter.newline() + value.toXML(xmlWriter, progress) + xmlWriter.endtag(name) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + ob = self.getClass()() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + ob.fromXML(name, attrs, content) + return ob + +class PrivateDictConverter(TableConverter): + def getClass(self): + return PrivateDict + def read(self, parent, value): + size, offset = value + file = parent.file + priv = PrivateDict(parent.strings, file, offset) + file.seek(offset) + data = file.read(size) + assert len(data) == size + priv.decompile(data) + return priv + def write(self, parent, value): + return (0, 0) # dummy value + +class SubrsConverter(TableConverter): + def getClass(self): + return SubrsIndex + def read(self, parent, value): + file = parent.file + file.seek(parent.offset + value) # Offset(self) + return SubrsIndex(file) + def write(self, parent, value): + return 0 # dummy value + +class CharStringsConverter(TableConverter): + def read(self, parent, value): + file = parent.file + charset = parent.charset + globalSubrs = parent.GlobalSubrs + if hasattr(parent, "ROS"): + fdSelect, fdArray = parent.FDSelect, parent.FDArray + private = None + else: + fdSelect, fdArray = None, None + private = parent.Private + file.seek(value) # Offset(0) + return CharStrings(file, charset, globalSubrs, private, fdSelect, fdArray) + def write(self, parent, value): + return 0 # dummy value + def xmlRead(self, name, attrs, content, parent): + if hasattr(parent, "ROS"): + # if it is a CID-keyed font, then the 
private Dict is extracted from the parent.FDArray + private, fdSelect, fdArray = None, parent.FDSelect, parent.FDArray + else: + # if it is a name-keyed font, then the private dict is in the top dict, and there is no fdArray. + private, fdSelect, fdArray = parent.Private, None, None + charStrings = CharStrings(None, None, parent.GlobalSubrs, private, fdSelect, fdArray) + charStrings.fromXML(name, attrs, content) + return charStrings + +class CharsetConverter(object): + def read(self, parent, value): + isCID = hasattr(parent, "ROS") + if value > 2: + numGlyphs = parent.numGlyphs + file = parent.file + file.seek(value) + if DEBUG: + print("loading charset at %s" % value) + format = readCard8(file) + if format == 0: + charset = parseCharset0(numGlyphs, file, parent.strings, isCID) + elif format == 1 or format == 2: + charset = parseCharset(numGlyphs, file, parent.strings, isCID, format) + else: + raise NotImplementedError + assert len(charset) == numGlyphs + if DEBUG: + print(" charset end at %s" % file.tell()) + else: # offset == 0 -> no charset data. + if isCID or "CharStrings" not in parent.rawDict: + assert value == 0 # We get here only when processing fontDicts from the FDArray of CFF-CID fonts. Only the real topDict references the chrset. + charset = None + elif value == 0: + charset = cffISOAdobeStrings + elif value == 1: + charset = cffIExpertStrings + elif value == 2: + charset = cffExpertSubsetStrings + return charset + + def write(self, parent, value): + return 0 # dummy value + def xmlWrite(self, xmlWriter, name, value, progress): + # XXX only write charset when not in OT/TTX context, where we + # dump charset as a separate "GlyphOrder" table. 
+ ##xmlWriter.simpletag("charset") + xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + if 0: + return safeEval(attrs["value"]) + + +class CharsetCompiler(object): + + def __init__(self, strings, charset, parent): + assert charset[0] == '.notdef' + isCID = hasattr(parent.dictObj, "ROS") + data0 = packCharset0(charset, isCID, strings) + data = packCharset(charset, isCID, strings) + if len(data) < len(data0): + self.data = data + else: + self.data = data0 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["charset"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +def getCIDfromName(name, strings): + return int(name[3:]) + +def getSIDfromName(name, strings): + return strings.getSID(name) + +def packCharset0(charset, isCID, strings): + fmt = 0 + data = [packCard8(fmt)] + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + data.append(packCard16(getNameID(name,strings))) + return bytesjoin(data) + + +def packCharset(charset, isCID, strings): + fmt = 1 + ranges = [] + first = None + end = 0 + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + SID = getNameID(name, strings) + if first is None: + first = SID + elif end + 1 != SID: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + first = SID + end = SID + if end: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + + data = [packCard8(fmt)] + if fmt == 1: + nLeftFunc = packCard8 + else: + nLeftFunc = packCard16 + for first, nLeft in ranges: + data.append(packCard16(first) + nLeftFunc(nLeft)) + return bytesjoin(data) + +def parseCharset0(numGlyphs, file, strings, isCID): + charset = [".notdef"] + if isCID: + for i in range(numGlyphs - 1): + CID = 
readCard16(file) + charset.append("cid" + str(CID).zfill(5)) + else: + for i in range(numGlyphs - 1): + SID = readCard16(file) + charset.append(strings[SID]) + return charset + +def parseCharset(numGlyphs, file, strings, isCID, fmt): + charset = ['.notdef'] + count = 1 + if fmt == 1: + nLeftFunc = readCard8 + else: + nLeftFunc = readCard16 + while count < numGlyphs: + first = readCard16(file) + nLeft = nLeftFunc(file) + if isCID: + for CID in range(first, first+nLeft+1): + charset.append("cid" + str(CID).zfill(5)) + else: + for SID in range(first, first+nLeft+1): + charset.append(strings[SID]) + count = count + nLeft + 1 + return charset + + +class EncodingCompiler(object): + + def __init__(self, strings, encoding, parent): + assert not isinstance(encoding, basestring) + data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings) + data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings) + if len(data0) < len(data1): + self.data = data0 + else: + self.data = data1 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["Encoding"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class EncodingConverter(SimpleConverter): + + def read(self, parent, value): + if value == 0: + return "StandardEncoding" + elif value == 1: + return "ExpertEncoding" + else: + assert value > 1 + file = parent.file + file.seek(value) + if DEBUG: + print("loading Encoding at %s" % value) + fmt = readCard8(file) + haveSupplement = fmt & 0x80 + if haveSupplement: + raise NotImplementedError("Encoding supplements are not yet supported") + fmt = fmt & 0x7f + if fmt == 0: + encoding = parseEncoding0(parent.charset, file, haveSupplement, + parent.strings) + elif fmt == 1: + encoding = parseEncoding1(parent.charset, file, haveSupplement, + parent.strings) + return encoding + + def write(self, parent, value): + if value == "StandardEncoding": + return 0 + elif value == 
"ExpertEncoding": + return 1 + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value, progress): + if value in ("StandardEncoding", "ExpertEncoding"): + xmlWriter.simpletag(name, name=value) + xmlWriter.newline() + return + xmlWriter.begintag(name) + xmlWriter.newline() + for code in range(len(value)): + glyphName = value[code] + if glyphName != ".notdef": + xmlWriter.simpletag("map", code=hex(code), name=glyphName) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + if "name" in attrs: + return attrs["name"] + encoding = [".notdef"] * 256 + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + code = safeEval(attrs["code"]) + glyphName = attrs["name"] + encoding[code] = glyphName + return encoding + + +def parseEncoding0(charset, file, haveSupplement, strings): + nCodes = readCard8(file) + encoding = [".notdef"] * 256 + for glyphID in range(1, nCodes + 1): + code = readCard8(file) + if code != 0: + encoding[code] = charset[glyphID] + return encoding + +def parseEncoding1(charset, file, haveSupplement, strings): + nRanges = readCard8(file) + encoding = [".notdef"] * 256 + glyphID = 1 + for i in range(nRanges): + code = readCard8(file) + nLeft = readCard8(file) + for glyphID in range(glyphID, glyphID + nLeft + 1): + encoding[code] = charset[glyphID] + code = code + 1 + glyphID = glyphID + 1 + return encoding + +def packEncoding0(charset, encoding, strings): + fmt = 0 + m = {} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + codes = [] + for name in charset[1:]: + code = m.get(name) + codes.append(code) + + while codes and codes[-1] is None: + codes.pop() + + data = [packCard8(fmt), packCard8(len(codes))] + for code in codes: + if code is None: + code = 0 + data.append(packCard8(code)) + return bytesjoin(data) + +def packEncoding1(charset, encoding, strings): + fmt = 1 + m = 
{} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + ranges = [] + first = None + end = 0 + for name in charset[1:]: + code = m.get(name, -1) + if first is None: + first = code + elif end + 1 != code: + nLeft = end - first + ranges.append((first, nLeft)) + first = code + end = code + nLeft = end - first + ranges.append((first, nLeft)) + + # remove unencoded glyphs at the end. + while ranges and ranges[-1][0] == -1: + ranges.pop() + + data = [packCard8(fmt), packCard8(len(ranges))] + for first, nLeft in ranges: + if first == -1: # unencoded + first = 0 + data.append(packCard8(first) + packCard8(nLeft)) + return bytesjoin(data) + + +class FDArrayConverter(TableConverter): + + def read(self, parent, value): + file = parent.file + file.seek(value) + fdArray = FDArrayIndex(file) + fdArray.strings = parent.strings + fdArray.GlobalSubrs = parent.GlobalSubrs + return fdArray + + def write(self, parent, value): + return 0 # dummy value + + def xmlRead(self, name, attrs, content, parent): + fdArray = FDArrayIndex() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + fdArray.fromXML(name, attrs, content) + return fdArray + + +class FDSelectConverter(object): + + def read(self, parent, value): + file = parent.file + file.seek(value) + fdSelect = FDSelect(file, parent.numGlyphs) + return fdSelect + + def write(self, parent, value): + return 0 # dummy value + + # The FDSelect glyph data is written out to XML in the charstring keys, + # so we write out only the format selector + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, [('format', value.format)]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + fmt = safeEval(attrs["format"]) + file = None + numGlyphs = None + fdSelect = FDSelect(file, numGlyphs, fmt) + return fdSelect + + +def packFDSelect0(fdSelectArray): + fmt = 0 + data = [packCard8(fmt)] + for 
index in fdSelectArray: + data.append(packCard8(index)) + return bytesjoin(data) + + +def packFDSelect3(fdSelectArray): + fmt = 3 + fdRanges = [] + first = None + end = 0 + lenArray = len(fdSelectArray) + lastFDIndex = -1 + for i in range(lenArray): + fdIndex = fdSelectArray[i] + if lastFDIndex != fdIndex: + fdRanges.append([i, fdIndex]) + lastFDIndex = fdIndex + sentinelGID = i + 1 + + data = [packCard8(fmt)] + data.append(packCard16( len(fdRanges) )) + for fdRange in fdRanges: + data.append(packCard16(fdRange[0])) + data.append(packCard8(fdRange[1])) + data.append(packCard16(sentinelGID)) + return bytesjoin(data) + + +class FDSelectCompiler(object): + + def __init__(self, fdSelect, parent): + fmt = fdSelect.format + fdSelectArray = fdSelect.gidArray + if fmt == 0: + self.data = packFDSelect0(fdSelectArray) + elif fmt == 3: + self.data = packFDSelect3(fdSelectArray) + else: + # choose smaller of the two formats + data0 = packFDSelect0(fdSelectArray) + data3 = packFDSelect3(fdSelectArray) + if len(data0) < len(data3): + self.data = data0 + fdSelect.format = 0 + else: + self.data = data3 + fdSelect.format = 3 + + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["FDSelect"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class ROSConverter(SimpleConverter): + + def xmlWrite(self, xmlWriter, name, value, progress): + registry, order, supplement = value + xmlWriter.simpletag(name, [('Registry', tostr(registry)), ('Order', tostr(order)), + ('Supplement', supplement)]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement'])) + + +topDictOperators = [ +# opcode name argument type default converter + ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()), + ((12, 20), 'SyntheticBase', 'number', None, None), + (0, 'version', 'SID', None, None), + (1, 'Notice', 'SID', None, 
Latin1Converter()), + ((12, 0), 'Copyright', 'SID', None, Latin1Converter()), + (2, 'FullName', 'SID', None, None), + ((12, 38), 'FontName', 'SID', None, None), + (3, 'FamilyName', 'SID', None, None), + (4, 'Weight', 'SID', None, None), + ((12, 1), 'isFixedPitch', 'number', 0, None), + ((12, 2), 'ItalicAngle', 'number', 0, None), + ((12, 3), 'UnderlinePosition', 'number', None, None), + ((12, 4), 'UnderlineThickness', 'number', 50, None), + ((12, 5), 'PaintType', 'number', 0, None), + ((12, 6), 'CharstringType', 'number', 2, None), + ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), + (13, 'UniqueID', 'number', None, None), + (5, 'FontBBox', 'array', [0, 0, 0, 0], None), + ((12, 8), 'StrokeWidth', 'number', 0, None), + (14, 'XUID', 'array', None, None), + ((12, 21), 'PostScript', 'SID', None, None), + ((12, 22), 'BaseFontName', 'SID', None, None), + ((12, 23), 'BaseFontBlend', 'delta', None, None), + ((12, 31), 'CIDFontVersion', 'number', 0, None), + ((12, 32), 'CIDFontRevision', 'number', 0, None), + ((12, 33), 'CIDFontType', 'number', 0, None), + ((12, 34), 'CIDCount', 'number', 8720, None), + (15, 'charset', 'number', 0, CharsetConverter()), + ((12, 35), 'UIDBase', 'number', None, None), + (16, 'Encoding', 'number', 0, EncodingConverter()), + (18, 'Private', ('number', 'number'), None, PrivateDictConverter()), + ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), + ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), + (17, 'CharStrings', 'number', None, CharStringsConverter()), +] + +# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, +# in order for the font to compile back from xml. 
+ + +privateDictOperators = [ +# opcode name argument type default converter + (6, 'BlueValues', 'delta', None, None), + (7, 'OtherBlues', 'delta', None, None), + (8, 'FamilyBlues', 'delta', None, None), + (9, 'FamilyOtherBlues', 'delta', None, None), + ((12, 9), 'BlueScale', 'number', 0.039625, None), + ((12, 10), 'BlueShift', 'number', 7, None), + ((12, 11), 'BlueFuzz', 'number', 1, None), + (10, 'StdHW', 'number', None, None), + (11, 'StdVW', 'number', None, None), + ((12, 12), 'StemSnapH', 'delta', None, None), + ((12, 13), 'StemSnapV', 'delta', None, None), + ((12, 14), 'ForceBold', 'number', 0, None), + ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated + ((12, 16), 'lenIV', 'number', None, None), # deprecated + ((12, 17), 'LanguageGroup', 'number', 0, None), + ((12, 18), 'ExpansionFactor', 'number', 0.06, None), + ((12, 19), 'initialRandomSeed', 'number', 0, None), + (20, 'defaultWidthX', 'number', 0, None), + (21, 'nominalWidthX', 'number', 0, None), + (19, 'Subrs', 'number', None, SubrsConverter()), +] + +def addConverters(table): + for i in range(len(table)): + op, name, arg, default, conv = table[i] + if conv is not None: + continue + if arg in ("delta", "array"): + conv = ArrayConverter() + elif arg == "number": + conv = NumberConverter() + elif arg == "SID": + conv = ASCIIConverter() + else: + assert False + table[i] = op, name, arg, default, conv + +addConverters(privateDictOperators) +addConverters(topDictOperators) + + +class TopDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(topDictOperators) + + +class PrivateDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(privateDictOperators) + + +class DictCompiler(object): + + def __init__(self, dictObj, strings, parent): + assert isinstance(strings, IndexedStrings) + self.dictObj = dictObj + self.strings = strings + self.parent = parent + rawDict = {} + for name in dictObj.order: + value = getattr(dictObj, name, None) + if value is 
None: + continue + conv = dictObj.converters[name] + value = conv.write(dictObj, value) + if value == dictObj.defaults.get(name): + continue + rawDict[name] = value + self.rawDict = rawDict + + def setPos(self, pos, endPos): + pass + + def getDataLength(self): + return len(self.compile("getDataLength")) + + def compile(self, reason): + if DEBUG: + print("-- compiling %s for %s" % (self.__class__.__name__, reason)) + print("in baseDict: ", self) + rawDict = self.rawDict + data = [] + for name in self.dictObj.order: + value = rawDict.get(name) + if value is None: + continue + op, argType = self.opcodes[name] + if isinstance(argType, tuple): + l = len(argType) + assert len(value) == l, "value doesn't match arg type" + for i in range(l): + arg = argType[i] + v = value[i] + arghandler = getattr(self, "arg_" + arg) + data.append(arghandler(v)) + else: + arghandler = getattr(self, "arg_" + argType) + data.append(arghandler(value)) + data.append(op) + return bytesjoin(data) + + def toFile(self, file): + file.write(self.compile("toFile")) + + def arg_number(self, num): + return encodeNumber(num) + def arg_SID(self, s): + return psCharStrings.encodeIntCFF(self.strings.getSID(s)) + def arg_array(self, value): + data = [] + for num in value: + data.append(encodeNumber(num)) + return bytesjoin(data) + def arg_delta(self, value): + out = [] + last = 0 + for v in value: + out.append(v - last) + last = v + data = [] + for num in out: + data.append(encodeNumber(num)) + return bytesjoin(data) + + +def encodeNumber(num): + if isinstance(num, float): + return psCharStrings.encodeFloat(num) + else: + return psCharStrings.encodeIntCFF(num) + + +class TopDictCompiler(DictCompiler): + + opcodes = buildOpcodeDict(topDictOperators) + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "charset") and self.dictObj.charset: + children.append(CharsetCompiler(strings, self.dictObj.charset, self)) + if hasattr(self.dictObj, "Encoding"): + encoding = self.dictObj.Encoding 
+ if not isinstance(encoding, basestring): + children.append(EncodingCompiler(strings, encoding, self)) + if hasattr(self.dictObj, "FDSelect"): + # I have not yet supported merging a ttx CFF-CID font, as there are interesting + # issues about merging the FDArrays. Here I assume that + # either the font was read from XML, and teh FDSelect indices are all + # in the charstring data, or the FDSelect array is already fully defined. + fdSelect = self.dictObj.FDSelect + if len(fdSelect) == 0: # probably read in from XML; assume fdIndex in CharString data + charStrings = self.dictObj.CharStrings + for name in self.dictObj.charset: + fdSelect.append(charStrings[name].fdSelectIndex) + fdSelectComp = FDSelectCompiler(fdSelect, self) + children.append(fdSelectComp) + if hasattr(self.dictObj, "CharStrings"): + items = [] + charStrings = self.dictObj.CharStrings + for name in self.dictObj.charset: + items.append(charStrings[name]) + charStringsComp = CharStringsCompiler(items, strings, self) + children.append(charStringsComp) + if hasattr(self.dictObj, "FDArray"): + # I have not yet supported merging a ttx CFF-CID font, as there are interesting + # issues about merging the FDArrays. Here I assume that the FDArray info is correct + # and complete. 
+ fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self) + children.append(fdArrayIndexComp) + children.extend(fdArrayIndexComp.getChildren(strings)) + if hasattr(self.dictObj, "Private"): + privComp = self.dictObj.Private.getCompiler(strings, self) + children.append(privComp) + children.extend(privComp.getChildren(strings)) + return children + + +class FontDictCompiler(DictCompiler): + + opcodes = buildOpcodeDict(topDictOperators) + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "Private"): + privComp = self.dictObj.Private.getCompiler(strings, self) + children.append(privComp) + children.extend(privComp.getChildren(strings)) + return children + + +class PrivateDictCompiler(DictCompiler): + + opcodes = buildOpcodeDict(privateDictOperators) + + def setPos(self, pos, endPos): + size = endPos - pos + self.parent.rawDict["Private"] = size, pos + self.pos = pos + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "Subrs"): + children.append(self.dictObj.Subrs.getCompiler(strings, self)) + return children + + +class BaseDict(object): + + def __init__(self, strings=None, file=None, offset=None): + self.rawDict = {} + if DEBUG: + print("loading %s at %s" % (self.__class__.__name__, offset)) + self.file = file + self.offset = offset + self.strings = strings + self.skipNames = [] + + def decompile(self, data): + if DEBUG: + print(" length %s is %s" % (self.__class__.__name__, len(data))) + dec = self.decompilerClass(self.strings) + dec.decompile(data) + self.rawDict = dec.getDict() + self.postDecompile() + + def postDecompile(self): + pass + + def getCompiler(self, strings, parent): + return self.compilerClass(self, strings, parent) + + def __getattr__(self, name): + value = self.rawDict.get(name) + if value is None: + value = self.defaults.get(name) + if value is None: + raise AttributeError(name) + conv = self.converters[name] + value = conv.read(self, value) + setattr(self, name, value) + return value + 
+ def toXML(self, xmlWriter, progress): + for name in self.order: + if name in self.skipNames: + continue + value = getattr(self, name, None) + if value is None: + continue + conv = self.converters[name] + conv.xmlWrite(xmlWriter, name, value, progress) + + def fromXML(self, name, attrs, content): + conv = self.converters[name] + value = conv.xmlRead(name, attrs, content, self) + setattr(self, name, value) + + +class TopDict(BaseDict): + + defaults = buildDefaults(topDictOperators) + converters = buildConverters(topDictOperators) + order = buildOrder(topDictOperators) + decompilerClass = TopDictDecompiler + compilerClass = TopDictCompiler + + def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): + BaseDict.__init__(self, strings, file, offset) + self.GlobalSubrs = GlobalSubrs + + def getGlyphOrder(self): + return self.charset + + def postDecompile(self): + offset = self.rawDict.get("CharStrings") + if offset is None: + return + # get the number of glyphs beforehand. + self.file.seek(offset) + self.numGlyphs = readCard16(self.file) + + def toXML(self, xmlWriter, progress): + if hasattr(self, "CharStrings"): + self.decompileAllCharStrings(progress) + if hasattr(self, "ROS"): + self.skipNames = ['Encoding'] + if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"): + # these values have default values, but I only want them to show up + # in CID fonts. + self.skipNames = ['CIDFontVersion', 'CIDFontRevision', 'CIDFontType', + 'CIDCount'] + BaseDict.toXML(self, xmlWriter, progress) + + def decompileAllCharStrings(self, progress): + # XXX only when doing ttdump -i? 
+ i = 0 + for charString in self.CharStrings.values(): + try: + charString.decompile() + except: + print("Error in charstring ", i) + import sys + typ, value = sys.exc_info()[0:2] + raise typ(value) + if not i % 30 and progress: + progress.increment(0) # update + i = i + 1 + + +class FontDict(BaseDict): + + defaults = buildDefaults(topDictOperators) + converters = buildConverters(topDictOperators) + order = buildOrder(topDictOperators) + decompilerClass = None + compilerClass = FontDictCompiler + + def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): + BaseDict.__init__(self, strings, file, offset) + self.GlobalSubrs = GlobalSubrs + + def getGlyphOrder(self): + return self.charset + + def toXML(self, xmlWriter, progress): + self.skipNames = ['Encoding'] + BaseDict.toXML(self, xmlWriter, progress) + + +class PrivateDict(BaseDict): + defaults = buildDefaults(privateDictOperators) + converters = buildConverters(privateDictOperators) + order = buildOrder(privateDictOperators) + decompilerClass = PrivateDictDecompiler + compilerClass = PrivateDictCompiler + + +class IndexedStrings(object): + + """SID -> string mapping.""" + + def __init__(self, file=None): + if file is None: + strings = [] + else: + strings = [tostr(s, encoding="latin1") for s in Index(file)] + self.strings = strings + + def getCompiler(self): + return IndexedStringsCompiler(self, None, None) + + def __len__(self): + return len(self.strings) + + def __getitem__(self, SID): + if SID < cffStandardStringCount: + return cffStandardStrings[SID] + else: + return self.strings[SID - cffStandardStringCount] + + def getSID(self, s): + if not hasattr(self, "stringMapping"): + self.buildStringMapping() + if s in cffStandardStringMapping: + SID = cffStandardStringMapping[s] + elif s in self.stringMapping: + SID = self.stringMapping[s] + else: + SID = len(self.strings) + cffStandardStringCount + self.strings.append(s) + self.stringMapping[s] = SID + return SID + + def getStrings(self): + return 
self.strings + + def buildStringMapping(self): + self.stringMapping = {} + for index in range(len(self.strings)): + self.stringMapping[self.strings[index]] = index + cffStandardStringCount + + +# The 391 Standard Strings as used in the CFF format. +# from Adobe Technical None #5176, version 1.0, 18 March 1998 + +cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', + 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', + 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', + 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', + 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', + 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', + 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', + 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', + 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', + 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', + 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', + 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', + 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', + 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', + 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', + 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', + 'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', + 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', + 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', + 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', + 'copyright', 
'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', + 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', + 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', + 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute', + 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', + 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', + 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', + 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', + 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', + 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', + 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', + 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', + 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle', + 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', + 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', + 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', + 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', + 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', + 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', + 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', + 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', + 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', + 'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', + 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', + 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', + 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', + 'questiondownsmall', 
'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', + 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', + 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', + 'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior', + 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', + 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', + 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', + 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', + 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', + 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', + 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', + 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', + 'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002', + '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', + 'Semibold' +] + +cffStandardStringCount = 391 +assert len(cffStandardStrings) == cffStandardStringCount +# build reverse mapping +cffStandardStringMapping = {} +for _i in range(cffStandardStringCount): + cffStandardStringMapping[cffStandardStrings[_i]] = _i + +cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign", +"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", +"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", +"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", +"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", +"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", +"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", +"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", +"k", "l", "m", "n", "o", "p", 
"q", "r", "s", "t", "u", "v", "w", "x", "y", "z", +"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", +"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", +"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl", +"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", +"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", +"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", +"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", +"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE", +"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", +"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", +"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn", +"threequarters", "twosuperior", "registered", "minus", "eth", "multiply", +"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", +"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", +"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", +"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", +"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute", +"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", +"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis", +"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde", +"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", +"zcaron"] + +cffISOAdobeStringCount = 229 +assert len(cffISOAdobeStrings) == cffISOAdobeStringCount + +cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall", +"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", +"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", 
+"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle", +"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", +"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon", +"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall", +"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", +"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", +"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", +"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", +"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", +"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", +"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", +"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall", +"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", +"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", +"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", +"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth", +"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", +"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior", +"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", +"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", +"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", +"centinferior", "dollarinferior", "periodinferior", "commainferior", +"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall", +"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", +"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", +"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", 
+"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", +"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", +"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", +"Ydieresissmall"] + +cffExpertStringCount = 166 +assert len(cffIExpertStrings) == cffExpertStringCount + +cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle", +"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader", +"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle", +"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", +"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", +"semicolon", "commasuperior", "threequartersemdash", "periodsuperior", +"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", +"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", +"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", +"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah", +"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf", +"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", +"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior", +"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", +"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", +"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", +"eightinferior", "nineinferior", "centinferior", "dollarinferior", +"periodinferior", "commainferior"] + +cffExpertSubsetStringCount = 87 +assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount diff -Nru fonttools-2.4/Snippets/fontTools/encodings/codecs.py fonttools-3.0/Snippets/fontTools/encodings/codecs.py --- fonttools-2.4/Snippets/fontTools/encodings/codecs.py 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.0/Snippets/fontTools/encodings/codecs.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,135 @@ +"""Extend the Python codecs module with a few encodings that are used in OpenType (name table) +but missing from Python. See https://github.com/behdad/fonttools/issues/236 for details.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import codecs +import encodings + +class ExtendCodec(codecs.Codec): + + def __init__(self, name, base_encoding, mapping): + self.name = name + self.base_encoding = base_encoding + self.mapping = mapping + self.reverse = {v:k for k,v in mapping.items()} + self.max_len = max(len(v) for v in mapping.values()) + self.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode) + codecs.register_error(name, self.error) + + def encode(self, input, errors='strict'): + assert errors == 'strict' + #return codecs.encode(input, self.base_encoding, self.name), len(input) + + # The above line could totally be all we needed, relying on the error + # handling to replace the unencodable Unicode characters with our extended + # byte sequences. + # + # However, there seems to be a design bug in Python (probably intentional): + # the error handler for encoding is supposed to return a **Unicode** character, + # that then needs to be encodable itself... Ugh. + # + # So we implement what codecs.encode() should have been doing: which is expect + # error handler to return bytes() to be added to the output. + # + # This seems to have been fixed in Python 3.3. We should try using that and + # use fallback only if that failed. 
+ # https://docs.python.org/3.3/library/codecs.html#codecs.register_error + + length = len(input) + out = b'' + while input: + try: + part = codecs.encode(input, self.base_encoding) + out += part + input = '' # All converted + except UnicodeEncodeError as e: + # Convert the correct part + out += codecs.encode(input[:e.start], self.base_encoding) + replacement, pos = self.error(e) + out += replacement + input = input[pos:] + return out, length + + def decode(self, input, errors='strict'): + assert errors == 'strict' + return codecs.decode(input, self.base_encoding, self.name), len(input) + + def error(self, e): + if isinstance(e, UnicodeDecodeError): + for end in range(e.start + 1, e.end + 1): + s = e.object[e.start:end] + if s in self.mapping: + return self.mapping[s], end + elif isinstance(e, UnicodeEncodeError): + for end in range(e.start + 1, e.start + self.max_len + 1): + s = e.object[e.start:end] + if s in self.reverse: + return self.reverse[s], end + e.encoding = self.name + raise e + + +_extended_encodings = { + "x_mac_japanese_ttx": ("shift_jis", { + b"\xFC": unichr(0x007C), + b"\x7E": unichr(0x007E), + b"\x80": unichr(0x005C), + b"\xA0": unichr(0x00A0), + b"\xFD": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), + "x_mac_trad_chinese_ttx": ("big5", { + b"\x80": unichr(0x005C), + b"\xA0": unichr(0x00A0), + b"\xFD": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), + "x_mac_korean_ttx": ("euc_kr", { + b"\x80": unichr(0x00A0), + b"\x81": unichr(0x20A9), + b"\x82": unichr(0x2014), + b"\x83": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), + "x_mac_simp_chinese_ttx": ("gb2312", { + b"\x80": unichr(0x00FC), + b"\xA0": unichr(0x00A0), + b"\xFD": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), +} + +_cache = {} + +def search_function(name): + name = encodings.normalize_encoding(name) # Rather undocumented... 
+ if name in _extended_encodings: + if name not in _cache: + base_encoding, mapping = _extended_encodings[name] + assert(name[-4:] == "_ttx") + # Python 2 didn't have any of the encodings that we are implementing + # in this file. Python 3 added aliases for the East Asian ones, mapping + # them "temporarily" to the same base encoding as us, with a comment + # suggesting that full implementation will appear some time later. + # As such, try the Python version of the x_mac_... first, if that is found, + # use *that* as our base encoding. This would make our encoding upgrade + # to the full encoding when and if Python finally implements that. + # http://bugs.python.org/issue24041 + base_encodings = [name[:-4], base_encoding] + for base_encoding in base_encodings: + try: + codecs.lookup(base_encoding) + except LookupError: + continue + _cache[name] = ExtendCodec(name, base_encoding, mapping) + break + return _cache[name].info + + return None + +codecs.register(search_function) diff -Nru fonttools-2.4/Snippets/fontTools/encodings/codecs_test.py fonttools-3.0/Snippets/fontTools/encodings/codecs_test.py --- fonttools-2.4/Snippets/fontTools/encodings/codecs_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/encodings/codecs_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,25 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +import unittest +import fontTools.encodings.codecs # Not to be confused with "import codecs" + +class ExtendedCodecsTest(unittest.TestCase): + + def test_decode_mac_japanese(self): + self.assertEqual(b'x\xfe\xfdy'.decode("x_mac_japanese_ttx"), + unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)) + + def test_encode_mac_japanese(self): + self.assertEqual(b'x\xfe\xfdy', + (unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)).encode("x_mac_japanese_ttx")) + + def test_decode_mac_trad_chinese(self): + 
self.assertEqual(b'\x80'.decode("x_mac_trad_chinese_ttx"), + unichr(0x5C)) + + def test_decode_mac_romanian(self): + self.assertEqual(b'x\xfb'.decode("mac_romanian"), + unichr(0x78)+unichr(0x02DA)) + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/encodings/__init__.py fonttools-3.0/Snippets/fontTools/encodings/__init__.py --- fonttools-2.4/Snippets/fontTools/encodings/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/encodings/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +"""Empty __init__.py file to signal Python this directory is a package.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * diff -Nru fonttools-2.4/Snippets/fontTools/encodings/MacRoman.py fonttools-3.0/Snippets/fontTools/encodings/MacRoman.py --- fonttools-2.4/Snippets/fontTools/encodings/MacRoman.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/encodings/MacRoman.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,39 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +MacRoman = [ + 'NUL', 'Eth', 'eth', 'Lslash', 'lslash', 'Scaron', 'scaron', 'Yacute', + 'yacute', 'HT', 'LF', 'Thorn', 'thorn', 'CR', 'Zcaron', 'zcaron', 'DLE', 'DC1', + 'DC2', 'DC3', 'DC4', 'onehalf', 'onequarter', 'onesuperior', 'threequarters', + 'threesuperior', 'twosuperior', 'brokenbar', 'minus', 'multiply', 'RS', 'US', + 'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand', + 'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma', + 'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five', + 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal', + 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', + 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'bracketleft', 
'backslash', 'bracketright', 'asciicircum', 'underscore', + 'grave', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', + 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', + 'braceright', 'asciitilde', 'DEL', 'Adieresis', 'Aring', 'Ccedilla', 'Eacute', + 'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex', + 'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave', 'ecircumflex', + 'edieresis', 'iacute', 'igrave', 'icircumflex', 'idieresis', 'ntilde', + 'oacute', 'ograve', 'ocircumflex', 'odieresis', 'otilde', 'uacute', 'ugrave', + 'ucircumflex', 'udieresis', 'dagger', 'degree', 'cent', 'sterling', 'section', + 'bullet', 'paragraph', 'germandbls', 'registered', 'copyright', 'trademark', + 'acute', 'dieresis', 'notequal', 'AE', 'Oslash', 'infinity', 'plusminus', + 'lessequal', 'greaterequal', 'yen', 'mu', 'partialdiff', 'summation', + 'product', 'pi', 'integral', 'ordfeminine', 'ordmasculine', 'Omega', 'ae', + 'oslash', 'questiondown', 'exclamdown', 'logicalnot', 'radical', 'florin', + 'approxequal', 'Delta', 'guillemotleft', 'guillemotright', 'ellipsis', + 'nbspace', 'Agrave', 'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', + 'quotedblleft', 'quotedblright', 'quoteleft', 'quoteright', 'divide', 'lozenge', + 'ydieresis', 'Ydieresis', 'fraction', 'currency', 'guilsinglleft', + 'guilsinglright', 'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase', + 'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute', + 'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Oacute', + 'Ocircumflex', 'apple', 'Ograve', 'Uacute', 'Ucircumflex', 'Ugrave', 'dotlessi', + 'circumflex', 'tilde', 'macron', 'breve', 'dotaccent', 'ring', 'cedilla', + 'hungarumlaut', 'ogonek', 'caron' + ] diff -Nru fonttools-2.4/Snippets/fontTools/encodings/StandardEncoding.py fonttools-3.0/Snippets/fontTools/encodings/StandardEncoding.py --- 
fonttools-2.4/Snippets/fontTools/encodings/StandardEncoding.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/encodings/StandardEncoding.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,51 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +StandardEncoding = [ + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', 'space', 'exclam', 'quotedbl', + 'numbersign', 'dollar', 'percent', 'ampersand', + 'quoteright', 'parenleft', 'parenright', 'asterisk', 'plus', + 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', 'two', + 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', + 'colon', 'semicolon', 'less', 'equal', 'greater', + 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', + 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', + 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', + 'bracketright', 'asciicircum', 'underscore', 'quoteleft', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', + 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', + 'y', 'z', 'braceleft', 'bar', 'braceright', 'asciitilde', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', 'exclamdown', + 'cent', 'sterling', 'fraction', 'yen', 'florin', 'section', + 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', + 'guilsinglleft', 
'guilsinglright', 'fi', 'fl', '.notdef', + 'endash', 'dagger', 'daggerdbl', 'periodcentered', + '.notdef', 'paragraph', 'bullet', 'quotesinglbase', + 'quotedblbase', 'quotedblright', 'guillemotright', + 'ellipsis', 'perthousand', '.notdef', 'questiondown', + '.notdef', 'grave', 'acute', 'circumflex', 'tilde', + 'macron', 'breve', 'dotaccent', 'dieresis', '.notdef', + 'ring', 'cedilla', '.notdef', 'hungarumlaut', 'ogonek', + 'caron', 'emdash', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', 'AE', '.notdef', + 'ordfeminine', '.notdef', '.notdef', '.notdef', '.notdef', + 'Lslash', 'Oslash', 'OE', 'ordmasculine', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', 'ae', '.notdef', + '.notdef', '.notdef', 'dotlessi', '.notdef', '.notdef', + 'lslash', 'oslash', 'oe', 'germandbls', '.notdef', + '.notdef', '.notdef', '.notdef' + ] diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/ast.py fonttools-3.0/Snippets/fontTools/feaLib/ast.py --- fonttools-2.4/Snippets/fontTools/feaLib/ast.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/ast.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,98 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals + + +class FeatureFile(object): + def __init__(self): + self.statements = [] + + +class FeatureBlock(object): + def __init__(self, location, name, use_extension): + self.location = location + self.name, self.use_extension = name, use_extension + self.statements = [] + + +class LookupBlock(object): + def __init__(self, location, name, use_extension): + self.location = location + self.name, self.use_extension = name, use_extension + self.statements = [] + + +class GlyphClassDefinition(object): + def __init__(self, location, name, glyphs): + self.location = location + self.name = name + self.glyphs = glyphs + + +class 
AlternateSubstitution(object): + def __init__(self, location, glyph, from_class): + self.location = location + self.glyph, self.from_class = (glyph, from_class) + + +class AnchorDefinition(object): + def __init__(self, location, name, x, y, contourpoint): + self.location = location + self.name, self.x, self.y, self.contourpoint = name, x, y, contourpoint + + +class LanguageStatement(object): + def __init__(self, location, language, include_default, required): + self.location = location + self.language = language + self.include_default = include_default + self.required = required + + +class LanguageSystemStatement(object): + def __init__(self, location, script, language): + self.location = location + self.script, self.language = (script, language) + + +class IgnoreSubstitutionRule(object): + def __init__(self, location, prefix, glyphs, suffix): + self.location = location + self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix) + + +class LookupReferenceStatement(object): + def __init__(self, location, lookup): + self.location, self.lookup = (location, lookup) + + +class ScriptStatement(object): + def __init__(self, location, script): + self.location = location + self.script = script + + +class SubtableStatement(object): + def __init__(self, location): + self.location = location + + +class SubstitutionRule(object): + def __init__(self, location, old, new): + self.location, self.old, self.new = (location, old, new) + self.old_prefix = [] + self.old_suffix = [] + self.lookups = [None] * len(old) + + +class ValueRecord(object): + def __init__(self, location, xPlacement, yPlacement, xAdvance, yAdvance): + self.location = location + self.xPlacement, self.yPlacement = (xPlacement, yPlacement) + self.xAdvance, self.yAdvance = (xAdvance, yAdvance) + + +class ValueRecordDefinition(object): + def __init__(self, location, name, value): + self.location = location + self.name = name + self.value = value diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/__init__.py 
fonttools-3.0/Snippets/fontTools/feaLib/__init__.py --- fonttools-2.4/Snippets/fontTools/feaLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +"""fontTools.feaLib -- a package for dealing with OpenType feature files.""" + +# The structure of OpenType feature files is defined here: +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/lexer.py fonttools-3.0/Snippets/fontTools/feaLib/lexer.py --- fonttools-2.4/Snippets/fontTools/feaLib/lexer.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/lexer.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,203 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +import codecs +import os + + +class LexerError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + path, line, column = self.location + return "%s:%d:%d: %s" % (path, line, column, message) + else: + return message + + +class Lexer(object): + NUMBER = "NUMBER" + STRING = "STRING" + NAME = "NAME" + FILENAME = "FILENAME" + GLYPHCLASS = "GLYPHCLASS" + CID = "CID" + SYMBOL = "SYMBOL" + COMMENT = "COMMENT" + NEWLINE = "NEWLINE" + + CHAR_WHITESPACE_ = " \t" + CHAR_NEWLINE_ = "\r\n" + CHAR_SYMBOL_ = ";:-+'{}[]<>()=" + CHAR_DIGIT_ = "0123456789" + CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" + CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + CHAR_NAME_START_ = CHAR_LETTER_ + "_.\\" + CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_." 
+ + MODE_NORMAL_ = "NORMAL" + MODE_FILENAME_ = "FILENAME" + + def __init__(self, text, filename): + self.filename_ = filename + self.line_ = 1 + self.pos_ = 0 + self.line_start_ = 0 + self.text_ = text + self.text_length_ = len(text) + self.mode_ = Lexer.MODE_NORMAL_ + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # Python 3 + while True: + token_type, token, location = self.next_() + if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}: + return (token_type, token, location) + + def next_(self): + self.scan_over_(Lexer.CHAR_WHITESPACE_) + column = self.pos_ - self.line_start_ + 1 + location = (self.filename_, self.line_, column) + start = self.pos_ + text = self.text_ + limit = len(text) + if start >= limit: + raise StopIteration() + cur_char = text[start] + next_char = text[start + 1] if start + 1 < limit else None + + if cur_char == "\n": + self.pos_ += 1 + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "\r": + self.pos_ += (2 if next_char == "\n" else 1) + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "#": + self.scan_until_(Lexer.CHAR_NEWLINE_) + return (Lexer.COMMENT, text[start:self.pos_], location) + + if self.mode_ is Lexer.MODE_FILENAME_: + if cur_char != "(": + raise LexerError("Expected '(' before file name", location) + self.scan_until_(")") + cur_char = text[self.pos_] if self.pos_ < limit else None + if cur_char != ")": + raise LexerError("Expected ')' after file name", location) + self.pos_ += 1 + self.mode_ = Lexer.MODE_NORMAL_ + return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location) + + if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.CID, int(text[start + 1:self.pos_], 10), location) + if cur_char == "@": + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + 
glyphclass = text[start + 1:self.pos_] + if len(glyphclass) < 1: + raise LexerError("Expected glyph class name", location) + if len(glyphclass) > 30: + raise LexerError( + "Glyph class names must not be longer than 30 characters", + location) + return (Lexer.GLYPHCLASS, glyphclass, location) + if cur_char in Lexer.CHAR_NAME_START_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + token = text[start:self.pos_] + if token == "include": + self.mode_ = Lexer.MODE_FILENAME_ + return (Lexer.NAME, token, location) + if cur_char == "0" and next_char in "xX": + self.pos_ += 2 + self.scan_over_(Lexer.CHAR_HEXDIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 16), location) + if cur_char in Lexer.CHAR_DIGIT_: + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if cur_char in Lexer.CHAR_SYMBOL_: + self.pos_ += 1 + return (Lexer.SYMBOL, cur_char, location) + if cur_char == '"': + self.pos_ += 1 + self.scan_until_('"\r\n') + if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': + self.pos_ += 1 + return (Lexer.STRING, text[start + 1:self.pos_ - 1], location) + else: + raise LexerError("Expected '\"' to terminate string", location) + raise LexerError("Unexpected character: '%s'" % cur_char, location) + + def scan_over_(self, valid): + p = self.pos_ + while p < self.text_length_ and self.text_[p] in valid: + p += 1 + self.pos_ = p + + def scan_until_(self, stop_at): + p = self.pos_ + while p < self.text_length_ and self.text_[p] not in stop_at: + p += 1 + self.pos_ = p + + +class IncludingLexer(object): + def __init__(self, filename): + self.lexers_ = [self.make_lexer_(filename, (filename, 0, 0))] + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # 
Python 3 + while self.lexers_: + lexer = self.lexers_[-1] + try: + token_type, token, location = lexer.next() + except StopIteration: + self.lexers_.pop() + continue + if token_type is Lexer.NAME and token == "include": + fname_type, fname_token, fname_location = lexer.next() + if fname_type is not Lexer.FILENAME: + raise LexerError("Expected file name", fname_location) + semi_type, semi_token, semi_location = lexer.next() + if semi_type is not Lexer.SYMBOL or semi_token != ";": + raise LexerError("Expected ';'", semi_location) + curpath, _ = os.path.split(lexer.filename_) + path = os.path.join(curpath, fname_token) + if len(self.lexers_) >= 5: + raise LexerError("Too many recursive includes", + fname_location) + self.lexers_.append(self.make_lexer_(path, fname_location)) + continue + else: + return (token_type, token, location) + raise StopIteration() + + @staticmethod + def make_lexer_(filename, location): + try: + with codecs.open(filename, "rb", "utf-8") as f: + return Lexer(f.read(), filename) + except IOError as err: + raise LexerError(str(err), location) diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/lexer_test.py fonttools-3.0/Snippets/fontTools/feaLib/lexer_test.py --- fonttools-2.4/Snippets/fontTools/feaLib/lexer_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/lexer_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,160 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.lexer import IncludingLexer, Lexer, LexerError +import os +import unittest + + +def lex(s): + return [(typ, tok) for (typ, tok, _) in Lexer(s, "test.fea")] + + +class LexerErrorTest(unittest.TestCase): + def test_str(self): + err = LexerError("Squeak!", ("foo.fea", 23, 42)) + self.assertEqual(str(err), "foo.fea:23:42: Squeak!") + + def test_str_nolocation(self): + err = LexerError("Squeak!", None) + self.assertEqual(str(err), "Squeak!") + + +class 
LexerTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_empty(self): + self.assertEqual(lex(""), []) + self.assertEqual(lex(" \t "), []) + + def test_name(self): + self.assertEqual(lex("a17"), [(Lexer.NAME, "a17")]) + self.assertEqual(lex(".notdef"), [(Lexer.NAME, ".notdef")]) + self.assertEqual(lex("two.oldstyle"), [(Lexer.NAME, "two.oldstyle")]) + self.assertEqual(lex("_"), [(Lexer.NAME, "_")]) + self.assertEqual(lex("\\table"), [(Lexer.NAME, "\\table")]) + + def test_cid(self): + self.assertEqual(lex("\\0 \\987"), [(Lexer.CID, 0), (Lexer.CID, 987)]) + + def test_glyphclass(self): + self.assertEqual(lex("@Vowel.sc"), [(Lexer.GLYPHCLASS, "Vowel.sc")]) + self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@(a)") + self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@ A") + self.assertRaisesRegex(LexerError, "not be longer than 30 characters", + lex, "@a123456789.a123456789.a123456789.x") + + def test_include(self): + self.assertEqual(lex("include (~/foo/bar baz.fea);"), [ + (Lexer.NAME, "include"), + (Lexer.FILENAME, "~/foo/bar baz.fea"), + (Lexer.SYMBOL, ";") + ]) + self.assertEqual(lex("include # Comment\n (foo) \n;"), [ + (Lexer.NAME, "include"), + (Lexer.FILENAME, "foo"), + (Lexer.SYMBOL, ";") + ]) + self.assertRaises(LexerError, lex, "include blah") + self.assertRaises(LexerError, lex, "include (blah") + + def test_number(self): + self.assertEqual(lex("123 -456"), + [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)]) + self.assertEqual(lex("0xCAFED00D"), [(Lexer.NUMBER, 0xCAFED00D)]) + self.assertEqual(lex("0xcafed00d"), [(Lexer.NUMBER, 0xCAFED00D)]) + + def test_symbol(self): + self.assertEqual(lex("a'"), [(Lexer.NAME, "a"), (Lexer.SYMBOL, "'")]) + 
self.assertEqual( + lex("foo - -2"), + [(Lexer.NAME, "foo"), (Lexer.SYMBOL, "-"), (Lexer.NUMBER, -2)]) + + def test_comment(self): + self.assertEqual(lex("# Comment\n#"), []) + + def test_string(self): + self.assertEqual(lex('"foo" "bar"'), + [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")]) + self.assertRaises(LexerError, lambda: lex('"foo\n bar"')) + + def test_bad_character(self): + self.assertRaises(LexerError, lambda: lex("123 \u0001")) + + def test_newline(self): + lines = lambda s: [loc[1] for (_, _, loc) in Lexer(s, "test.fea")] + self.assertEqual(lines("FOO\n\nBAR\nBAZ"), [1, 3, 4]) # Unix + self.assertEqual(lines("FOO\r\rBAR\rBAZ"), [1, 3, 4]) # Macintosh + self.assertEqual(lines("FOO\r\n\r\n BAR\r\nBAZ"), [1, 3, 4]) # Windows + self.assertEqual(lines("FOO\n\rBAR\r\nBAZ"), [1, 3, 4]) # mixed + + def test_location(self): + locs = lambda s: ["%s:%d:%d" % loc + for (_, _, loc) in Lexer(s, "test.fea")] + self.assertEqual(locs("a b # Comment\n12 @x"), [ + "test.fea:1:1", "test.fea:1:3", "test.fea:2:1", + "test.fea:2:4" + ]) + + def test_scan_over_(self): + lexer = Lexer("abbacabba12", "test.fea") + self.assertEqual(lexer.pos_, 0) + lexer.scan_over_("xyz") + self.assertEqual(lexer.pos_, 0) + lexer.scan_over_("abc") + self.assertEqual(lexer.pos_, 9) + lexer.scan_over_("abc") + self.assertEqual(lexer.pos_, 9) + lexer.scan_over_("0123456789") + self.assertEqual(lexer.pos_, 11) + + def test_scan_until_(self): + lexer = Lexer("foo'bar", "test.fea") + self.assertEqual(lexer.pos_, 0) + lexer.scan_until_("'") + self.assertEqual(lexer.pos_, 3) + lexer.scan_until_("'") + self.assertEqual(lexer.pos_, 3) + + +class IncludingLexerTest(unittest.TestCase): + @staticmethod + def getpath(filename): + path, _ = os.path.split(__file__) + return os.path.join(path, "testdata", filename) + + def test_include(self): + lexer = IncludingLexer(self.getpath("include4.fea")) + result = ['%s %s:%d' % (token, os.path.split(loc[0])[1], loc[1]) + for _, token, loc in lexer] + 
self.assertEqual(result, [ + "I4a include4.fea:1", + "I3a include3.fea:1", + "I2a include2.fea:1", + "I1a include1.fea:1", + "I0 include0.fea:1", + "I1b include1.fea:3", + "I2b include2.fea:3", + "I3b include3.fea:3", + "I4b include4.fea:3" + ]) + + def test_include_limit(self): + lexer = IncludingLexer(self.getpath("include6.fea")) + self.assertRaises(LexerError, lambda: list(lexer)) + + def test_include_self(self): + lexer = IncludingLexer(self.getpath("includeself.fea")) + self.assertRaises(LexerError, lambda: list(lexer)) + + def test_include_missing_file(self): + lexer = IncludingLexer(self.getpath("includemissingfile.fea")) + self.assertRaises(LexerError, lambda: list(lexer)) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/parser.py fonttools-3.0/Snippets/fontTools/feaLib/parser.py --- fonttools-2.4/Snippets/fontTools/feaLib/parser.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/parser.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,466 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.lexer import Lexer, IncludingLexer +import fontTools.feaLib.ast as ast +import os +import re + + +class ParserError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + path, line, column = self.location + return "%s:%d:%d: %s" % (path, line, column, message) + else: + return message + + +class Parser(object): + def __init__(self, path): + self.doc_ = ast.FeatureFile() + self.anchors_ = SymbolTable() + self.glyphclasses_ = SymbolTable() + self.lookups_ = SymbolTable() + self.valuerecords_ = SymbolTable() + self.symbol_tables_ = { + self.anchors_, self.glyphclasses_, + self.lookups_, self.valuerecords_ + } + self.next_token_type_, self.next_token_ = (None, None) + 
self.next_token_location_ = None + self.lexer_ = IncludingLexer(path) + self.advance_lexer_() + + def parse(self): + statements = self.doc_.statements + while self.next_token_type_ is not None: + self.advance_lexer_() + if self.cur_token_type_ is Lexer.GLYPHCLASS: + statements.append(self.parse_glyphclass_definition_()) + elif self.is_cur_keyword_("anchorDef"): + statements.append(self.parse_anchordef_()) + elif self.is_cur_keyword_("languagesystem"): + statements.append(self.parse_languagesystem_()) + elif self.is_cur_keyword_("lookup"): + statements.append(self.parse_lookup_(vertical=False)) + elif self.is_cur_keyword_("feature"): + statements.append(self.parse_feature_block_()) + elif self.is_cur_keyword_("valueRecordDef"): + statements.append( + self.parse_valuerecord_definition_(vertical=False)) + else: + raise ParserError("Expected feature, languagesystem, " + "lookup, or glyph class definition", + self.cur_token_location_) + return self.doc_ + + def parse_anchordef_(self): + assert self.is_cur_keyword_("anchorDef") + location = self.cur_token_location_ + x, y = self.expect_number_(), self.expect_number_() + contourpoint = None + if self.next_token_ == "contourpoint": + self.expect_keyword_("contourpoint") + contourpoint = self.expect_number_() + name = self.expect_name_() + self.expect_symbol_(";") + anchordef = ast.AnchorDefinition(location, name, x, y, contourpoint) + self.anchors_.define(name, anchordef) + return anchordef + + def parse_glyphclass_definition_(self): + location, name = self.cur_token_location_, self.cur_token_ + self.expect_symbol_("=") + glyphs = self.parse_glyphclass_(accept_glyphname=False) + self.expect_symbol_(";") + if self.glyphclasses_.resolve(name) is not None: + raise ParserError("Glyph class @%s already defined" % name, + location) + glyphclass = ast.GlyphClassDefinition(location, name, glyphs) + self.glyphclasses_.define(name, glyphclass) + return glyphclass + + def parse_glyphclass_(self, accept_glyphname): + result = set() + 
if accept_glyphname and self.next_token_type_ is Lexer.NAME: + result.add(self.expect_name_()) + return result + if self.next_token_type_ is Lexer.GLYPHCLASS: + self.advance_lexer_() + gc = self.glyphclasses_.resolve(self.cur_token_) + if gc is None: + raise ParserError("Unknown glyph class @%s" % self.cur_token_, + self.cur_token_location_) + result.update(gc.glyphs) + return result + + self.expect_symbol_("[") + while self.next_token_ != "]": + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + if self.next_token_ == "-": + range_location_ = self.cur_token_location_ + range_start = self.cur_token_ + self.expect_symbol_("-") + range_end = self.expect_name_() + result.update(self.make_glyph_range_(range_location_, + range_start, + range_end)) + else: + result.add(self.cur_token_) + elif self.cur_token_type_ is Lexer.GLYPHCLASS: + gc = self.glyphclasses_.resolve(self.cur_token_) + if gc is None: + raise ParserError( + "Unknown glyph class @%s" % self.cur_token_, + self.cur_token_location_) + result.update(gc.glyphs) + else: + raise ParserError( + "Expected glyph name, glyph range, " + "or glyph class reference", + self.cur_token_location_) + self.expect_symbol_("]") + return result + + def parse_glyph_pattern_(self): + prefix, glyphs, lookups, suffix = ([], [], [], []) + while self.next_token_ not in {"by", "from", ";"}: + gc = self.parse_glyphclass_(accept_glyphname=True) + marked = False + if self.next_token_ == "'": + self.expect_symbol_("'") + marked = True + if marked: + glyphs.append(gc) + elif glyphs: + suffix.append(gc) + else: + prefix.append(gc) + + lookup = None + if self.next_token_ == "lookup": + self.expect_keyword_("lookup") + if not marked: + raise ParserError("Lookups can only follow marked glyphs", + self.cur_token_location_) + lookup_name = self.expect_name_() + lookup = self.lookups_.resolve(lookup_name) + if lookup is None: + raise ParserError('Unknown lookup "%s"' % lookup_name, + self.cur_token_location_) + if marked: + 
lookups.append(lookup) + + if not glyphs and not suffix: # eg., "sub f f i by" + assert lookups == [] + return ([], prefix, [None] * len(prefix), []) + else: + return (prefix, glyphs, lookups, suffix) + + def parse_ignore_(self): + assert self.is_cur_keyword_("ignore") + location = self.cur_token_location_ + self.advance_lexer_() + if self.cur_token_ in ["substitute", "sub"]: + prefix, glyphs, lookups, suffix = self.parse_glyph_pattern_() + self.expect_symbol_(";") + return ast.IgnoreSubstitutionRule(location, prefix, glyphs, suffix) + raise ParserError("Expected \"substitute\"", self.next_token_location_) + + def parse_language_(self): + assert self.is_cur_keyword_("language") + location, language = self.cur_token_location_, self.expect_tag_() + include_default, required = (True, False) + if self.next_token_ in {"exclude_dflt", "include_dflt"}: + include_default = (self.expect_name_() == "include_dflt") + if self.next_token_ == "required": + self.expect_keyword_("required") + required = True + self.expect_symbol_(";") + return ast.LanguageStatement(location, language.strip(), + include_default, required) + + def parse_lookup_(self, vertical): + assert self.is_cur_keyword_("lookup") + location, name = self.cur_token_location_, self.expect_name_() + + if self.next_token_ == ";": + lookup = self.lookups_.resolve(name) + if lookup is None: + raise ParserError("Unknown lookup \"%s\"" % name, + self.cur_token_location_) + self.expect_symbol_(";") + return ast.LookupReferenceStatement(location, lookup) + + use_extension = False + if self.next_token_ == "useExtension": + self.expect_keyword_("useExtension") + use_extension = True + + block = ast.LookupBlock(location, name, use_extension) + self.parse_block_(block, vertical) + self.lookups_.define(name, block) + return block + + def parse_script_(self): + assert self.is_cur_keyword_("script") + location, script = self.cur_token_location_, self.expect_tag_() + self.expect_symbol_(";") + return ast.ScriptStatement(location, 
script) + + def parse_substitute_(self): + assert self.cur_token_ in {"substitute", "sub"} + location = self.cur_token_location_ + old_prefix, old, lookups, old_suffix = self.parse_glyph_pattern_() + + new = [] + if self.next_token_ == "by": + keyword = self.expect_keyword_("by") + while self.next_token_ != ";": + new.append(self.parse_glyphclass_(accept_glyphname=True)) + elif self.next_token_ == "from": + keyword = self.expect_keyword_("from") + new = [self.parse_glyphclass_(accept_glyphname=False)] + else: + keyword = None + self.expect_symbol_(";") + if len(new) is 0 and not any(lookups): + raise ParserError( + 'Expected "by", "from" or explicit lookup references', + self.cur_token_location_) + + if keyword == "from": + if len(old) != 1 or len(old[0]) != 1: + raise ParserError('Expected a single glyph before "from"', + location) + if len(new) != 1: + raise ParserError('Expected a single glyphclass after "from"', + location) + return ast.AlternateSubstitution(location, list(old[0])[0], new[0]) + + rule = ast.SubstitutionRule(location, old, new) + rule.old_prefix, rule.old_suffix = old_prefix, old_suffix + rule.lookups = lookups + return rule + + def parse_subtable_(self): + assert self.is_cur_keyword_("subtable") + location = self.cur_token_location_ + self.expect_symbol_(";") + return ast.SubtableStatement(location) + + def parse_valuerecord_(self, vertical): + if self.next_token_type_ is Lexer.NUMBER: + number, location = self.expect_number_(), self.cur_token_location_ + if vertical: + val = ast.ValueRecord(location, 0, 0, 0, number) + else: + val = ast.ValueRecord(location, 0, 0, number, 0) + return val + self.expect_symbol_("<") + location = self.cur_token_location_ + if self.next_token_type_ is Lexer.NAME: + name = self.expect_name_() + vrd = self.valuerecords_.resolve(name) + if vrd is None: + raise ParserError("Unknown valueRecordDef \"%s\"" % name, + self.cur_token_location_) + value = vrd.value + xPlacement, yPlacement = (value.xPlacement, 
value.yPlacement) + xAdvance, yAdvance = (value.xAdvance, value.yAdvance) + else: + xPlacement, yPlacement, xAdvance, yAdvance = ( + self.expect_number_(), self.expect_number_(), + self.expect_number_(), self.expect_number_()) + self.expect_symbol_(">") + return ast.ValueRecord( + location, xPlacement, yPlacement, xAdvance, yAdvance) + + def parse_valuerecord_definition_(self, vertical): + assert self.is_cur_keyword_("valueRecordDef") + location = self.cur_token_location_ + value = self.parse_valuerecord_(vertical) + name = self.expect_name_() + self.expect_symbol_(";") + vrd = ast.ValueRecordDefinition(location, name, value) + self.valuerecords_.define(name, vrd) + return vrd + + def parse_languagesystem_(self): + assert self.cur_token_ == "languagesystem" + location = self.cur_token_location_ + script, language = self.expect_tag_(), self.expect_tag_() + self.expect_symbol_(";") + return ast.LanguageSystemStatement(location, script, language) + + def parse_feature_block_(self): + assert self.cur_token_ == "feature" + location = self.cur_token_location_ + tag = self.expect_tag_() + vertical = (tag == "vkrn") + + use_extension = False + if self.next_token_ == "useExtension": + self.expect_keyword_("useExtension") + use_extension = True + + block = ast.FeatureBlock(location, tag, use_extension) + self.parse_block_(block, vertical) + return block + + def parse_block_(self, block, vertical): + self.expect_symbol_("{") + for symtab in self.symbol_tables_: + symtab.enter_scope() + + statements = block.statements + while self.next_token_ != "}": + self.advance_lexer_() + if self.cur_token_type_ is Lexer.GLYPHCLASS: + statements.append(self.parse_glyphclass_definition_()) + elif self.is_cur_keyword_("anchorDef"): + statements.append(self.parse_anchordef_()) + elif self.is_cur_keyword_("ignore"): + statements.append(self.parse_ignore_()) + elif self.is_cur_keyword_("language"): + statements.append(self.parse_language_()) + elif self.is_cur_keyword_("lookup"): + 
statements.append(self.parse_lookup_(vertical)) + elif self.is_cur_keyword_("script"): + statements.append(self.parse_script_()) + elif (self.is_cur_keyword_("substitute") or + self.is_cur_keyword_("sub")): + statements.append(self.parse_substitute_()) + elif self.is_cur_keyword_("subtable"): + statements.append(self.parse_subtable_()) + elif self.is_cur_keyword_("valueRecordDef"): + statements.append(self.parse_valuerecord_definition_(vertical)) + else: + raise ParserError( + "Expected glyph class definition or statement", + self.cur_token_location_) + + self.expect_symbol_("}") + for symtab in self.symbol_tables_: + symtab.exit_scope() + + name = self.expect_name_() + if name != block.name.strip(): + raise ParserError("Expected \"%s\"" % block.name.strip(), + self.cur_token_location_) + self.expect_symbol_(";") + + def is_cur_keyword_(self, k): + return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k) + + def expect_tag_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.NAME: + raise ParserError("Expected a tag", self.cur_token_location_) + if len(self.cur_token_) > 4: + raise ParserError("Tags can not be longer than 4 characters", + self.cur_token_location_) + return (self.cur_token_ + " ")[:4] + + def expect_symbol_(self, symbol): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol: + return symbol + raise ParserError("Expected '%s'" % symbol, self.cur_token_location_) + + def expect_keyword_(self, keyword): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword: + return self.cur_token_ + raise ParserError("Expected \"%s\"" % keyword, + self.cur_token_location_) + + def expect_name_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + return self.cur_token_ + raise ParserError("Expected a name", self.cur_token_location_) + + def expect_number_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NUMBER: + 
return self.cur_token_ + raise ParserError("Expected a number", self.cur_token_location_) + + def advance_lexer_(self): + self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( + self.next_token_type_, self.next_token_, self.next_token_location_) + try: + (self.next_token_type_, self.next_token_, + self.next_token_location_) = self.lexer_.next() + except StopIteration: + self.next_token_type_, self.next_token_ = (None, None) + + def make_glyph_range_(self, location, start, limit): + """("a.sc", "d.sc") --> {"a.sc", "b.sc", "c.sc", "d.sc"}""" + result = set() + if len(start) != len(limit): + raise ParserError( + "Bad range: \"%s\" and \"%s\" should have the same length" % + (start, limit), location) + rev = lambda s: ''.join(reversed(list(s))) # string reversal + prefix = os.path.commonprefix([start, limit]) + suffix = rev(os.path.commonprefix([rev(start), rev(limit)])) + if len(suffix) > 0: + start_range = start[len(prefix):-len(suffix)] + limit_range = limit[len(prefix):-len(suffix)] + else: + start_range = start[len(prefix):] + limit_range = limit[len(prefix):] + + if start_range >= limit_range: + raise ParserError("Start of range must be smaller than its end", + location) + + uppercase = re.compile(r'^[A-Z]$') + if uppercase.match(start_range) and uppercase.match(limit_range): + for c in range(ord(start_range), ord(limit_range) + 1): + result.add("%s%c%s" % (prefix, c, suffix)) + return result + + lowercase = re.compile(r'^[a-z]$') + if lowercase.match(start_range) and lowercase.match(limit_range): + for c in range(ord(start_range), ord(limit_range) + 1): + result.add("%s%c%s" % (prefix, c, suffix)) + return result + + digits = re.compile(r'^[0-9]{1,3}$') + if digits.match(start_range) and digits.match(limit_range): + for i in range(int(start_range, 10), int(limit_range, 10) + 1): + number = ("000" + str(i))[-len(start_range):] + result.add("%s%s%s" % (prefix, number, suffix)) + return result + + raise ParserError("Bad range: \"%s-%s\"" % (start, 
limit), location) + + +class SymbolTable(object): + def __init__(self): + self.scopes_ = [{}] + + def enter_scope(self): + self.scopes_.append({}) + + def exit_scope(self): + self.scopes_.pop() + + def define(self, name, item): + self.scopes_[-1][name] = item + + def resolve(self, name): + for scope in reversed(self.scopes_): + item = scope.get(name) + if item: + return item + return None diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/parser_test.py fonttools-3.0/Snippets/fontTools/feaLib/parser_test.py --- fonttools-2.4/Snippets/fontTools/feaLib/parser_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/parser_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,448 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.lexer import LexerError +from fontTools.feaLib.parser import Parser, ParserError, SymbolTable +from fontTools.misc.py23 import * +import fontTools.feaLib.ast as ast +import codecs +import os +import shutil +import sys +import tempfile +import unittest + + +class ParserTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_anchordef(self): + [foo] = self.parse("anchorDef 123 456 foo;").statements + self.assertEqual(type(foo), ast.AnchorDefinition) + self.assertEqual(foo.name, "foo") + self.assertEqual(foo.x, 123) + self.assertEqual(foo.y, 456) + self.assertEqual(foo.contourpoint, None) + + def test_anchordef_contourpoint(self): + [foo] = self.parse("anchorDef 123 456 contourpoint 5 foo;").statements + self.assertEqual(type(foo), ast.AnchorDefinition) + self.assertEqual(foo.name, "foo") + self.assertEqual(foo.x, 123) + self.assertEqual(foo.y, 456) + self.assertEqual(foo.contourpoint, 5) + + def test_feature_block(self): + [liga] = self.parse("feature liga {} liga;").statements + self.assertEqual(liga.name, "liga") + self.assertFalse(liga.use_extension) + + def test_feature_block_useExtension(self): + [liga] = self.parse("feature liga useExtension {} liga;").statements + self.assertEqual(liga.name, "liga") + self.assertTrue(liga.use_extension) + + def test_glyphclass(self): + [gc] = self.parse("@dash = [endash emdash figuredash];").statements + self.assertEqual(gc.name, "dash") + self.assertEqual(gc.glyphs, {"endash", "emdash", "figuredash"}) + + def test_glyphclass_bad(self): + self.assertRaisesRegex( + ParserError, + "Expected glyph name, glyph range, or glyph class reference", + self.parse, "@bad = [a 123];") + + def test_glyphclass_duplicate(self): + self.assertRaisesRegex( + ParserError, "Glyph class @dup already defined", + self.parse, "@dup = [a b]; @dup = [x];") + + def test_glyphclass_empty(self): + [gc] = self.parse("@empty_set = [];").statements + self.assertEqual(gc.name, "empty_set") + self.assertEqual(gc.glyphs, set()) + + def test_glyphclass_equality(self): + [foo, bar] = self.parse("@foo = [a b]; @bar = @foo;").statements + self.assertEqual(foo.glyphs, {"a", "b"}) + self.assertEqual(bar.glyphs, {"a", "b"}) + + def test_glyphclass_range_uppercase(self): + [gc] = 
self.parse("@swashes = [X.swash-Z.swash];").statements + self.assertEqual(gc.name, "swashes") + self.assertEqual(gc.glyphs, {"X.swash", "Y.swash", "Z.swash"}) + + def test_glyphclass_range_lowercase(self): + [gc] = self.parse("@defg.sc = [d.sc-g.sc];").statements + self.assertEqual(gc.name, "defg.sc") + self.assertEqual(gc.glyphs, {"d.sc", "e.sc", "f.sc", "g.sc"}) + + def test_glyphclass_range_digit1(self): + [gc] = self.parse("@range = [foo.2-foo.5];").statements + self.assertEqual(gc.glyphs, {"foo.2", "foo.3", "foo.4", "foo.5"}) + + def test_glyphclass_range_digit2(self): + [gc] = self.parse("@range = [foo.09-foo.11];").statements + self.assertEqual(gc.glyphs, {"foo.09", "foo.10", "foo.11"}) + + def test_glyphclass_range_digit3(self): + [gc] = self.parse("@range = [foo.123-foo.125];").statements + self.assertEqual(gc.glyphs, {"foo.123", "foo.124", "foo.125"}) + + def test_glyphclass_range_bad(self): + self.assertRaisesRegex( + ParserError, + "Bad range: \"a\" and \"foobar\" should have the same length", + self.parse, "@bad = [a-foobar];") + self.assertRaisesRegex( + ParserError, "Bad range: \"A.swash-z.swash\"", + self.parse, "@bad = [A.swash-z.swash];") + self.assertRaisesRegex( + ParserError, "Start of range must be smaller than its end", + self.parse, "@bad = [B.swash-A.swash];") + self.assertRaisesRegex( + ParserError, "Bad range: \"foo.1234-foo.9876\"", + self.parse, "@bad = [foo.1234-foo.9876];") + + def test_glyphclass_range_mixed(self): + [gc] = self.parse("@range = [a foo.09-foo.11 X.sc-Z.sc];").statements + self.assertEqual(gc.glyphs, { + "a", "foo.09", "foo.10", "foo.11", "X.sc", "Y.sc", "Z.sc" + }) + + def test_glyphclass_reference(self): + [vowels_lc, vowels_uc, vowels] = self.parse( + "@Vowels.lc = [a e i o u]; @Vowels.uc = [A E I O U];" + "@Vowels = [@Vowels.lc @Vowels.uc y Y];").statements + self.assertEqual(vowels_lc.glyphs, set(list("aeiou"))) + self.assertEqual(vowels_uc.glyphs, set(list("AEIOU"))) + self.assertEqual(vowels.glyphs, 
set(list("aeiouyAEIOUY"))) + self.assertRaisesRegex( + ParserError, "Unknown glyph class @unknown", + self.parse, "@bad = [@unknown];") + + def test_glyphclass_scoping(self): + [foo, liga, smcp] = self.parse( + "@foo = [a b];" + "feature liga { @bar = [@foo l]; } liga;" + "feature smcp { @bar = [@foo s]; } smcp;" + ).statements + self.assertEqual(foo.glyphs, {"a", "b"}) + self.assertEqual(liga.statements[0].glyphs, {"a", "b", "l"}) + self.assertEqual(smcp.statements[0].glyphs, {"a", "b", "s"}) + + def test_ignore_sub(self): + doc = self.parse("feature test {ignore sub e t' c;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.IgnoreSubstitutionRule) + self.assertEqual(s.prefix, [{"e"}]) + self.assertEqual(s.glyphs, [{"t"}]) + self.assertEqual(s.suffix, [{"c"}]) + + def test_ignore_substitute(self): + doc = self.parse( + "feature test {" + " ignore substitute f [a e] d' [a u]' [e y];" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.IgnoreSubstitutionRule) + self.assertEqual(s.prefix, [{"f"}, {"a", "e"}]) + self.assertEqual(s.glyphs, [{"d"}, {"a", "u"}]) + self.assertEqual(s.suffix, [{"e", "y"}]) + + def test_language(self): + doc = self.parse("feature test {language DEU;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertTrue(s.include_default) + self.assertFalse(s.required) + + def test_language_exclude_dflt(self): + doc = self.parse("feature test {language DEU exclude_dflt;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertFalse(s.include_default) + self.assertFalse(s.required) + + def test_language_exclude_dflt_required(self): + doc = self.parse("feature test {" + " language DEU exclude_dflt required;" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + 
self.assertEqual(s.language, "DEU") + self.assertFalse(s.include_default) + self.assertTrue(s.required) + + def test_language_include_dflt(self): + doc = self.parse("feature test {language DEU include_dflt;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertTrue(s.include_default) + self.assertFalse(s.required) + + def test_language_include_dflt_required(self): + doc = self.parse("feature test {" + " language DEU include_dflt required;" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertTrue(s.include_default) + self.assertTrue(s.required) + + def test_lookup_block(self): + [lookup] = self.parse("lookup Ligatures {} Ligatures;").statements + self.assertEqual(lookup.name, "Ligatures") + self.assertFalse(lookup.use_extension) + + def test_lookup_block_useExtension(self): + [lookup] = self.parse("lookup Foo useExtension {} Foo;").statements + self.assertEqual(lookup.name, "Foo") + self.assertTrue(lookup.use_extension) + + def test_lookup_block_name_mismatch(self): + self.assertRaisesRegex( + ParserError, 'Expected "Foo"', + self.parse, "lookup Foo {} Bar;") + + def test_lookup_block_with_horizontal_valueRecordDef(self): + doc = self.parse("feature liga {" + " lookup look {" + " valueRecordDef 123 foo;" + " } look;" + "} liga;") + [liga] = doc.statements + [look] = liga.statements + [foo] = look.statements + self.assertEqual(foo.value.xAdvance, 123) + self.assertEqual(foo.value.yAdvance, 0) + + def test_lookup_block_with_vertical_valueRecordDef(self): + doc = self.parse("feature vkrn {" + " lookup look {" + " valueRecordDef 123 foo;" + " } look;" + "} vkrn;") + [vkrn] = doc.statements + [look] = vkrn.statements + [foo] = look.statements + self.assertEqual(foo.value.xAdvance, 0) + self.assertEqual(foo.value.yAdvance, 123) + + def test_lookup_reference(self): + [foo, bar] = 
self.parse("lookup Foo {} Foo;" + "feature Bar {lookup Foo;} Bar;").statements + [ref] = bar.statements + self.assertEqual(type(ref), ast.LookupReferenceStatement) + self.assertEqual(ref.lookup, foo) + + def test_lookup_reference_unknown(self): + self.assertRaisesRegex( + ParserError, 'Unknown lookup "Huh"', + self.parse, "feature liga {lookup Huh;} liga;") + + def test_script(self): + doc = self.parse("feature test {script cyrl;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.ScriptStatement) + self.assertEqual(s.script, "cyrl") + + def test_substitute_single_format_a(self): # GSUB LookupType 1 + doc = self.parse("feature smcp {substitute a by a.sc;} smcp;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"a"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"a.sc"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_single_format_b(self): # GSUB LookupType 1 + doc = self.parse( + "feature smcp {" + " substitute [one.fitted one.oldstyle] by one;" + "} smcp;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"one.fitted", "one.oldstyle"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"one"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_single_format_c(self): # GSUB LookupType 1 + doc = self.parse( + "feature smcp {" + " substitute [a-d] by [A.sc-D.sc];" + "} smcp;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"a", "b", "c", "d"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"A.sc", "B.sc", "C.sc", "D.sc"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_multiple(self): # GSUB LookupType 2 + doc = self.parse("lookup Look {substitute f_f_i by f f i;} Look;") + sub = doc.statements[0].statements[0] + self.assertEqual(type(sub), 
ast.SubstitutionRule) + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"f_f_i"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"f"}, {"f"}, {"i"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_from(self): # GSUB LookupType 3 + doc = self.parse("feature test {" + " substitute a from [a.1 a.2 a.3];" + "} test;") + sub = doc.statements[0].statements[0] + self.assertEqual(type(sub), ast.AlternateSubstitution) + self.assertEqual(sub.glyph, "a") + self.assertEqual(sub.from_class, {"a.1", "a.2", "a.3"}) + + def test_substitute_from_glyphclass(self): # GSUB LookupType 3 + doc = self.parse("feature test {" + " @Ampersands = [ampersand.1 ampersand.2];" + " substitute ampersand from @Ampersands;" + "} test;") + [glyphclass, sub] = doc.statements[0].statements + self.assertEqual(type(sub), ast.AlternateSubstitution) + self.assertEqual(sub.glyph, "ampersand") + self.assertEqual(sub.from_class, {"ampersand.1", "ampersand.2"}) + + def test_substitute_ligature(self): # GSUB LookupType 4 + doc = self.parse("feature liga {substitute f f i by f_f_i;} liga;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"f"}, {"f"}, {"i"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"f_f_i"}]) + self.assertEqual(sub.lookups, [None, None, None]) + + def test_substitute_lookups(self): + doc = Parser(self.getpath("spec5fi.fea")).parse() + [ligs, sub, feature] = doc.statements + self.assertEqual(feature.statements[0].lookups, [ligs, None, sub]) + self.assertEqual(feature.statements[1].lookups, [ligs, None, sub]) + + def test_substitute_missing_by(self): + self.assertRaisesRegex( + ParserError, 'Expected "by", "from" or explicit lookup references', + self.parse, "feature liga {substitute f f i;} liga;") + + def test_subtable(self): + doc = self.parse("feature test {subtable;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), 
ast.SubtableStatement) + + def test_valuerecord_format_a_horizontal(self): + doc = self.parse("feature liga {valueRecordDef 123 foo;} liga;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 0) + self.assertEqual(value.yPlacement, 0) + self.assertEqual(value.xAdvance, 123) + self.assertEqual(value.yAdvance, 0) + + def test_valuerecord_format_a_vertical(self): + doc = self.parse("feature vkrn {valueRecordDef 123 foo;} vkrn;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 0) + self.assertEqual(value.yPlacement, 0) + self.assertEqual(value.xAdvance, 0) + self.assertEqual(value.yAdvance, 123) + + def test_valuerecord_format_b(self): + doc = self.parse("feature liga {valueRecordDef <1 2 3 4> foo;} liga;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 1) + self.assertEqual(value.yPlacement, 2) + self.assertEqual(value.xAdvance, 3) + self.assertEqual(value.yAdvance, 4) + + def test_valuerecord_named(self): + doc = self.parse("valueRecordDef <1 2 3 4> foo;" + "feature liga {valueRecordDef <foo> bar;} liga;") + value = doc.statements[1].statements[0].value + self.assertEqual(value.xPlacement, 1) + self.assertEqual(value.yPlacement, 2) + self.assertEqual(value.xAdvance, 3) + self.assertEqual(value.yAdvance, 4) + + def test_valuerecord_named_unknown(self): + self.assertRaisesRegex( + ParserError, "Unknown valueRecordDef \"unknown\"", + self.parse, "valueRecordDef <unknown> foo;") + + def test_valuerecord_scoping(self): + [foo, liga, smcp] = self.parse( + "valueRecordDef 789 foo;" + "feature liga {valueRecordDef <foo> bar;} liga;" + "feature smcp {valueRecordDef <foo> bar;} smcp;" + ).statements + self.assertEqual(foo.value.xAdvance, 789) + self.assertEqual(liga.statements[0].value.xAdvance, 789) + self.assertEqual(smcp.statements[0].value.xAdvance, 789) + + def test_languagesystem(self): + [langsys] = self.parse("languagesystem latn DEU;").statements + 
self.assertEqual(langsys.script, "latn") + self.assertEqual(langsys.language, "DEU ") + self.assertRaisesRegex( + ParserError, "Expected ';'", + self.parse, "languagesystem latn DEU") + self.assertRaisesRegex( + ParserError, "longer than 4 characters", + self.parse, "languagesystem foobar DEU") + self.assertRaisesRegex( + ParserError, "longer than 4 characters", + self.parse, "languagesystem latn FOOBAR") + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + def parse(self, text): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + self.num_tempfiles += 1 + path = os.path.join(self.tempdir, "tmp%d.fea" % self.num_tempfiles) + with codecs.open(path, "wb", "utf-8") as outfile: + outfile.write(text) + return Parser(path).parse() + + @staticmethod + def getpath(testfile): + path, _ = os.path.split(__file__) + return os.path.join(path, "testdata", testfile) + + +class SymbolTableTest(unittest.TestCase): + def test_scopes(self): + symtab = SymbolTable() + symtab.define("foo", 23) + self.assertEqual(symtab.resolve("foo"), 23) + symtab.enter_scope() + self.assertEqual(symtab.resolve("foo"), 23) + symtab.define("foo", 42) + self.assertEqual(symtab.resolve("foo"), 42) + symtab.exit_scope() + self.assertEqual(symtab.resolve("foo"), 23) + + def test_resolve_undefined(self): + self.assertEqual(SymbolTable().resolve("abc"), None) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/include0.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/include0.fea --- fonttools-2.4/Snippets/fontTools/feaLib/testdata/include0.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/include0.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1 @@ +I0 diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/include1.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/include1.fea --- 
fonttools-2.4/Snippets/fontTools/feaLib/testdata/include1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/include1.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I1a +include(include0.fea); +I1b diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/include2.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/include2.fea --- fonttools-2.4/Snippets/fontTools/feaLib/testdata/include2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/include2.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I2a +include(include1.fea); +I2b diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/include3.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/include3.fea --- fonttools-2.4/Snippets/fontTools/feaLib/testdata/include3.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/include3.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +I3a +include(include2.fea); +I3b + diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/include4.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/include4.fea --- fonttools-2.4/Snippets/fontTools/feaLib/testdata/include4.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/include4.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +I4a +include(include3.fea); +I4b + diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/include5.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/include5.fea --- fonttools-2.4/Snippets/fontTools/feaLib/testdata/include5.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/include5.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I5a +include(include4.fea); +I5b diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/include6.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/include6.fea --- 
fonttools-2.4/Snippets/fontTools/feaLib/testdata/include6.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/include6.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I6a +include(include5.fea); +I6b diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/includemissingfile.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/includemissingfile.fea --- fonttools-2.4/Snippets/fontTools/feaLib/testdata/includemissingfile.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/includemissingfile.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1 @@ +include(missingfile.fea); diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/includeself.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/includeself.fea --- fonttools-2.4/Snippets/fontTools/feaLib/testdata/includeself.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/includeself.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1 @@ +include(includeself.fea); diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/mini.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/mini.fea --- fonttools-2.4/Snippets/fontTools/feaLib/testdata/mini.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/mini.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,19 @@ +# Example file from OpenType Feature File specification, section 1. 
+# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +# Script and language coverage +languagesystem DFLT dflt; +languagesystem latn dflt; + +# Ligature formation +feature liga { + substitute f i by f_i; + substitute f l by f_l; +} liga; + +# Kerning +feature kern { + position A Y -100; + position a y -80; + position s f' <0 0 10 0> t; +} kern; diff -Nru fonttools-2.4/Snippets/fontTools/feaLib/testdata/spec5fi.fea fonttools-3.0/Snippets/fontTools/feaLib/testdata/spec5fi.fea --- fonttools-2.4/Snippets/fontTools/feaLib/testdata/spec5fi.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/feaLib/testdata/spec5fi.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,18 @@ +# OpenType Feature File specification, section 5.f.i, example 1 +# "Specifying a Chain Sub rule and marking sub-runs" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +lookup CNTXT_LIGS { + substitute f i by f_i; + substitute c t by c_t; + } CNTXT_LIGS; + +lookup CNTXT_SUB { + substitute n by n.end; + substitute s by s.end; + } CNTXT_SUB; + +feature test { + substitute [a e i o u] f' lookup CNTXT_LIGS i' n' lookup CNTXT_SUB; + substitute [a e i o u] c' lookup CNTXT_LIGS t' s' lookup CNTXT_SUB; +} test; diff -Nru fonttools-2.4/Snippets/fontTools/__init__.py fonttools-3.0/Snippets/fontTools/__init__.py --- fonttools-2.4/Snippets/fontTools/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +version = "3.0" diff -Nru fonttools-2.4/Snippets/fontTools/inspect.py fonttools-3.0/Snippets/fontTools/inspect.py --- fonttools-2.4/Snippets/fontTools/inspect.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/inspect.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,265 @@ +# Copyright 2013 Google, Inc. 
All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +"""GUI font inspector. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import misc, ttLib, cffLib +import pygtk +pygtk.require('2.0') +import gtk +import sys + + +class Row(object): + def __init__(self, parent, index, key, value, font): + self._parent = parent + self._index = index + self._key = key + self._value = value + self._font = font + + if isinstance(value, ttLib.TTFont): + self._add_font(value) + return + + if not isinstance(value, basestring): + # Try sequences + is_sequence = True + try: + len(value) + iter(value) + # It's hard to differentiate list-type sequences + # from dict-type ones. Try fetching item 0. + value[0] + except (TypeError, AttributeError, KeyError, IndexError): + is_sequence = False + if is_sequence: + self._add_list(key, value) + return + if hasattr(value, '__dict__'): + self._add_object(key, value) + return + if hasattr(value, 'items'): + self._add_dict(key, value) + return + + if isinstance(value, basestring): + self._value_str = '"'+value+'"' + self._children = [] + return + + # Everything else + self._children = [] + + def _filter_items(self): + items = [] + for k,v in self._items: + if isinstance(v, ttLib.TTFont): + continue + if k in ['reader', 'file', 'tableTag', 'compileStatus', 'recurse']: + continue + if isinstance(k, basestring) and k[0] == '_': + continue + items.append((k,v)) + self._items = items + + def _add_font(self, font): + self._items = [(tag,font[tag]) for tag in font.keys()] + + def _add_object(self, key, value): + # Make sure item is decompiled + try: + value["asdf"] + except (AttributeError, KeyError, TypeError, ttLib.TTLibError): + pass + if isinstance(value, ttLib.getTableModule('glyf').Glyph): + # Glyph type needs explicit expanding to be useful + value.expand(self._font['glyf']) + if isinstance(value, misc.psCharStrings.T2CharString): + try: + value.decompile() + except 
TypeError: # Subroutines can't be decompiled + pass + if isinstance(value, cffLib.BaseDict): + for k in value.rawDict.keys(): + getattr(value, k) + if isinstance(value, cffLib.Index): + # Load all items + for i in range(len(value)): + value[i] + # Discard offsets as should not be needed anymore + if hasattr(value, 'offsets'): + del value.offsets + + self._value_str = value.__class__.__name__ + if isinstance(value, ttLib.tables.DefaultTable.DefaultTable): + self._value_str += ' (%d Bytes)' % self._font.reader.tables[key].length + self._items = sorted(value.__dict__.items()) + self._filter_items() + + def _add_dict(self, key, value): + self._value_str = '%s of %d items' % (value.__class__.__name__, len(value)) + self._items = sorted(value.items()) + + def _add_list(self, key, value): + if len(value) and len(value) <= 32: + self._value_str = str(value) + else: + self._value_str = '%s of %d items' % (value.__class__.__name__, len(value)) + self._items = list(enumerate(value)) + + def __len__(self): + if hasattr(self, '_children'): + return len(self._children) + if hasattr(self, '_items'): + return len(self._items) + assert False + + def _ensure_children(self): + if hasattr(self, '_children'): + return + children = [] + for i,(k,v) in enumerate(self._items): + children.append(Row(self, i, k, v, self._font)) + self._children = children + del self._items + + def __getitem__(self, n): + if n >= len(self): + return None + if not hasattr(self, '_children'): + self._children = [None] * len(self) + c = self._children[n] + if c is None: + k,v = self._items[n] + c = self._children[n] = Row(self, n, k, v, self._font) + self._items[n] = None + return c + + def get_parent(self): + return self._parent + + def get_index(self): + return self._index + + def get_key(self): + return self._key + + def get_value(self): + return self._value + + def get_value_str(self): + if hasattr(self,'_value_str'): + return self._value_str + return str(self._value) + +class 
FontTreeModel(gtk.GenericTreeModel): + + __gtype_name__ = 'FontTreeModel' + + def __init__(self, font): + super(FontTreeModel, self).__init__() + self._columns = (str, str) + self.font = font + self._root = Row(None, 0, "font", font, font) + + def on_get_flags(self): + return 0 + + def on_get_n_columns(self): + return len(self._columns) + + def on_get_column_type(self, index): + return self._columns[index] + + def on_get_iter(self, path): + rowref = self._root + while path: + rowref = rowref[path[0]] + path = path[1:] + return rowref + + def on_get_path(self, rowref): + path = [] + while rowref != self._root: + path.append(rowref.get_index()) + rowref = rowref.get_parent() + path.reverse() + return tuple(path) + + def on_get_value(self, rowref, column): + if column == 0: + return rowref.get_key() + else: + return rowref.get_value_str() + + def on_iter_next(self, rowref): + return rowref.get_parent()[rowref.get_index() + 1] + + def on_iter_children(self, rowref): + return rowref[0] + + def on_iter_has_child(self, rowref): + return bool(len(rowref)) + + def on_iter_n_children(self, rowref): + return len(rowref) + + def on_iter_nth_child(self, rowref, n): + if not rowref: rowref = self._root + return rowref[n] + + def on_iter_parent(self, rowref): + return rowref.get_parent() + +class Inspect(object): + + def _delete_event(self, widget, event, data=None): + gtk.main_quit() + return False + + def __init__(self, fontfile): + + self.window = gtk.Window(gtk.WINDOW_TOPLEVEL) + self.window.set_title("%s - pyftinspect" % fontfile) + self.window.connect("delete_event", self._delete_event) + self.window.set_size_request(400, 600) + + self.scrolled_window = gtk.ScrolledWindow() + self.window.add(self.scrolled_window) + + self.font = ttLib.TTFont(fontfile, lazy=True) + self.treemodel = FontTreeModel(self.font) + self.treeview = gtk.TreeView(self.treemodel) + #self.treeview.set_reorderable(True) + + for i in range(2): + col_name = ('Key', 'Value')[i] + col = 
gtk.TreeViewColumn(col_name) + col.set_sort_column_id(-1) + self.treeview.append_column(col) + + cell = gtk.CellRendererText() + col.pack_start(cell, True) + col.add_attribute(cell, 'text', i) + + self.treeview.set_search_column(1) + self.scrolled_window.add(self.treeview) + self.window.show_all() + +def main(args=None): + if args is None: + args = sys.argv[1:] + if len(args) < 1: + print("usage: pyftinspect font...", file=sys.stderr) + sys.exit(1) + for arg in args: + Inspect(arg) + gtk.main() + +if __name__ == "__main__": + main() diff -Nru fonttools-2.4/Snippets/fontTools/merge.py fonttools-3.0/Snippets/fontTools/merge.py --- fonttools-2.4/Snippets/fontTools/merge.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/merge.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,949 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod, Roozbeh Pournader + +"""Font merger. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.timeTools import timestampNow +from fontTools import ttLib, cffLib +from fontTools.ttLib.tables import otTables, _h_e_a_d +from fontTools.ttLib.tables.DefaultTable import DefaultTable +from functools import reduce +import sys +import time +import operator + + +def _add_method(*clazzes, **kwargs): + """Returns a decorator function that adds a new method to one or + more classes.""" + allowDefault = kwargs.get('allowDefaultTable', False) + def wrapper(method): + for clazz in clazzes: + assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.' + assert method.__name__ not in clazz.__dict__, \ + "Oops, class '%s' has method '%s'." 
% (clazz.__name__, method.__name__) + setattr(clazz, method.__name__, method) + return None + return wrapper + +# General utility functions for merging values from different fonts + +def equal(lst): + lst = list(lst) + t = iter(lst) + first = next(t) + assert all(item == first for item in t), "Expected all items to be equal: %s" % lst + return first + +def first(lst): + return next(iter(lst)) + +def recalculate(lst): + return NotImplemented + +def current_time(lst): + return timestampNow() + +def bitwise_and(lst): + return reduce(operator.and_, lst) + +def bitwise_or(lst): + return reduce(operator.or_, lst) + +def avg_int(lst): + lst = list(lst) + return sum(lst) // len(lst) + +def onlyExisting(func): + """Returns a filter func that when called with a list, + only calls func on the non-NotImplemented items of the list, + and only so if there's at least one item remaining. + Otherwise returns NotImplemented.""" + + def wrapper(lst): + items = [item for item in lst if item is not NotImplemented] + return func(items) if items else NotImplemented + + return wrapper + +def sumLists(lst): + l = [] + for item in lst: + l.extend(item) + return l + +def sumDicts(lst): + d = {} + for item in lst: + d.update(item) + return d + +def mergeObjects(lst): + lst = [item for item in lst if item is not NotImplemented] + if not lst: + return NotImplemented + lst = [item for item in lst if item is not None] + if not lst: + return None + + clazz = lst[0].__class__ + assert all(type(item) == clazz for item in lst), lst + + logic = clazz.mergeMap + returnTable = clazz() + returnDict = {} + + allKeys = set.union(set(), *(vars(table).keys() for table in lst)) + for key in allKeys: + try: + mergeLogic = logic[key] + except KeyError: + try: + mergeLogic = logic['*'] + except KeyError: + raise Exception("Don't know how to merge key %s of class %s" % + (key, clazz.__name__)) + if mergeLogic is NotImplemented: + continue + value = mergeLogic(getattr(table, key, NotImplemented) for table in lst) 
+ if value is not NotImplemented: + returnDict[key] = value + + returnTable.__dict__ = returnDict + + return returnTable + +def mergeBits(bitmap): + + def wrapper(lst): + lst = list(lst) + returnValue = 0 + for bitNumber in range(bitmap['size']): + try: + mergeLogic = bitmap[bitNumber] + except KeyError: + try: + mergeLogic = bitmap['*'] + except KeyError: + raise Exception("Don't know how to merge bit %s" % bitNumber) + shiftedBit = 1 << bitNumber + mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst) + returnValue |= mergedValue << bitNumber + return returnValue + + return wrapper + + +@_add_method(DefaultTable, allowDefaultTable=True) +def merge(self, m, tables): + if not hasattr(self, 'mergeMap'): + m.log("Don't know how to merge '%s'." % self.tableTag) + return NotImplemented + + logic = self.mergeMap + + if isinstance(logic, dict): + return m.mergeObjects(self, self.mergeMap, tables) + else: + return logic(tables) + + +ttLib.getTableClass('maxp').mergeMap = { + '*': max, + 'tableTag': equal, + 'tableVersion': equal, + 'numGlyphs': sum, + 'maxStorage': first, + 'maxFunctionDefs': first, + 'maxInstructionDefs': first, + # TODO When we correctly merge hinting data, update these values: + # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions +} + +headFlagsMergeBitMap = { + 'size': 16, + '*': bitwise_or, + 1: bitwise_and, # Baseline at y = 0 + 2: bitwise_and, # lsb at x = 0 + 3: bitwise_and, # Force ppem to integer values. FIXME? + 5: bitwise_and, # Font is vertical + 6: lambda bit: 0, # Always set to zero + 11: bitwise_and, # Font data is 'lossless' + 13: bitwise_and, # Optimized for ClearType + 14: bitwise_and, # Last resort font. FIXME? 
equal or first may be better + 15: lambda bit: 0, # Always set to zero +} + +ttLib.getTableClass('head').mergeMap = { + 'tableTag': equal, + 'tableVersion': max, + 'fontRevision': max, + 'checkSumAdjustment': lambda lst: 0, # We need *something* here + 'magicNumber': equal, + 'flags': mergeBits(headFlagsMergeBitMap), + 'unitsPerEm': equal, + 'created': current_time, + 'modified': current_time, + 'xMin': min, + 'yMin': min, + 'xMax': max, + 'yMax': max, + 'macStyle': first, + 'lowestRecPPEM': max, + 'fontDirectionHint': lambda lst: 2, + 'indexToLocFormat': recalculate, + 'glyphDataFormat': equal, +} + +ttLib.getTableClass('hhea').mergeMap = { + '*': equal, + 'tableTag': equal, + 'tableVersion': max, + 'ascent': max, + 'descent': min, + 'lineGap': max, + 'advanceWidthMax': max, + 'minLeftSideBearing': min, + 'minRightSideBearing': min, + 'xMaxExtent': max, + 'caretSlopeRise': first, + 'caretSlopeRun': first, + 'caretOffset': first, + 'numberOfHMetrics': recalculate, +} + +os2FsTypeMergeBitMap = { + 'size': 16, + '*': lambda bit: 0, + 1: bitwise_or, # no embedding permitted + 2: bitwise_and, # allow previewing and printing documents + 3: bitwise_and, # allow editing documents + 8: bitwise_or, # no subsetting permitted + 9: bitwise_or, # no embedding of outlines permitted +} + +def mergeOs2FsType(lst): + lst = list(lst) + if all(item == 0 for item in lst): + return 0 + + # Compute least restrictive logic for each fsType value + for i in range(len(lst)): + # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set + if lst[i] & 0x000C: + lst[i] &= ~0x0002 + # set bit 2 (allow previewing) if bit 3 is set (allow editing) + elif lst[i] & 0x0008: + lst[i] |= 0x0004 + # set bits 2 and 3 if everything is allowed + elif lst[i] == 0: + lst[i] = 0x000C + + fsType = mergeBits(os2FsTypeMergeBitMap)(lst) + # unset bits 2 and 3 if bit 1 is set (some font is "no embedding") + if fsType & 0x0002: + fsType &= ~0x000C + return fsType + + +ttLib.getTableClass('OS/2').mergeMap = { 
+ '*': first, + 'tableTag': equal, + 'version': max, + 'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this + 'fsType': mergeOs2FsType, # Will be overwritten + 'panose': first, # FIXME: should really be the first Latin font + 'ulUnicodeRange1': bitwise_or, + 'ulUnicodeRange2': bitwise_or, + 'ulUnicodeRange3': bitwise_or, + 'ulUnicodeRange4': bitwise_or, + 'fsFirstCharIndex': min, + 'fsLastCharIndex': max, + 'sTypoAscender': max, + 'sTypoDescender': min, + 'sTypoLineGap': max, + 'usWinAscent': max, + 'usWinDescent': max, + # Version 2,3,4 + 'ulCodePageRange1': onlyExisting(bitwise_or), + 'ulCodePageRange2': onlyExisting(bitwise_or), + 'usMaxContex': onlyExisting(max), + # TODO version 5 +} + +@_add_method(ttLib.getTableClass('OS/2')) +def merge(self, m, tables): + DefaultTable.merge(self, m, tables) + if self.version < 2: + # bits 8 and 9 are reserved and should be set to zero + self.fsType &= ~0x0300 + if self.version >= 3: + # Only one of bits 1, 2, and 3 may be set. We already take + # care of bit 1 implications in mergeOs2FsType. So unset + # bit 2 if bit 3 is already set. + if self.fsType & 0x0008: + self.fsType &= ~0x0004 + return self + +ttLib.getTableClass('post').mergeMap = { + '*': first, + 'tableTag': equal, + 'formatType': max, + 'isFixedPitch': min, + 'minMemType42': max, + 'maxMemType42': lambda lst: 0, + 'minMemType1': max, + 'maxMemType1': lambda lst: 0, + 'mapping': onlyExisting(sumDicts), + 'extraNames': lambda lst: [], +} + +ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = { + 'tableTag': equal, + 'metrics': sumDicts, +} + +ttLib.getTableClass('gasp').mergeMap = { + 'tableTag': equal, + 'version': max, + 'gaspRange': first, # FIXME? Appears irreconcilable +} + +ttLib.getTableClass('name').mergeMap = { + 'tableTag': equal, + 'names': first, # FIXME? Does mixing name records make sense? 
+} + +ttLib.getTableClass('loca').mergeMap = { + '*': recalculate, + 'tableTag': equal, +} + +ttLib.getTableClass('glyf').mergeMap = { + 'tableTag': equal, + 'glyphs': sumDicts, + 'glyphOrder': sumLists, +} + +@_add_method(ttLib.getTableClass('glyf')) +def merge(self, m, tables): + for i,table in enumerate(tables): + for g in table.glyphs.values(): + if i: + # Drop hints for all but first font, since + # we don't map functions / CVT values. + g.removeHinting() + # Expand composite glyphs to load their + # composite glyph names. + if g.isComposite(): + g.expand(table) + return DefaultTable.merge(self, m, tables) + +ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst) + +@_add_method(ttLib.getTableClass('cmap')) +def merge(self, m, tables): + # TODO Handle format=14. + cmapTables = [(t,fontIdx) for fontIdx,table in enumerate(tables) for t in table.tables if t.isUnicode()] + # TODO Better handle format-4 and format-12 coexisting in same font. + # TODO Insert both a format-4 and format-12 if needed. + module = ttLib.getTableModule('cmap') + assert all(t.format in [4, 12] for t,_ in cmapTables) + format = max(t.format for t,_ in cmapTables) + cmapTable = module.cmap_classes[format](format) + cmapTable.cmap = {} + cmapTable.platformID = 3 + cmapTable.platEncID = max(t.platEncID for t,_ in cmapTables) + cmapTable.language = 0 + cmap = cmapTable.cmap + for table,fontIdx in cmapTables: + # TODO handle duplicates. + for uni,gid in table.cmap.items(): + oldgid = cmap.get(uni, None) + if oldgid is None: + cmap[uni] = gid + elif oldgid != gid: + # Char previously mapped to oldgid, now to gid. + # Record, to fix up in GSUB 'locl' later. 
+ assert m.duplicateGlyphsPerFont[fontIdx].get(oldgid, gid) == gid + m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid + self.tableVersion = 0 + self.tables = [cmapTable] + self.numSubTables = len(self.tables) + return self + + +otTables.ScriptList.mergeMap = { + 'ScriptCount': sum, + 'ScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.ScriptTag), +} +otTables.BaseScriptList.mergeMap = { + 'BaseScriptCount': sum, + 'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag), +} + +otTables.FeatureList.mergeMap = { + 'FeatureCount': sum, + 'FeatureRecord': sumLists, +} + +otTables.LookupList.mergeMap = { + 'LookupCount': sum, + 'Lookup': sumLists, +} + +otTables.Coverage.mergeMap = { + 'glyphs': sumLists, +} + +otTables.ClassDef.mergeMap = { + 'classDefs': sumDicts, +} + +otTables.LigCaretList.mergeMap = { + 'Coverage': mergeObjects, + 'LigGlyphCount': sum, + 'LigGlyph': sumLists, +} + +otTables.AttachList.mergeMap = { + 'Coverage': mergeObjects, + 'GlyphCount': sum, + 'AttachPoint': sumLists, +} + +# XXX Renumber MarkFilterSets of lookups +otTables.MarkGlyphSetsDef.mergeMap = { + 'MarkSetTableFormat': equal, + 'MarkSetCount': sum, + 'Coverage': sumLists, +} + +otTables.Axis.mergeMap = { + '*': mergeObjects, +} + +# XXX Fix BASE table merging +otTables.BaseTagList.mergeMap = { + 'BaseTagCount': sum, + 'BaselineTag': sumLists, +} + +otTables.GDEF.mergeMap = \ +otTables.GSUB.mergeMap = \ +otTables.GPOS.mergeMap = \ +otTables.BASE.mergeMap = \ +otTables.JSTF.mergeMap = \ +otTables.MATH.mergeMap = \ +{ + '*': mergeObjects, + 'Version': max, +} + +ttLib.getTableClass('GDEF').mergeMap = \ +ttLib.getTableClass('GSUB').mergeMap = \ +ttLib.getTableClass('GPOS').mergeMap = \ +ttLib.getTableClass('BASE').mergeMap = \ +ttLib.getTableClass('JSTF').mergeMap = \ +ttLib.getTableClass('MATH').mergeMap = \ +{ + 'tableTag': onlyExisting(equal), # XXX clean me up + 'table': mergeObjects, +} + +@_add_method(ttLib.getTableClass('GSUB')) +def 
merge(self, m, tables): + + assert len(tables) == len(m.duplicateGlyphsPerFont) + for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)): + if not dups: continue + assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB" % (i + 1) + lookupMap = {id(v):v for v in table.table.LookupList.Lookup} + featureMap = {id(v):v for v in table.table.FeatureList.FeatureRecord} + synthFeature = None + synthLookup = None + for script in table.table.ScriptList.ScriptRecord: + if script.ScriptTag == 'DFLT': continue # XXX + for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]: + feature = [featureMap[v] for v in langsys.FeatureIndex if featureMap[v].FeatureTag == 'locl'] + assert len(feature) <= 1 + if feature: + feature = feature[0] + else: + if not synthFeature: + synthFeature = otTables.FeatureRecord() + synthFeature.FeatureTag = 'locl' + f = synthFeature.Feature = otTables.Feature() + f.FeatureParams = None + f.LookupCount = 0 + f.LookupListIndex = [] + langsys.FeatureIndex.append(id(synthFeature)) + featureMap[id(synthFeature)] = synthFeature + langsys.FeatureIndex.sort(key=lambda v: featureMap[v].FeatureTag) + table.table.FeatureList.FeatureRecord.append(synthFeature) + table.table.FeatureList.FeatureCount += 1 + feature = synthFeature + + if not synthLookup: + subtable = otTables.SingleSubst() + subtable.mapping = dups + synthLookup = otTables.Lookup() + synthLookup.LookupFlag = 0 + synthLookup.LookupType = 1 + synthLookup.SubTableCount = 1 + synthLookup.SubTable = [subtable] + table.table.LookupList.Lookup.append(synthLookup) + table.table.LookupList.LookupCount += 1 + + feature.Feature.LookupListIndex[:0] = [id(synthLookup)] + feature.Feature.LookupCount += 1 + + DefaultTable.merge(self, m, tables) + return self + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + 
otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def mapLookups(self, lookupMap): + pass + +# Copied and trimmed down from subset.py +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def __merge_classify_context(self): + + class ContextHelper(object): + def __init__(self, klass, Format): + if klass.__name__.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klass.__name__.startswith('Chain'): + Chain = 'Chain' + else: + Chain = '' + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleSet = ChainTyp+'RuleSet' + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleSet = ChainTyp+'ClassSet' + + if self.Format not in [1, 2, 3]: + return None # Don't shoot the messenger; let it go + if not hasattr(self.__class__, "__ContextHelpers"): + self.__class__.__ContextHelpers = {} + if self.Format not in self.__class__.__ContextHelpers: + helper = ContextHelper(self.__class__, self.Format) + self.__class__.__ContextHelpers[self.Format] = helper + return self.__class__.__ContextHelpers[self.Format] + + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def mapLookups(self, lookupMap): + c = self.__merge_classify_context() + + if self.Format in [1, 2]: + for rs in getattr(self, c.RuleSet): + if not rs: continue + for r in getattr(rs, c.Rule): + if not r: continue + for ll in getattr(r, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookupMap[ll.LookupListIndex] + elif self.Format == 3: + for ll in getattr(self, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookupMap[ll.LookupListIndex] + 
else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def mapLookups(self, lookupMap): + if self.Format == 1: + self.ExtSubTable.mapLookups(lookupMap) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Lookup) +def mapLookups(self, lookupMap): + for st in self.SubTable: + if not st: continue + st.mapLookups(lookupMap) + +@_add_method(otTables.LookupList) +def mapLookups(self, lookupMap): + for l in self.Lookup: + if not l: continue + l.mapLookups(lookupMap) + +@_add_method(otTables.Feature) +def mapLookups(self, lookupMap): + self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex] + +@_add_method(otTables.FeatureList) +def mapLookups(self, lookupMap): + for f in self.FeatureRecord: + if not f or not f.Feature: continue + f.Feature.mapLookups(lookupMap) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def mapFeatures(self, featureMap): + self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex] + if self.ReqFeatureIndex != 65535: + self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex] + +@_add_method(otTables.Script) +def mapFeatures(self, featureMap): + if self.DefaultLangSys: + self.DefaultLangSys.mapFeatures(featureMap) + for l in self.LangSysRecord: + if not l or not l.LangSys: continue + l.LangSys.mapFeatures(featureMap) + +@_add_method(otTables.ScriptList) +def mapFeatures(self, featureMap): + for s in self.ScriptRecord: + if not s or not s.Script: continue + s.Script.mapFeatures(featureMap) + + +class Options(object): + + class UnknownOptionError(Exception): + pass + + def __init__(self, **kwargs): + + self.set(**kwargs) + + def set(self, **kwargs): + for k,v in kwargs.items(): + if not hasattr(self, k): + raise self.UnknownOptionError("Unknown option '%s'" % k) + setattr(self, k, v) + + def parse_opts(self, argv, ignore_unknown=False): + ret = [] + opts = {} + for a in argv: + orig_a = a + if not a.startswith('--'): + ret.append(a) + 
continue + a = a[2:] + i = a.find('=') + op = '=' + if i == -1: + if a.startswith("no-"): + k = a[3:] + v = False + else: + k = a + v = True + else: + k = a[:i] + if k[-1] in "-+": + op = k[-1]+'=' # Ops is '-=' or '+=' now. + k = k[:-1] + v = a[i+1:] + k = k.replace('-', '_') + if not hasattr(self, k): + if ignore_unknown is True or k in ignore_unknown: + ret.append(orig_a) + continue + else: + raise self.UnknownOptionError("Unknown option '%s'" % a) + + ov = getattr(self, k) + if isinstance(ov, bool): + v = bool(v) + elif isinstance(ov, int): + v = int(v) + elif isinstance(ov, list): + vv = v.split(',') + if vv == ['']: + vv = [] + vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] + if op == '=': + v = vv + elif op == '+=': + v = ov + v.extend(vv) + elif op == '-=': + v = ov + for x in vv: + if x in v: + v.remove(x) + else: + assert 0 + + opts[k] = v + self.set(**opts) + + return ret + + +class Merger(object): + + def __init__(self, options=None, log=None): + + if not log: + log = Logger() + if not options: + options = Options() + + self.options = options + self.log = log + + def merge(self, fontfiles): + + mega = ttLib.TTFont() + + # + # Settle on a mega glyph order. + # + fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] + glyphOrders = [font.getGlyphOrder() for font in fonts] + megaGlyphOrder = self._mergeGlyphOrders(glyphOrders) + # Reload fonts and set new glyph names on them. + # TODO Is it necessary to reload font? I think it is. At least + # it's safer, in case tables were loaded to provide glyph names. 
+ fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] + for font,glyphOrder in zip(fonts, glyphOrders): + font.setGlyphOrder(glyphOrder) + mega.setGlyphOrder(megaGlyphOrder) + + for font in fonts: + self._preMerge(font) + + self.duplicateGlyphsPerFont = [{} for f in fonts] + + allTags = reduce(set.union, (list(font.keys()) for font in fonts), set()) + allTags.remove('GlyphOrder') + + # Make sure we process cmap before GSUB as we have a dependency there. + if 'GSUB' in allTags: + allTags.remove('GSUB') + allTags = ['GSUB'] + list(allTags) + if 'cmap' in allTags: + allTags.remove('cmap') + allTags = ['cmap'] + list(allTags) + + for tag in allTags: + + tables = [font.get(tag, NotImplemented) for font in fonts] + + clazz = ttLib.getTableClass(tag) + table = clazz(tag).merge(self, tables) + # XXX Clean this up and use: table = mergeObjects(tables) + + if table is not NotImplemented and table is not False: + mega[tag] = table + self.log("Merged '%s'." % tag) + else: + self.log("Dropped '%s'." % tag) + self.log.lapse("merge '%s'" % tag) + + del self.duplicateGlyphsPerFont + + self._postMerge(mega) + + return mega + + def _mergeGlyphOrders(self, glyphOrders): + """Modifies passed-in glyphOrders to reflect new glyph names. + Returns glyphOrder for the merged font.""" + # Simply append font index to the glyph name for now. + # TODO Even this simplistic numbering can result in conflicts. + # But then again, we have to improve this soon anyway. + mega = [] + for n,glyphOrder in enumerate(glyphOrders): + for i,glyphName in enumerate(glyphOrder): + glyphName += "#" + repr(n) + glyphOrder[i] = glyphName + mega.append(glyphName) + return mega + + def mergeObjects(self, returnTable, logic, tables): + # Right now we don't use self at all. Will use in the future + # for options and logging. 
+ + allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented)) + for key in allKeys: + try: + mergeLogic = logic[key] + except KeyError: + try: + mergeLogic = logic['*'] + except KeyError: + raise Exception("Don't know how to merge key %s of class %s" % + (key, returnTable.__class__.__name__)) + if mergeLogic is NotImplemented: + continue + value = mergeLogic(getattr(table, key, NotImplemented) for table in tables) + if value is not NotImplemented: + setattr(returnTable, key, value) + + return returnTable + + def _preMerge(self, font): + + # Map indices to references + + GDEF = font.get('GDEF') + GSUB = font.get('GSUB') + GPOS = font.get('GPOS') + + for t in [GSUB, GPOS]: + if not t: continue + + if t.table.LookupList: + lookupMap = {i:id(v) for i,v in enumerate(t.table.LookupList.Lookup)} + t.table.LookupList.mapLookups(lookupMap) + if t.table.FeatureList: + # XXX Handle present FeatureList but absent LookupList + t.table.FeatureList.mapLookups(lookupMap) + + if t.table.FeatureList and t.table.ScriptList: + featureMap = {i:id(v) for i,v in enumerate(t.table.FeatureList.FeatureRecord)} + t.table.ScriptList.mapFeatures(featureMap) + + # TODO GDEF/Lookup MarkFilteringSets + # TODO FeatureParams nameIDs + + def _postMerge(self, font): + + # Map references back to indices + + GDEF = font.get('GDEF') + GSUB = font.get('GSUB') + GPOS = font.get('GPOS') + + for t in [GSUB, GPOS]: + if not t: continue + + if t.table.LookupList: + lookupMap = {id(v):i for i,v in enumerate(t.table.LookupList.Lookup)} + t.table.LookupList.mapLookups(lookupMap) + if t.table.FeatureList: + # XXX Handle present FeatureList but absent LookupList + t.table.FeatureList.mapLookups(lookupMap) + + if t.table.FeatureList and t.table.ScriptList: + # XXX Handle present ScriptList but absent FeatureList + featureMap = {id(v):i for i,v in enumerate(t.table.FeatureList.FeatureRecord)} + t.table.ScriptList.mapFeatures(featureMap) + + # TODO GDEF/Lookup 
MarkFilteringSets + # TODO FeatureParams nameIDs + + +class Logger(object): + + def __init__(self, verbose=False, xml=False, timing=False): + self.verbose = verbose + self.xml = xml + self.timing = timing + self.last_time = self.start_time = time.time() + + def parse_opts(self, argv): + argv = argv[:] + for v in ['verbose', 'xml', 'timing']: + if "--"+v in argv: + setattr(self, v, True) + argv.remove("--"+v) + return argv + + def __call__(self, *things): + if not self.verbose: + return + print(' '.join(str(x) for x in things)) + + def lapse(self, *things): + if not self.timing: + return + new_time = time.time() + print("Took %0.3fs to %s" %(new_time - self.last_time, + ' '.join(str(x) for x in things))) + self.last_time = new_time + + def font(self, font, file=sys.stdout): + if not self.xml: + return + from fontTools.misc import xmlWriter + writer = xmlWriter.XMLWriter(file) + font.disassembleInstructions = False # Work around ttLib bug + for tag in font.keys(): + writer.begintag(tag) + writer.newline() + font[tag].toXML(writer, font) + writer.endtag(tag) + writer.newline() + + +__all__ = [ + 'Options', + 'Merger', + 'Logger', + 'main' +] + +def main(args=None): + + if args is None: + args = sys.argv[1:] + + log = Logger() + args = log.parse_opts(args) + + options = Options() + args = options.parse_opts(args) + + if len(args) < 1: + print("usage: pyftmerge font...", file=sys.stderr) + sys.exit(1) + + merger = Merger(options=options, log=log) + font = merger.merge(args) + outfile = 'merged.ttf' + font.save(outfile) + log.lapse("compile and save font") + + log.last_time = log.start_time + log.lapse("make one with everything(TOTAL TIME)") + +if __name__ == "__main__": + main() diff -Nru fonttools-2.4/Snippets/fontTools/misc/arrayTools.py fonttools-3.0/Snippets/fontTools/misc/arrayTools.py --- fonttools-2.4/Snippets/fontTools/misc/arrayTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/arrayTools.py 2015-08-31 17:57:15.000000000 
+0000 @@ -0,0 +1,185 @@ +# +# Various array and rectangle tools, but mostly rectangles, hence the +# name of this module (not). +# + + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import math + +def calcBounds(array): + """Return the bounding rectangle of a 2D points array as a tuple: + (xMin, yMin, xMax, yMax) + """ + if len(array) == 0: + return 0, 0, 0, 0 + xs = [x for x, y in array] + ys = [y for x, y in array] + return min(xs), min(ys), max(xs), max(ys) + +def calcIntBounds(array): + """Return the integer bounding rectangle of a 2D points array as a + tuple: (xMin, yMin, xMax, yMax) + """ + xMin, yMin, xMax, yMax = calcBounds(array) + xMin = int(math.floor(xMin)) + xMax = int(math.ceil(xMax)) + yMin = int(math.floor(yMin)) + yMax = int(math.ceil(yMax)) + return xMin, yMin, xMax, yMax + + +def updateBounds(bounds, p, min=min, max=max): + """Return the bounding recangle of rectangle bounds and point (x, y).""" + (x, y) = p + xMin, yMin, xMax, yMax = bounds + return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y) + +def pointInRect(p, rect): + """Return True when point (x, y) is inside rect.""" + (x, y) = p + xMin, yMin, xMax, yMax = rect + return (xMin <= x <= xMax) and (yMin <= y <= yMax) + +def pointsInRect(array, rect): + """Find out which points or array are inside rect. + Returns an array with a boolean for each point. 
+ """ + if len(array) < 1: + return [] + xMin, yMin, xMax, yMax = rect + return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array] + +def vectorLength(vector): + """Return the length of the given vector.""" + x, y = vector + return math.sqrt(x**2 + y**2) + +def asInt16(array): + """Round and cast to 16 bit integer.""" + return [int(math.floor(i+0.5)) for i in array] + + +def normRect(rect): + """Normalize the rectangle so that the following holds: + xMin <= xMax and yMin <= yMax + """ + (xMin, yMin, xMax, yMax) = rect + return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax) + +def scaleRect(rect, x, y): + """Scale the rectangle by x, y.""" + (xMin, yMin, xMax, yMax) = rect + return xMin * x, yMin * y, xMax * x, yMax * y + +def offsetRect(rect, dx, dy): + """Offset the rectangle by dx, dy.""" + (xMin, yMin, xMax, yMax) = rect + return xMin+dx, yMin+dy, xMax+dx, yMax+dy + +def insetRect(rect, dx, dy): + """Inset the rectangle by dx, dy on all sides.""" + (xMin, yMin, xMax, yMax) = rect + return xMin+dx, yMin+dy, xMax-dx, yMax-dy + +def sectRect(rect1, rect2): + """Return a boolean and a rectangle. If the input rectangles intersect, return + True and the intersecting rectangle. Return False and (0, 0, 0, 0) if the input + rectangles don't intersect. + """ + (xMin1, yMin1, xMax1, yMax1) = rect1 + (xMin2, yMin2, xMax2, yMax2) = rect2 + xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2), + min(xMax1, xMax2), min(yMax1, yMax2)) + if xMin >= xMax or yMin >= yMax: + return False, (0, 0, 0, 0) + return True, (xMin, yMin, xMax, yMax) + +def unionRect(rect1, rect2): + """Return the smallest rectangle in which both input rectangles are fully + enclosed. In other words, return the total bounding rectangle of both input + rectangles. 
+ """ + (xMin1, yMin1, xMax1, yMax1) = rect1 + (xMin2, yMin2, xMax2, yMax2) = rect2 + xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2), + max(xMax1, xMax2), max(yMax1, yMax2)) + return (xMin, yMin, xMax, yMax) + +def rectCenter(rect0): + """Return the center of the rectangle as an (x, y) coordinate.""" + (xMin, yMin, xMax, yMax) = rect0 + return (xMin+xMax)/2, (yMin+yMax)/2 + +def intRect(rect1): + """Return the rectangle, rounded off to integer values, but guaranteeing that + the resulting rectangle is NOT smaller than the original. + """ + (xMin, yMin, xMax, yMax) = rect1 + xMin = int(math.floor(xMin)) + yMin = int(math.floor(yMin)) + xMax = int(math.ceil(xMax)) + yMax = int(math.ceil(yMax)) + return (xMin, yMin, xMax, yMax) + + +def _test(): + """ + >>> import math + >>> calcBounds([]) + (0, 0, 0, 0) + >>> calcBounds([(0, 40), (0, 100), (50, 50), (80, 10)]) + (0, 10, 80, 100) + >>> updateBounds((0, 0, 0, 0), (100, 100)) + (0, 0, 100, 100) + >>> pointInRect((50, 50), (0, 0, 100, 100)) + True + >>> pointInRect((0, 0), (0, 0, 100, 100)) + True + >>> pointInRect((100, 100), (0, 0, 100, 100)) + True + >>> not pointInRect((101, 100), (0, 0, 100, 100)) + True + >>> list(pointsInRect([(50, 50), (0, 0), (100, 100), (101, 100)], (0, 0, 100, 100))) + [True, True, True, False] + >>> vectorLength((3, 4)) + 5.0 + >>> vectorLength((1, 1)) == math.sqrt(2) + True + >>> list(asInt16([0, 0.1, 0.5, 0.9])) + [0, 0, 1, 1] + >>> normRect((0, 10, 100, 200)) + (0, 10, 100, 200) + >>> normRect((100, 200, 0, 10)) + (0, 10, 100, 200) + >>> scaleRect((10, 20, 50, 150), 1.5, 2) + (15.0, 40, 75.0, 300) + >>> offsetRect((10, 20, 30, 40), 5, 6) + (15, 26, 35, 46) + >>> insetRect((10, 20, 50, 60), 5, 10) + (15, 30, 45, 50) + >>> insetRect((10, 20, 50, 60), -5, -10) + (5, 10, 55, 70) + >>> intersects, rect = sectRect((0, 10, 20, 30), (0, 40, 20, 50)) + >>> not intersects + True + >>> intersects, rect = sectRect((0, 10, 20, 30), (5, 20, 35, 50)) + >>> intersects + 1 + >>> rect + (5, 
20, 20, 30) + >>> unionRect((0, 10, 20, 30), (0, 40, 20, 50)) + (0, 10, 20, 50) + >>> rectCenter((0, 0, 100, 200)) + (50.0, 100.0) + >>> rectCenter((0, 0, 100, 199.0)) + (50.0, 99.5) + >>> intRect((0.9, 2.9, 3.1, 4.1)) + (0, 2, 4, 5) + """ + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Snippets/fontTools/misc/bezierTools.py fonttools-3.0/Snippets/fontTools/misc/bezierTools.py --- fonttools-2.4/Snippets/fontTools/misc/bezierTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/bezierTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,414 @@ +"""fontTools.misc.bezierTools.py -- tools for working with bezier path segments. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = [ + "calcQuadraticBounds", + "calcCubicBounds", + "splitLine", + "splitQuadratic", + "splitCubic", + "splitQuadraticAtT", + "splitCubicAtT", + "solveQuadratic", + "solveCubic", +] + +from fontTools.misc.arrayTools import calcBounds + +epsilon = 1e-12 + + +def calcQuadraticBounds(pt1, pt2, pt3): + """Return the bounding rectangle for a qudratic bezier segment. + pt1 and pt3 are the "anchor" points, pt2 is the "handle". + + >>> calcQuadraticBounds((0, 0), (50, 100), (100, 0)) + (0, 0, 100, 50.0) + >>> calcQuadraticBounds((0, 0), (100, 0), (100, 100)) + (0.0, 0.0, 100, 100) + """ + (ax, ay), (bx, by), (cx, cy) = calcQuadraticParameters(pt1, pt2, pt3) + ax2 = ax*2.0 + ay2 = ay*2.0 + roots = [] + if ax2 != 0: + roots.append(-bx/ax2) + if ay2 != 0: + roots.append(-by/ay2) + points = [(ax*t*t + bx*t + cx, ay*t*t + by*t + cy) for t in roots if 0 <= t < 1] + [pt1, pt3] + return calcBounds(points) + + +def calcCubicBounds(pt1, pt2, pt3, pt4): + """Return the bounding rectangle for a cubic bezier segment. + pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles". 
+ + >>> calcCubicBounds((0, 0), (25, 100), (75, 100), (100, 0)) + (0, 0, 100, 75.0) + >>> calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100)) + (0.0, 0.0, 100, 100) + >>> print("%f %f %f %f" % calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0))) + 35.566243 0.000000 64.433757 75.000000 + """ + (ax, ay), (bx, by), (cx, cy), (dx, dy) = calcCubicParameters(pt1, pt2, pt3, pt4) + # calc first derivative + ax3 = ax * 3.0 + ay3 = ay * 3.0 + bx2 = bx * 2.0 + by2 = by * 2.0 + xRoots = [t for t in solveQuadratic(ax3, bx2, cx) if 0 <= t < 1] + yRoots = [t for t in solveQuadratic(ay3, by2, cy) if 0 <= t < 1] + roots = xRoots + yRoots + + points = [(ax*t*t*t + bx*t*t + cx * t + dx, ay*t*t*t + by*t*t + cy * t + dy) for t in roots] + [pt1, pt4] + return calcBounds(points) + + +def splitLine(pt1, pt2, where, isHorizontal): + """Split the line between pt1 and pt2 at position 'where', which + is an x coordinate if isHorizontal is False, a y coordinate if + isHorizontal is True. Return a list of two line segments if the + line was successfully split, or a list containing the original + line. 
+ + >>> printSegments(splitLine((0, 0), (100, 100), 50, True)) + ((0, 0), (50, 50)) + ((50, 50), (100, 100)) + >>> printSegments(splitLine((0, 0), (100, 100), 100, True)) + ((0, 0), (100, 100)) + >>> printSegments(splitLine((0, 0), (100, 100), 0, True)) + ((0, 0), (0, 0)) + ((0, 0), (100, 100)) + >>> printSegments(splitLine((0, 0), (100, 100), 0, False)) + ((0, 0), (0, 0)) + ((0, 0), (100, 100)) + >>> printSegments(splitLine((100, 0), (0, 0), 50, False)) + ((100, 0), (50, 0)) + ((50, 0), (0, 0)) + >>> printSegments(splitLine((0, 100), (0, 0), 50, True)) + ((0, 100), (0, 50)) + ((0, 50), (0, 0)) + """ + pt1x, pt1y = pt1 + pt2x, pt2y = pt2 + + ax = (pt2x - pt1x) + ay = (pt2y - pt1y) + + bx = pt1x + by = pt1y + + a = (ax, ay)[isHorizontal] + + if a == 0: + return [(pt1, pt2)] + t = (where - (bx, by)[isHorizontal]) / a + if 0 <= t < 1: + midPt = ax * t + bx, ay * t + by + return [(pt1, midPt), (midPt, pt2)] + else: + return [(pt1, pt2)] + + +def splitQuadratic(pt1, pt2, pt3, where, isHorizontal): + """Split the quadratic curve between pt1, pt2 and pt3 at position 'where', + which is an x coordinate if isHorizontal is False, a y coordinate if + isHorizontal is True. Return a list of curve segments. 
+ + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 150, False)) + ((0, 0), (50, 100), (100, 0)) + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, False)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, False)) + ((0, 0), (12.5, 25), (25, 37.5)) + ((25, 37.5), (62.5, 75), (100, 0)) + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, True)) + ((0, 0), (7.32233, 14.6447), (14.6447, 25)) + ((14.6447, 25), (50, 75), (85.3553, 25)) + ((85.3553, 25), (92.6777, 14.6447), (100, -7.10543e-15)) + >>> # XXX I'm not at all sure if the following behavior is desirable: + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, True)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (50, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) + """ + a, b, c = calcQuadraticParameters(pt1, pt2, pt3) + solutions = solveQuadratic(a[isHorizontal], b[isHorizontal], + c[isHorizontal] - where) + solutions = sorted([t for t in solutions if 0 <= t < 1]) + if not solutions: + return [(pt1, pt2, pt3)] + return _splitQuadraticAtT(a, b, c, *solutions) + + +def splitCubic(pt1, pt2, pt3, pt4, where, isHorizontal): + """Split the cubic curve between pt1, pt2, pt3 and pt4 at position 'where', + which is an x coordinate if isHorizontal is False, a y coordinate if + isHorizontal is True. Return a list of curve segments. 
+ + >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 150, False)) + ((0, 0), (25, 100), (75, 100), (100, 0)) + >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 50, False)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) + >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 25, True)) + ((0, 0), (2.29379, 9.17517), (4.79804, 17.5085), (7.47414, 25)) + ((7.47414, 25), (31.2886, 91.6667), (68.7114, 91.6667), (92.5259, 25)) + ((92.5259, 25), (95.202, 17.5085), (97.7062, 9.17517), (100, 1.77636e-15)) + """ + a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) + solutions = solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal], + d[isHorizontal] - where) + solutions = sorted([t for t in solutions if 0 <= t < 1]) + if not solutions: + return [(pt1, pt2, pt3, pt4)] + return _splitCubicAtT(a, b, c, d, *solutions) + + +def splitQuadraticAtT(pt1, pt2, pt3, *ts): + """Split the quadratic curve between pt1, pt2 and pt3 at one or more + values of t. Return a list of curve segments. + + >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) + >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (62.5, 50), (75, 37.5)) + ((75, 37.5), (87.5, 25), (100, 0)) + """ + a, b, c = calcQuadraticParameters(pt1, pt2, pt3) + return _splitQuadraticAtT(a, b, c, *ts) + + +def splitCubicAtT(pt1, pt2, pt3, pt4, *ts): + """Split the cubic curve between pt1, pt2, pt3 and pt4 at one or more + values of t. Return a list of curve segments. 
+ + >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) + >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (59.375, 75), (68.75, 68.75), (77.3438, 56.25)) + ((77.3438, 56.25), (85.9375, 43.75), (93.75, 25), (100, 0)) + """ + a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) + return _splitCubicAtT(a, b, c, d, *ts) + + +def _splitQuadraticAtT(a, b, c, *ts): + ts = list(ts) + segments = [] + ts.insert(0, 0.0) + ts.append(1.0) + ax, ay = a + bx, by = b + cx, cy = c + for i in range(len(ts) - 1): + t1 = ts[i] + t2 = ts[i+1] + delta = (t2 - t1) + # calc new a, b and c + a1x = ax * delta**2 + a1y = ay * delta**2 + b1x = (2*ax*t1 + bx) * delta + b1y = (2*ay*t1 + by) * delta + c1x = ax*t1**2 + bx*t1 + cx + c1y = ay*t1**2 + by*t1 + cy + + pt1, pt2, pt3 = calcQuadraticPoints((a1x, a1y), (b1x, b1y), (c1x, c1y)) + segments.append((pt1, pt2, pt3)) + return segments + + +def _splitCubicAtT(a, b, c, d, *ts): + ts = list(ts) + ts.insert(0, 0.0) + ts.append(1.0) + segments = [] + ax, ay = a + bx, by = b + cx, cy = c + dx, dy = d + for i in range(len(ts) - 1): + t1 = ts[i] + t2 = ts[i+1] + delta = (t2 - t1) + # calc new a, b, c and d + a1x = ax * delta**3 + a1y = ay * delta**3 + b1x = (3*ax*t1 + bx) * delta**2 + b1y = (3*ay*t1 + by) * delta**2 + c1x = (2*bx*t1 + cx + 3*ax*t1**2) * delta + c1y = (2*by*t1 + cy + 3*ay*t1**2) * delta + d1x = ax*t1**3 + bx*t1**2 + cx*t1 + dx + d1y = ay*t1**3 + by*t1**2 + cy*t1 + dy + pt1, pt2, pt3, pt4 = calcCubicPoints((a1x, a1y), (b1x, b1y), (c1x, c1y), (d1x, d1y)) + segments.append((pt1, pt2, pt3, pt4)) + return segments + + +# +# Equation solvers. +# + +from math import sqrt, acos, cos, pi + + +def solveQuadratic(a, b, c, + sqrt=sqrt): + """Solve a quadratic equation where a, b and c are real. 
+ a*x*x + b*x + c = 0 + This function returns a list of roots. Note that the returned list + is neither guaranteed to be sorted nor to contain unique values! + """ + if abs(a) < epsilon: + if abs(b) < epsilon: + # We have a non-equation; therefore, we have no valid solution + roots = [] + else: + # We have a linear equation with 1 root. + roots = [-c/b] + else: + # We have a true quadratic equation. Apply the quadratic formula to find two roots. + DD = b*b - 4.0*a*c + if DD >= 0.0: + rDD = sqrt(DD) + roots = [(-b+rDD)/2.0/a, (-b-rDD)/2.0/a] + else: + # complex roots, ignore + roots = [] + return roots + + +def solveCubic(a, b, c, d): + """Solve a cubic equation where a, b, c and d are real. + a*x*x*x + b*x*x + c*x + d = 0 + This function returns a list of roots. Note that the returned list + is neither guaranteed to be sorted nor to contain unique values! + """ + # + # adapted from: + # CUBIC.C - Solve a cubic polynomial + # public domain by Ross Cottrell + # found at: http://www.strangecreations.com/library/snippets/Cubic.C + # + if abs(a) < epsilon: + # don't just test for zero; for very small values of 'a' solveCubic() + # returns unreliable results, so we fall back to quad. 
+ return solveQuadratic(b, c, d) + a = float(a) + a1 = b/a + a2 = c/a + a3 = d/a + + Q = (a1*a1 - 3.0*a2)/9.0 + R = (2.0*a1*a1*a1 - 9.0*a1*a2 + 27.0*a3)/54.0 + R2_Q3 = R*R - Q*Q*Q + + if R2_Q3 < 0: + theta = acos(R/sqrt(Q*Q*Q)) + rQ2 = -2.0*sqrt(Q) + x0 = rQ2*cos(theta/3.0) - a1/3.0 + x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1/3.0 + x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1/3.0 + return [x0, x1, x2] + else: + if Q == 0 and R == 0: + x = 0 + else: + x = pow(sqrt(R2_Q3)+abs(R), 1/3.0) + x = x + Q/x + if R >= 0.0: + x = -x + x = x - a1/3.0 + return [x] + + +# +# Conversion routines for points to parameters and vice versa +# + +def calcQuadraticParameters(pt1, pt2, pt3): + x2, y2 = pt2 + x3, y3 = pt3 + cx, cy = pt1 + bx = (x2 - cx) * 2.0 + by = (y2 - cy) * 2.0 + ax = x3 - cx - bx + ay = y3 - cy - by + return (ax, ay), (bx, by), (cx, cy) + + +def calcCubicParameters(pt1, pt2, pt3, pt4): + x2, y2 = pt2 + x3, y3 = pt3 + x4, y4 = pt4 + dx, dy = pt1 + cx = (x2 -dx) * 3.0 + cy = (y2 -dy) * 3.0 + bx = (x3 - x2) * 3.0 - cx + by = (y3 - y2) * 3.0 - cy + ax = x4 - dx - cx - bx + ay = y4 - dy - cy - by + return (ax, ay), (bx, by), (cx, cy), (dx, dy) + + +def calcQuadraticPoints(a, b, c): + ax, ay = a + bx, by = b + cx, cy = c + x1 = cx + y1 = cy + x2 = (bx * 0.5) + cx + y2 = (by * 0.5) + cy + x3 = ax + bx + cx + y3 = ay + by + cy + return (x1, y1), (x2, y2), (x3, y3) + + +def calcCubicPoints(a, b, c, d): + ax, ay = a + bx, by = b + cx, cy = c + dx, dy = d + x1 = dx + y1 = dy + x2 = (cx / 3.0) + dx + y2 = (cy / 3.0) + dy + x3 = (bx + cx) / 3.0 + x2 + y3 = (by + cy) / 3.0 + y2 + x4 = ax + dx + cx + bx + y4 = ay + dy + cy + by + return (x1, y1), (x2, y2), (x3, y3), (x4, y4) + + +def _segmentrepr(obj): + """ + >>> _segmentrepr([1, [2, 3], [], [[2, [3, 4], [0.1, 2.2]]]]) + '(1, (2, 3), (), ((2, (3, 4), (0.1, 2.2))))' + """ + try: + it = iter(obj) + except TypeError: + return "%g" % obj + else: + return "(%s)" % ", ".join([_segmentrepr(x) for x in it]) + + +def printSegments(segments): + 
"""Helper for the doctests, displaying each segment in a list of + segments on a single line as a tuple. + """ + for segment in segments: + print(_segmentrepr(segment)) + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Snippets/fontTools/misc/eexec.py fonttools-3.0/Snippets/fontTools/misc/eexec.py --- fonttools-2.4/Snippets/fontTools/misc/eexec.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/eexec.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,55 @@ +"""fontTools.misc.eexec.py -- Module implementing the eexec and +charstring encryption algorithm as used by PostScript Type 1 fonts. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +def _decryptChar(cipher, R): + cipher = byteord(cipher) + plain = ( (cipher ^ (R>>8)) ) & 0xFF + R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF + return bytechr(plain), R + +def _encryptChar(plain, R): + plain = byteord(plain) + cipher = ( (plain ^ (R>>8)) ) & 0xFF + R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF + return bytechr(cipher), R + + +def decrypt(cipherstring, R): + plainList = [] + for cipher in cipherstring: + plain, R = _decryptChar(cipher, R) + plainList.append(plain) + plainstring = strjoin(plainList) + return plainstring, int(R) + +def encrypt(plainstring, R): + cipherList = [] + for plain in plainstring: + cipher, R = _encryptChar(plain, R) + cipherList.append(cipher) + cipherstring = strjoin(cipherList) + return cipherstring, int(R) + + +def hexString(s): + import binascii + return binascii.hexlify(s) + +def deHexString(h): + import binascii + h = strjoin(h.split()) + return binascii.unhexlify(h) + + +def _test(): + testStr = "\0\0asdadads asds\265" + print(decrypt, decrypt(testStr, 12321)) + print(encrypt, encrypt(testStr, 12321)) + + +if __name__ == "__main__": + _test() diff -Nru fonttools-2.4/Snippets/fontTools/misc/encodingTools.py 
fonttools-3.0/Snippets/fontTools/misc/encodingTools.py --- fonttools-2.4/Snippets/fontTools/misc/encodingTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/encodingTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,73 @@ +"""fontTools.misc.encodingTools.py -- tools for working with OpenType encodings. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import fontTools.encodings.codecs + +# Map keyed by platformID, then platEncID, then possibly langID +_encodingMap = { + 0: { # Unicode + 0: 'utf_16_be', + 1: 'utf_16_be', + 2: 'utf_16_be', + 3: 'utf_16_be', + 4: 'utf_16_be', + 5: 'utf_16_be', + 6: 'utf_16_be', + }, + 1: { # Macintosh + # See + # https://github.com/behdad/fonttools/issues/236 + 0: { # Macintosh, platEncID==0, keyed by langID + 15: "mac_iceland", + 17: "mac_turkish", + 18: "mac_croatian", + 24: "mac_latin2", + 25: "mac_latin2", + 26: "mac_latin2", + 27: "mac_latin2", + 28: "mac_latin2", + 36: "mac_latin2", + 37: "mac_romanian", + 38: "mac_latin2", + 39: "mac_latin2", + 40: "mac_latin2", + Ellipsis: 'mac_roman', # Other + }, + 1: 'x_mac_japanese_ttx', + 2: 'x_mac_trad_chinese_ttx', + 3: 'x_mac_korean_ttx', + 6: 'mac_greek', + 7: 'mac_cyrillic', + 25: 'x_mac_simp_chinese_ttx', + 29: 'mac_latin2', + 35: 'mac_turkish', + 37: 'mac_iceland', + }, + 2: { # ISO + 0: 'ascii', + 1: 'utf_16_be', + 2: 'latin1', + }, + 3: { # Microsoft + 0: 'utf_16_be', + 1: 'utf_16_be', + 2: 'shift_jis', + 3: 'gb2312', + 4: 'big5', + 5: 'euc_kr', + 6: 'johab', + 10: 'utf_16_be', + }, +} + +def getEncoding(platformID, platEncID, langID, default=None): + """Returns the Python encoding name for OpenType platformID/encodingID/langID + triplet. If encoding for these values is not known, by default None is + returned. That can be overriden by passing a value to the default argument. 
+ """ + encoding = _encodingMap.get(platformID, {}).get(platEncID, default) + if isinstance(encoding, dict): + encoding = encoding.get(langID, encoding[Ellipsis]) + return encoding diff -Nru fonttools-2.4/Snippets/fontTools/misc/encodingTools_test.py fonttools-3.0/Snippets/fontTools/misc/encodingTools_test.py --- fonttools-2.4/Snippets/fontTools/misc/encodingTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/encodingTools_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,31 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +import unittest +from .encodingTools import getEncoding + +class EncodingTest(unittest.TestCase): + + def test_encoding_unicode(self): + + self.assertEqual(getEncoding(3, 0, None), "utf_16_be") # MS Symbol is Unicode as well + self.assertEqual(getEncoding(3, 1, None), "utf_16_be") + self.assertEqual(getEncoding(3, 10, None), "utf_16_be") + self.assertEqual(getEncoding(0, 3, None), "utf_16_be") + + def test_encoding_macroman_misc(self): + self.assertEqual(getEncoding(1, 0, 17), "mac_turkish") + self.assertEqual(getEncoding(1, 0, 37), "mac_romanian") + self.assertEqual(getEncoding(1, 0, 45), "mac_roman") + + def test_extended_mac_encodings(self): + encoding = getEncoding(1, 1, 0) # Mac Japanese + decoded = b'\xfe'.decode(encoding) + self.assertEqual(decoded, unichr(0x2122)) + + def test_extended_unknown(self): + self.assertEqual(getEncoding(10, 11, 12), None) + self.assertEqual(getEncoding(10, 11, 12, "ascii"), "ascii") + self.assertEqual(getEncoding(10, 11, 12, default="ascii"), "ascii") + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/misc/fixedTools.py fonttools-3.0/Snippets/fontTools/misc/fixedTools.py --- fonttools-2.4/Snippets/fontTools/misc/fixedTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/fixedTools.py 2015-08-31 17:57:15.000000000 +0000 @@ 
-0,0 +1,47 @@ +"""fontTools.misc.fixedTools.py -- tools for working with fixed numbers. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = [ + "fixedToFloat", + "floatToFixed", +] + +def fixedToFloat(value, precisionBits): + """Converts a fixed-point number to a float, choosing the float + that has the shortest decimal reprentation. Eg. to convert a + fixed number in a 2.14 format, use precisionBits=14. This is + pretty slow compared to a simple division. Use sporadically. + + precisionBits is only supported up to 16. + """ + if not value: return 0.0 + + scale = 1 << precisionBits + value /= scale + eps = .5 / scale + lo = value - eps + hi = value + eps + # If the range of valid choices spans an integer, return the integer. + if int(lo) != int(hi): + return float(round(value)) + fmt = "%.8f" + lo = fmt % lo + hi = fmt % hi + assert len(lo) == len(hi) and lo != hi + for i in range(len(lo)): + if lo[i] != hi[i]: + break + period = lo.find('.') + assert period < i + fmt = "%%.%df" % (i - period) + value = fmt % value + return float(value) + +def floatToFixed(value, precisionBits): + """Converts a float to a fixed-point number given the number of + precisionBits. Ie. int(round(value * (1<<precisionBits))). 
+ """ + return int(round(value * (1<<precisionBits))) diff -Nru fonttools-2.4/Snippets/fontTools/misc/fixedTools_test.py fonttools-3.0/Snippets/fontTools/misc/fixedTools_test.py --- fonttools-2.4/Snippets/fontTools/misc/fixedTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/fixedTools_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,42 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +import unittest + + +class FixedToolsTest(unittest.TestCase): + + def test_roundtrip(self): + for bits in range(0, 15): + for value in range(-(2**(bits+1)), 2**(bits+1)): + self.assertEqual(value, floatToFixed(fixedToFloat(value, bits), bits)) + + def test_fixedToFloat_precision14(self): + self.assertEqual(0.8, fixedToFloat(13107, 14)) + self.assertEqual(0.0, fixedToFloat(0, 14)) + self.assertEqual(1.0, fixedToFloat(16384, 14)) + self.assertEqual(-1.0, fixedToFloat(-16384, 14)) + self.assertEqual(0.99994, fixedToFloat(16383, 14)) + self.assertEqual(-0.99994, fixedToFloat(-16383, 14)) + + def test_fixedToFloat_precision6(self): + self.assertAlmostEqual(-9.98, fixedToFloat(-639, 6)) + self.assertAlmostEqual(-10.0, fixedToFloat(-640, 6)) + self.assertAlmostEqual(9.98, fixedToFloat(639, 6)) + self.assertAlmostEqual(10.0, fixedToFloat(640, 6)) + + def test_floatToFixed_precision14(self): + self.assertEqual(13107, floatToFixed(0.8, 14)) + self.assertEqual(16384, floatToFixed(1.0, 14)) + self.assertEqual(16384, floatToFixed(1, 14)) + self.assertEqual(-16384, floatToFixed(-1.0, 14)) + self.assertEqual(-16384, floatToFixed(-1, 14)) + self.assertEqual(0, floatToFixed(0, 14)) + + def test_fixedToFloat_return_float(self): + value = fixedToFloat(16384, 14) + self.assertIsInstance(value, float) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/misc/homeResFile.py 
fonttools-3.0/Snippets/fontTools/misc/homeResFile.py --- fonttools-2.4/Snippets/fontTools/misc/homeResFile.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/homeResFile.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,97 @@ +"""Mac-only module to find the home file of a resource.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +import array +import calldll +import macfs +import Res + + +def HomeResFile(res): + """Return a path to the file in which resource 'res' lives.""" + return GetFileLocation(res.HomeResFile()) + + +def GetFileLocation(refNum): + """Return a path to the open file identified with refNum.""" + pb = ParamBlock(refNum) + return pb.getPath() + +# +# Internal cruft, adapted from MoreFiles +# + +_InterfaceLib = calldll.getlibrary("InterfaceLib") +GetVRefNum = calldll.newcall(_InterfaceLib.GetVRefNum, "None", "InShort", "OutShort") +_getInfo = calldll.newcall(_InterfaceLib.PBGetFCBInfoSync, "Short", "InLong") + + +_FCBPBFormat = """ + qLink: l + qType: h + ioTrap: h + ioCmdAddr: l + ioCompletion: l + ioResult: h + ioNamePtr: l + ioVRefNum: h + ioRefNum: h + filler: h + ioFCBIndx: h + filler1: h + ioFCBFINm: l + ioFCBFlags: h + ioFCBStBlk: h + ioFCBEOF: l + ioFCBPLen: l + ioFCBCrPs: l + ioFCBVRefNum: h + ioFCBClpSiz: l + ioFCBParID: l +""" + +class ParamBlock(object): + + """Wrapper for the very low level FCBPB record.""" + + def __init__(self, refNum): + self.__fileName = array.array("c", "\0" * 64) + sstruct.unpack(_FCBPBFormat, + "\0" * sstruct.calcsize(_FCBPBFormat), self) + self.ioNamePtr = self.__fileName.buffer_info()[0] + self.ioRefNum = refNum + self.ioVRefNum = GetVRefNum(refNum) + self.__haveInfo = 0 + + def getInfo(self): + if self.__haveInfo: + return + data = sstruct.pack(_FCBPBFormat, self) + buf = array.array("c", data) + ptr = buf.buffer_info()[0] + err = _getInfo(ptr) + if err: + raise Res.Error("can't get file 
info", err) + sstruct.unpack(_FCBPBFormat, buf.tostring(), self) + self.__haveInfo = 1 + + def getFileName(self): + self.getInfo() + data = self.__fileName.tostring() + return data[1:byteord(data[0])+1] + + def getFSSpec(self): + self.getInfo() + vRefNum = self.ioVRefNum + parID = self.ioFCBParID + return macfs.FSSpec((vRefNum, parID, self.getFileName())) + + def getPath(self): + return self.getFSSpec().as_pathname() + + +if __name__ == "__main__": + fond = Res.GetNamedResource("FOND", "Helvetica") + print(HomeResFile(fond)) diff -Nru fonttools-2.4/Snippets/fontTools/misc/__init__.py fonttools-3.0/Snippets/fontTools/misc/__init__.py --- fonttools-2.4/Snippets/fontTools/misc/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +"""Empty __init__.py file to signal Python this directory is a package.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * diff -Nru fonttools-2.4/Snippets/fontTools/misc/macCreatorType.py fonttools-3.0/Snippets/fontTools/misc/macCreatorType.py --- fonttools-2.4/Snippets/fontTools/misc/macCreatorType.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/macCreatorType.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,32 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +try: + import MacOS +except ImportError: + MacOS = None +from .py23 import * + +def _reverseString(s): + s = list(s) + s.reverse() + return strjoin(s) + + +def getMacCreatorAndType(path): + if MacOS is not None: + fileCreator, fileType = MacOS.GetCreatorAndType(path) + if sys.version_info[:2] < (2, 7) and sys.byteorder == "little": + # work around bug in MacOS.GetCreatorAndType() on intel: + # http://bugs.python.org/issue1594 + # (fixed with Python 2.7) + fileCreator = _reverseString(fileCreator) + fileType = 
_reverseString(fileType) + return fileCreator, fileType + else: + return None, None + + +def setMacCreatorAndType(path, fileCreator, fileType): + if MacOS is not None: + MacOS.SetCreatorAndType(path, fileCreator, fileType) diff -Nru fonttools-2.4/Snippets/fontTools/misc/psCharStrings.py fonttools-3.0/Snippets/fontTools/misc/psCharStrings.py --- fonttools-2.4/Snippets/fontTools/misc/psCharStrings.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/psCharStrings.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1175 @@ +"""psCharStrings.py -- module implementing various kinds of CharStrings: +CFF dictionary data and Type1/Type2 CharStrings. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import struct + + +DEBUG = 0 + + +def read_operator(self, b0, data, index): + if b0 == 12: + op = (b0, byteord(data[index])) + index = index+1 + else: + op = b0 + operator = self.operators[op] + value = self.handle_operator(operator) + return value, index + +def read_byte(self, b0, data, index): + return b0 - 139, index + +def read_smallInt1(self, b0, data, index): + b1 = byteord(data[index]) + return (b0-247)*256 + b1 + 108, index+1 + +def read_smallInt2(self, b0, data, index): + b1 = byteord(data[index]) + return -(b0-251)*256 - b1 - 108, index+1 + +def read_shortInt(self, b0, data, index): + value, = struct.unpack(">h", data[index:index+2]) + return value, index+2 + +def read_longInt(self, b0, data, index): + value, = struct.unpack(">l", data[index:index+4]) + return value, index+4 + +def read_fixed1616(self, b0, data, index): + value, = struct.unpack(">l", data[index:index+4]) + return value / 65536, index+4 + +def read_reserved(self, b0, data, index): + assert NotImplementedError + return NotImplemented, index + +def read_realNumber(self, b0, data, index): + number = '' + while True: + b = byteord(data[index]) + index = index + 1 + nibble0 = (b & 0xf0) >> 4 + nibble1 = b & 0x0f + if nibble0 
== 0xf: + break + number = number + realNibbles[nibble0] + if nibble1 == 0xf: + break + number = number + realNibbles[nibble1] + return float(number), index + + +t1OperandEncoding = [None] * 256 +t1OperandEncoding[0:32] = (32) * [read_operator] +t1OperandEncoding[32:247] = (247 - 32) * [read_byte] +t1OperandEncoding[247:251] = (251 - 247) * [read_smallInt1] +t1OperandEncoding[251:255] = (255 - 251) * [read_smallInt2] +t1OperandEncoding[255] = read_longInt +assert len(t1OperandEncoding) == 256 + +t2OperandEncoding = t1OperandEncoding[:] +t2OperandEncoding[28] = read_shortInt +t2OperandEncoding[255] = read_fixed1616 + +cffDictOperandEncoding = t2OperandEncoding[:] +cffDictOperandEncoding[29] = read_longInt +cffDictOperandEncoding[30] = read_realNumber +cffDictOperandEncoding[255] = read_reserved + + +realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'E', 'E-', None, '-'] +realNibblesDict = {v:i for i,v in enumerate(realNibbles)} + + +class ByteCodeBase(object): + pass + + +def buildOperatorDict(operatorList): + oper = {} + opc = {} + for item in operatorList: + if len(item) == 2: + oper[item[0]] = item[1] + else: + oper[item[0]] = item[1:] + if isinstance(item[0], tuple): + opc[item[1]] = item[0] + else: + opc[item[1]] = (item[0],) + return oper, opc + + +t2Operators = [ +# opcode name + (1, 'hstem'), + (3, 'vstem'), + (4, 'vmoveto'), + (5, 'rlineto'), + (6, 'hlineto'), + (7, 'vlineto'), + (8, 'rrcurveto'), + (10, 'callsubr'), + (11, 'return'), + (14, 'endchar'), + (16, 'blend'), + (18, 'hstemhm'), + (19, 'hintmask'), + (20, 'cntrmask'), + (21, 'rmoveto'), + (22, 'hmoveto'), + (23, 'vstemhm'), + (24, 'rcurveline'), + (25, 'rlinecurve'), + (26, 'vvcurveto'), + (27, 'hhcurveto'), +# (28, 'shortint'), # not really an operator + (29, 'callgsubr'), + (30, 'vhcurveto'), + (31, 'hvcurveto'), + ((12, 0), 'ignore'), # dotsection. Yes, there a few very early OTF/CFF + # fonts with this deprecated operator. Just ignore it. 
+ ((12, 3), 'and'), + ((12, 4), 'or'), + ((12, 5), 'not'), + ((12, 8), 'store'), + ((12, 9), 'abs'), + ((12, 10), 'add'), + ((12, 11), 'sub'), + ((12, 12), 'div'), + ((12, 13), 'load'), + ((12, 14), 'neg'), + ((12, 15), 'eq'), + ((12, 18), 'drop'), + ((12, 20), 'put'), + ((12, 21), 'get'), + ((12, 22), 'ifelse'), + ((12, 23), 'random'), + ((12, 24), 'mul'), + ((12, 26), 'sqrt'), + ((12, 27), 'dup'), + ((12, 28), 'exch'), + ((12, 29), 'index'), + ((12, 30), 'roll'), + ((12, 34), 'hflex'), + ((12, 35), 'flex'), + ((12, 36), 'hflex1'), + ((12, 37), 'flex1'), +] + + +def getIntEncoder(format): + if format == "cff": + fourByteOp = bytechr(29) + elif format == "t1": + fourByteOp = bytechr(255) + else: + assert format == "t2" + fourByteOp = None + + def encodeInt(value, fourByteOp=fourByteOp, bytechr=bytechr, + pack=struct.pack, unpack=struct.unpack): + if -107 <= value <= 107: + code = bytechr(value + 139) + elif 108 <= value <= 1131: + value = value - 108 + code = bytechr((value >> 8) + 247) + bytechr(value & 0xFF) + elif -1131 <= value <= -108: + value = -value - 108 + code = bytechr((value >> 8) + 251) + bytechr(value & 0xFF) + elif fourByteOp is None: + # T2 only supports 2 byte ints + if -32768 <= value <= 32767: + code = bytechr(28) + pack(">h", value) + else: + # Backwards compatible hack: due to a previous bug in FontTools, + # 16.16 fixed numbers were written out as 4-byte ints. When + # these numbers were small, they were wrongly written back as + # small ints instead of 4-byte ints, breaking round-tripping. + # This here workaround doesn't do it any better, since we can't + # distinguish anymore between small ints that were supposed to + # be small fixed numbers and small ints that were just small + # ints. Hence the warning. + import sys + sys.stderr.write("Warning: 4-byte T2 number got passed to the " + "IntType handler. 
This should happen only when reading in " + "old XML files.\n") + code = bytechr(255) + pack(">l", value) + else: + code = fourByteOp + pack(">l", value) + return code + + return encodeInt + + +encodeIntCFF = getIntEncoder("cff") +encodeIntT1 = getIntEncoder("t1") +encodeIntT2 = getIntEncoder("t2") + +def encodeFixed(f, pack=struct.pack): + # For T2 only + return b"\xff" + pack(">l", int(round(f * 65536))) + +def encodeFloat(f): + # For CFF only, used in cffLib + s = str(f).upper() + if s[:2] == "0.": + s = s[1:] + elif s[:3] == "-0.": + s = "-" + s[2:] + nibbles = [] + while s: + c = s[0] + s = s[1:] + if c == "E" and s[:1] == "-": + s = s[1:] + c = "E-" + nibbles.append(realNibblesDict[c]) + nibbles.append(0xf) + if len(nibbles) % 2: + nibbles.append(0xf) + d = bytechr(30) + for i in range(0, len(nibbles), 2): + d = d + bytechr(nibbles[i] << 4 | nibbles[i+1]) + return d + + +class CharStringCompileError(Exception): pass + + +class T2CharString(ByteCodeBase): + + operandEncoding = t2OperandEncoding + operators, opcodes = buildOperatorDict(t2Operators) + + def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None): + if program is None: + program = [] + self.bytecode = bytecode + self.program = program + self.private = private + self.globalSubrs = globalSubrs if globalSubrs is not None else [] + + def __repr__(self): + if self.bytecode is None: + return "<%s (source) at %x>" % (self.__class__.__name__, id(self)) + else: + return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self)) + + def getIntEncoder(self): + return encodeIntT2 + + def getFixedEncoder(self): + return encodeFixed + + def decompile(self): + if not self.needsDecompilation(): + return + subrs = getattr(self.private, "Subrs", []) + decompiler = SimpleT2Decompiler(subrs, self.globalSubrs) + decompiler.execute(self) + + def draw(self, pen): + subrs = getattr(self.private, "Subrs", []) + extractor = T2OutlineExtractor(pen, subrs, self.globalSubrs, + 
self.private.nominalWidthX, self.private.defaultWidthX) + extractor.execute(self) + self.width = extractor.width + + def compile(self): + if self.bytecode is not None: + return + assert self.program, "illegal CharString: decompiled to empty program" + assert self.program[-1] in ("endchar", "return", "callsubr", "callgsubr", + "seac"), "illegal CharString" + bytecode = [] + opcodes = self.opcodes + program = self.program + encodeInt = self.getIntEncoder() + encodeFixed = self.getFixedEncoder() + i = 0 + end = len(program) + while i < end: + token = program[i] + i = i + 1 + tp = type(token) + if issubclass(tp, basestring): + try: + bytecode.extend(bytechr(b) for b in opcodes[token]) + except KeyError: + raise CharStringCompileError("illegal operator: %s" % token) + if token in ('hintmask', 'cntrmask'): + bytecode.append(program[i]) # hint mask + i = i + 1 + elif tp == int: + bytecode.append(encodeInt(token)) + elif tp == float: + bytecode.append(encodeFixed(token)) + else: + assert 0, "unsupported type: %s" % tp + try: + bytecode = bytesjoin(bytecode) + except TypeError: + print(bytecode) + raise + self.setBytecode(bytecode) + + def needsDecompilation(self): + return self.bytecode is not None + + def setProgram(self, program): + self.program = program + self.bytecode = None + + def setBytecode(self, bytecode): + self.bytecode = bytecode + self.program = None + + def getToken(self, index, + len=len, byteord=byteord, basestring=basestring, + isinstance=isinstance): + if self.bytecode is not None: + if index >= len(self.bytecode): + return None, 0, 0 + b0 = byteord(self.bytecode[index]) + index = index + 1 + handler = self.operandEncoding[b0] + token, index = handler(self, b0, self.bytecode, index) + else: + if index >= len(self.program): + return None, 0, 0 + token = self.program[index] + index = index + 1 + isOperator = isinstance(token, basestring) + return token, isOperator, index + + def getBytes(self, index, nBytes): + if self.bytecode is not None: + newIndex = 
index + nBytes + bytes = self.bytecode[index:newIndex] + index = newIndex + else: + bytes = self.program[index] + index = index + 1 + assert len(bytes) == nBytes + return bytes, index + + def handle_operator(self, operator): + return operator + + def toXML(self, xmlWriter): + from fontTools.misc.textTools import num2binary + if self.bytecode is not None: + xmlWriter.dumphex(self.bytecode) + else: + index = 0 + args = [] + while True: + token, isOperator, index = self.getToken(index) + if token is None: + break + if isOperator: + args = [str(arg) for arg in args] + if token in ('hintmask', 'cntrmask'): + hintMask, isOperator, index = self.getToken(index) + bits = [] + for byte in hintMask: + bits.append(num2binary(byteord(byte), 8)) + hintMask = strjoin(bits) + line = ' '.join(args + [token, hintMask]) + else: + line = ' '.join(args + [token]) + xmlWriter.write(line) + xmlWriter.newline() + args = [] + else: + args.append(token) + + def fromXML(self, name, attrs, content): + from fontTools.misc.textTools import binary2num, readHex + if attrs.get("raw"): + self.setBytecode(readHex(content)) + return + content = strjoin(content) + content = content.split() + program = [] + end = len(content) + i = 0 + while i < end: + token = content[i] + i = i + 1 + try: + token = int(token) + except ValueError: + try: + token = float(token) + except ValueError: + program.append(token) + if token in ('hintmask', 'cntrmask'): + mask = content[i] + maskBytes = b"" + for j in range(0, len(mask), 8): + maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8])) + program.append(maskBytes) + i = i + 1 + else: + program.append(token) + else: + program.append(token) + self.setProgram(program) + + +t1Operators = [ +# opcode name + (1, 'hstem'), + (3, 'vstem'), + (4, 'vmoveto'), + (5, 'rlineto'), + (6, 'hlineto'), + (7, 'vlineto'), + (8, 'rrcurveto'), + (9, 'closepath'), + (10, 'callsubr'), + (11, 'return'), + (13, 'hsbw'), + (14, 'endchar'), + (21, 'rmoveto'), + (22, 'hmoveto'), + (30, 
'vhcurveto'), + (31, 'hvcurveto'), + ((12, 0), 'dotsection'), + ((12, 1), 'vstem3'), + ((12, 2), 'hstem3'), + ((12, 6), 'seac'), + ((12, 7), 'sbw'), + ((12, 12), 'div'), + ((12, 16), 'callothersubr'), + ((12, 17), 'pop'), + ((12, 33), 'setcurrentpoint'), +] + +class T1CharString(T2CharString): + + operandEncoding = t1OperandEncoding + operators, opcodes = buildOperatorDict(t1Operators) + + def __init__(self, bytecode=None, program=None, subrs=None): + if program is None: + program = [] + self.bytecode = bytecode + self.program = program + self.subrs = subrs + + def getIntEncoder(self): + return encodeIntT1 + + def getFixedEncoder(self): + def encodeFixed(value): + raise TypeError("Type 1 charstrings don't support floating point operands") + + def decompile(self): + if self.bytecode is None: + return + program = [] + index = 0 + while True: + token, isOperator, index = self.getToken(index) + if token is None: + break + program.append(token) + self.setProgram(program) + + def draw(self, pen): + extractor = T1OutlineExtractor(pen, self.subrs) + extractor.execute(self) + self.width = extractor.width + + +class SimpleT2Decompiler(object): + + def __init__(self, localSubrs, globalSubrs): + self.localSubrs = localSubrs + self.localBias = calcSubrBias(localSubrs) + self.globalSubrs = globalSubrs + self.globalBias = calcSubrBias(globalSubrs) + self.reset() + + def reset(self): + self.callingStack = [] + self.operandStack = [] + self.hintCount = 0 + self.hintMaskBytes = 0 + + def execute(self, charString): + self.callingStack.append(charString) + needsDecompilation = charString.needsDecompilation() + if needsDecompilation: + program = [] + pushToProgram = program.append + else: + pushToProgram = lambda x: None + pushToStack = self.operandStack.append + index = 0 + while True: + token, isOperator, index = charString.getToken(index) + if token is None: + break # we're done! 
+ pushToProgram(token) + if isOperator: + handlerName = "op_" + token + handler = getattr(self, handlerName, None) + if handler is not None: + rv = handler(index) + if rv: + hintMaskBytes, index = rv + pushToProgram(hintMaskBytes) + else: + self.popall() + else: + pushToStack(token) + if needsDecompilation: + assert program, "illegal CharString: decompiled to empty program" + assert program[-1] in ("endchar", "return", "callsubr", "callgsubr", + "seac"), "illegal CharString" + charString.setProgram(program) + del self.callingStack[-1] + + def pop(self): + value = self.operandStack[-1] + del self.operandStack[-1] + return value + + def popall(self): + stack = self.operandStack[:] + self.operandStack[:] = [] + return stack + + def push(self, value): + self.operandStack.append(value) + + def op_return(self, index): + if self.operandStack: + pass + + def op_endchar(self, index): + pass + + def op_ignore(self, index): + pass + + def op_callsubr(self, index): + subrIndex = self.pop() + subr = self.localSubrs[subrIndex+self.localBias] + self.execute(subr) + + def op_callgsubr(self, index): + subrIndex = self.pop() + subr = self.globalSubrs[subrIndex+self.globalBias] + self.execute(subr) + + def op_hstem(self, index): + self.countHints() + def op_vstem(self, index): + self.countHints() + def op_hstemhm(self, index): + self.countHints() + def op_vstemhm(self, index): + self.countHints() + + def op_hintmask(self, index): + if not self.hintMaskBytes: + self.countHints() + self.hintMaskBytes = (self.hintCount + 7) // 8 + hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes) + return hintMaskBytes, index + + op_cntrmask = op_hintmask + + def countHints(self): + args = self.popall() + self.hintCount = self.hintCount + len(args) // 2 + + # misc + def op_and(self, index): + raise NotImplementedError + def op_or(self, index): + raise NotImplementedError + def op_not(self, index): + raise NotImplementedError + def op_store(self, index): + raise 
NotImplementedError + def op_abs(self, index): + raise NotImplementedError + def op_add(self, index): + raise NotImplementedError + def op_sub(self, index): + raise NotImplementedError + def op_div(self, index): + raise NotImplementedError + def op_load(self, index): + raise NotImplementedError + def op_neg(self, index): + raise NotImplementedError + def op_eq(self, index): + raise NotImplementedError + def op_drop(self, index): + raise NotImplementedError + def op_put(self, index): + raise NotImplementedError + def op_get(self, index): + raise NotImplementedError + def op_ifelse(self, index): + raise NotImplementedError + def op_random(self, index): + raise NotImplementedError + def op_mul(self, index): + raise NotImplementedError + def op_sqrt(self, index): + raise NotImplementedError + def op_dup(self, index): + raise NotImplementedError + def op_exch(self, index): + raise NotImplementedError + def op_index(self, index): + raise NotImplementedError + def op_roll(self, index): + raise NotImplementedError + +class T2OutlineExtractor(SimpleT2Decompiler): + + def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX): + SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) + self.pen = pen + self.nominalWidthX = nominalWidthX + self.defaultWidthX = defaultWidthX + + def reset(self): + SimpleT2Decompiler.reset(self) + self.hints = [] + self.gotWidth = 0 + self.width = 0 + self.currentPoint = (0, 0) + self.sawMoveTo = 0 + + def _nextPoint(self, point): + x, y = self.currentPoint + point = x + point[0], y + point[1] + self.currentPoint = point + return point + + def rMoveTo(self, point): + self.pen.moveTo(self._nextPoint(point)) + self.sawMoveTo = 1 + + def rLineTo(self, point): + if not self.sawMoveTo: + self.rMoveTo((0, 0)) + self.pen.lineTo(self._nextPoint(point)) + + def rCurveTo(self, pt1, pt2, pt3): + if not self.sawMoveTo: + self.rMoveTo((0, 0)) + nextPoint = self._nextPoint + self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), 
nextPoint(pt3)) + + def closePath(self): + if self.sawMoveTo: + self.pen.closePath() + self.sawMoveTo = 0 + + def endPath(self): + # In T2 there are no open paths, so always do a closePath when + # finishing a sub path. + self.closePath() + + def popallWidth(self, evenOdd=0): + args = self.popall() + if not self.gotWidth: + if evenOdd ^ (len(args) % 2): + self.width = self.nominalWidthX + args[0] + args = args[1:] + else: + self.width = self.defaultWidthX + self.gotWidth = 1 + return args + + def countHints(self): + args = self.popallWidth() + self.hintCount = self.hintCount + len(args) // 2 + + # + # hint operators + # + #def op_hstem(self, index): + # self.countHints() + #def op_vstem(self, index): + # self.countHints() + #def op_hstemhm(self, index): + # self.countHints() + #def op_vstemhm(self, index): + # self.countHints() + #def op_hintmask(self, index): + # self.countHints() + #def op_cntrmask(self, index): + # self.countHints() + + # + # path constructors, moveto + # + def op_rmoveto(self, index): + self.endPath() + self.rMoveTo(self.popallWidth()) + def op_hmoveto(self, index): + self.endPath() + self.rMoveTo((self.popallWidth(1)[0], 0)) + def op_vmoveto(self, index): + self.endPath() + self.rMoveTo((0, self.popallWidth(1)[0])) + def op_endchar(self, index): + self.endPath() + args = self.popallWidth() + if args: + from fontTools.encodings.StandardEncoding import StandardEncoding + # endchar can do seac accent bulding; The T2 spec says it's deprecated, + # but recent software that shall remain nameless does output it. 
+ adx, ady, bchar, achar = args + baseGlyph = StandardEncoding[bchar] + self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) + accentGlyph = StandardEncoding[achar] + self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) + + # + # path constructors, lines + # + def op_rlineto(self, index): + args = self.popall() + for i in range(0, len(args), 2): + point = args[i:i+2] + self.rLineTo(point) + + def op_hlineto(self, index): + self.alternatingLineto(1) + def op_vlineto(self, index): + self.alternatingLineto(0) + + # + # path constructors, curves + # + def op_rrcurveto(self, index): + """{dxa dya dxb dyb dxc dyc}+ rrcurveto""" + args = self.popall() + for i in range(0, len(args), 6): + dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6] + self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc)) + + def op_rcurveline(self, index): + """{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline""" + args = self.popall() + for i in range(0, len(args)-2, 6): + dxb, dyb, dxc, dyc, dxd, dyd = args[i:i+6] + self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) + self.rLineTo(args[-2:]) + + def op_rlinecurve(self, index): + """{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve""" + args = self.popall() + lineArgs = args[:-6] + for i in range(0, len(lineArgs), 2): + self.rLineTo(lineArgs[i:i+2]) + dxb, dyb, dxc, dyc, dxd, dyd = args[-6:] + self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) + + def op_vvcurveto(self, index): + "dx1? {dya dxb dyb dyc}+ vvcurveto" + args = self.popall() + if len(args) % 2: + dx1 = args[0] + args = args[1:] + else: + dx1 = 0 + for i in range(0, len(args), 4): + dya, dxb, dyb, dyc = args[i:i+4] + self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc)) + dx1 = 0 + + def op_hhcurveto(self, index): + """dy1? 
{dxa dxb dyb dxc}+ hhcurveto""" + args = self.popall() + if len(args) % 2: + dy1 = args[0] + args = args[1:] + else: + dy1 = 0 + for i in range(0, len(args), 4): + dxa, dxb, dyb, dxc = args[i:i+4] + self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0)) + dy1 = 0 + + def op_vhcurveto(self, index): + """dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30) + {dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto + """ + args = self.popall() + while args: + args = self.vcurveto(args) + if args: + args = self.hcurveto(args) + + def op_hvcurveto(self, index): + """dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf? + {dxa dxb dyb dyc dyd dxe dye dxf}+ dyf? + """ + args = self.popall() + while args: + args = self.hcurveto(args) + if args: + args = self.vcurveto(args) + + # + # path constructors, flex + # + def op_hflex(self, index): + dx1, dx2, dy2, dx3, dx4, dx5, dx6 = self.popall() + dy1 = dy3 = dy4 = dy6 = 0 + dy5 = -dy2 + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + def op_flex(self, index): + dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall() + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + def op_hflex1(self, index): + dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall() + dy3 = dy4 = 0 + dy6 = -(dy1 + dy2 + dy3 + dy4 + dy5) + + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + def op_flex1(self, index): + dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall() + dx = dx1 + dx2 + dx3 + dx4 + dx5 + dy = dy1 + dy2 + dy3 + dy4 + dy5 + if abs(dx) > abs(dy): + dx6 = d6 + dy6 = -dy + else: + dx6 = -dx + dy6 = d6 + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + + # + # MultipleMaster. Well... 
+ # + def op_blend(self, index): + self.popall() + + # misc + def op_and(self, index): + raise NotImplementedError + def op_or(self, index): + raise NotImplementedError + def op_not(self, index): + raise NotImplementedError + def op_store(self, index): + raise NotImplementedError + def op_abs(self, index): + raise NotImplementedError + def op_add(self, index): + raise NotImplementedError + def op_sub(self, index): + raise NotImplementedError + def op_div(self, index): + num2 = self.pop() + num1 = self.pop() + d1 = num1//num2 + d2 = num1/num2 + if d1 == d2: + self.push(d1) + else: + self.push(d2) + def op_load(self, index): + raise NotImplementedError + def op_neg(self, index): + raise NotImplementedError + def op_eq(self, index): + raise NotImplementedError + def op_drop(self, index): + raise NotImplementedError + def op_put(self, index): + raise NotImplementedError + def op_get(self, index): + raise NotImplementedError + def op_ifelse(self, index): + raise NotImplementedError + def op_random(self, index): + raise NotImplementedError + def op_mul(self, index): + raise NotImplementedError + def op_sqrt(self, index): + raise NotImplementedError + def op_dup(self, index): + raise NotImplementedError + def op_exch(self, index): + raise NotImplementedError + def op_index(self, index): + raise NotImplementedError + def op_roll(self, index): + raise NotImplementedError + + # + # miscellaneous helpers + # + def alternatingLineto(self, isHorizontal): + args = self.popall() + for arg in args: + if isHorizontal: + point = (arg, 0) + else: + point = (0, arg) + self.rLineTo(point) + isHorizontal = not isHorizontal + + def vcurveto(self, args): + dya, dxb, dyb, dxc = args[:4] + args = args[4:] + if len(args) == 1: + dyc = args[0] + args = [] + else: + dyc = 0 + self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc)) + return args + + def hcurveto(self, args): + dxa, dxb, dyb, dyc = args[:4] + args = args[4:] + if len(args) == 1: + dxc = args[0] + args = [] + else: + dxc = 0 + 
self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc)) + return args + + +class T1OutlineExtractor(T2OutlineExtractor): + + def __init__(self, pen, subrs): + self.pen = pen + self.subrs = subrs + self.reset() + + def reset(self): + self.flexing = 0 + self.width = 0 + self.sbx = 0 + T2OutlineExtractor.reset(self) + + def endPath(self): + if self.sawMoveTo: + self.pen.endPath() + self.sawMoveTo = 0 + + def popallWidth(self, evenOdd=0): + return self.popall() + + def exch(self): + stack = self.operandStack + stack[-1], stack[-2] = stack[-2], stack[-1] + + # + # path constructors + # + def op_rmoveto(self, index): + if self.flexing: + return + self.endPath() + self.rMoveTo(self.popall()) + def op_hmoveto(self, index): + if self.flexing: + # We must add a parameter to the stack if we are flexing + self.push(0) + return + self.endPath() + self.rMoveTo((self.popall()[0], 0)) + def op_vmoveto(self, index): + if self.flexing: + # We must add a parameter to the stack if we are flexing + self.push(0) + self.exch() + return + self.endPath() + self.rMoveTo((0, self.popall()[0])) + def op_closepath(self, index): + self.closePath() + def op_setcurrentpoint(self, index): + args = self.popall() + x, y = args + self.currentPoint = x, y + + def op_endchar(self, index): + self.endPath() + + def op_hsbw(self, index): + sbx, wx = self.popall() + self.width = wx + self.sbx = sbx + self.currentPoint = sbx, self.currentPoint[1] + def op_sbw(self, index): + self.popall() # XXX + + # + def op_callsubr(self, index): + subrIndex = self.pop() + subr = self.subrs[subrIndex] + self.execute(subr) + def op_callothersubr(self, index): + subrIndex = self.pop() + nArgs = self.pop() + #print nArgs, subrIndex, "callothersubr" + if subrIndex == 0 and nArgs == 3: + self.doFlex() + self.flexing = 0 + elif subrIndex == 1 and nArgs == 0: + self.flexing = 1 + # ignore... + def op_pop(self, index): + pass # ignore... 
+ + def doFlex(self): + finaly = self.pop() + finalx = self.pop() + self.pop() # flex height is unused + + p3y = self.pop() + p3x = self.pop() + bcp4y = self.pop() + bcp4x = self.pop() + bcp3y = self.pop() + bcp3x = self.pop() + p2y = self.pop() + p2x = self.pop() + bcp2y = self.pop() + bcp2x = self.pop() + bcp1y = self.pop() + bcp1x = self.pop() + rpy = self.pop() + rpx = self.pop() + + # call rrcurveto + self.push(bcp1x+rpx) + self.push(bcp1y+rpy) + self.push(bcp2x) + self.push(bcp2y) + self.push(p2x) + self.push(p2y) + self.op_rrcurveto(None) + + # call rrcurveto + self.push(bcp3x) + self.push(bcp3y) + self.push(bcp4x) + self.push(bcp4y) + self.push(p3x) + self.push(p3y) + self.op_rrcurveto(None) + + # Push back final coords so subr 0 can find them + self.push(finalx) + self.push(finaly) + + def op_dotsection(self, index): + self.popall() # XXX + def op_hstem3(self, index): + self.popall() # XXX + def op_seac(self, index): + "asb adx ady bchar achar seac" + from fontTools.encodings.StandardEncoding import StandardEncoding + asb, adx, ady, bchar, achar = self.popall() + baseGlyph = StandardEncoding[bchar] + self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) + accentGlyph = StandardEncoding[achar] + adx = adx + self.sbx - asb # seac weirdness + self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) + def op_vstem3(self, index): + self.popall() # XXX + + +class DictDecompiler(ByteCodeBase): + + operandEncoding = cffDictOperandEncoding + + def __init__(self, strings): + self.stack = [] + self.strings = strings + self.dict = {} + + def getDict(self): + assert len(self.stack) == 0, "non-empty stack" + return self.dict + + def decompile(self, data): + index = 0 + lenData = len(data) + push = self.stack.append + while index < lenData: + b0 = byteord(data[index]) + index = index + 1 + handler = self.operandEncoding[b0] + value, index = handler(self, b0, data, index) + if value is not None: + push(value) + + def pop(self): + value = self.stack[-1] + del 
self.stack[-1] + return value + + def popall(self): + args = self.stack[:] + del self.stack[:] + return args + + def handle_operator(self, operator): + operator, argType = operator + if isinstance(argType, type(())): + value = () + for i in range(len(argType)-1, -1, -1): + arg = argType[i] + arghandler = getattr(self, "arg_" + arg) + value = (arghandler(operator),) + value + else: + arghandler = getattr(self, "arg_" + argType) + value = arghandler(operator) + self.dict[operator] = value + + def arg_number(self, name): + return self.pop() + def arg_SID(self, name): + return self.strings[self.pop()] + def arg_array(self, name): + return self.popall() + def arg_delta(self, name): + out = [] + current = 0 + for v in self.popall(): + current = current + v + out.append(current) + return out + + +def calcSubrBias(subrs): + nSubrs = len(subrs) + if nSubrs < 1240: + bias = 107 + elif nSubrs < 33900: + bias = 1131 + else: + bias = 32768 + return bias diff -Nru fonttools-2.4/Snippets/fontTools/misc/psLib.py fonttools-3.0/Snippets/fontTools/misc/psLib.py --- fonttools-2.4/Snippets/fontTools/misc/psLib.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/psLib.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,350 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import eexec +from .psOperators import * +import re +import collections +from string import whitespace + + +ps_special = '()<>[]{}%' # / is one too, but we take care of that one differently + +skipwhiteRE = re.compile("[%s]*" % whitespace) +endofthingPat = "[^][(){}<>/%%%s]*" % whitespace +endofthingRE = re.compile(endofthingPat) +commentRE = re.compile("%[^\n\r]*") + +# XXX This not entirely correct as it doesn't allow *nested* embedded parens: +stringPat = r""" + \( + ( + ( + [^()]* \ [()] + ) + | + ( + [^()]* \( [^()]* \) + ) + )* + [^()]* + \) +""" +stringPat = "".join(stringPat.split()) +stringRE = 
re.compile(stringPat) + +hexstringRE = re.compile("<[%s0-9A-Fa-f]*>" % whitespace) + +class PSTokenError(Exception): pass +class PSError(Exception): pass + + +class PSTokenizer(BytesIO): + + def getnexttoken(self, + # localize some stuff, for performance + len=len, + ps_special=ps_special, + stringmatch=stringRE.match, + hexstringmatch=hexstringRE.match, + commentmatch=commentRE.match, + endmatch=endofthingRE.match, + whitematch=skipwhiteRE.match): + + _, nextpos = whitematch(self.buf, self.pos).span() + self.pos = nextpos + if self.pos >= self.len: + return None, None + pos = self.pos + buf = self.buf + char = buf[pos] + if char in ps_special: + if char in '{}[]': + tokentype = 'do_special' + token = char + elif char == '%': + tokentype = 'do_comment' + _, nextpos = commentmatch(buf, pos).span() + token = buf[pos:nextpos] + elif char == '(': + tokentype = 'do_string' + m = stringmatch(buf, pos) + if m is None: + raise PSTokenError('bad string at character %d' % pos) + _, nextpos = m.span() + token = buf[pos:nextpos] + elif char == '<': + tokentype = 'do_hexstring' + m = hexstringmatch(buf, pos) + if m is None: + raise PSTokenError('bad hexstring at character %d' % pos) + _, nextpos = m.span() + token = buf[pos:nextpos] + else: + raise PSTokenError('bad token at character %d' % pos) + else: + if char == '/': + tokentype = 'do_literal' + m = endmatch(buf, pos+1) + else: + tokentype = '' + m = endmatch(buf, pos) + if m is None: + raise PSTokenError('bad token at character %d' % pos) + _, nextpos = m.span() + token = buf[pos:nextpos] + self.pos = pos + len(token) + return tokentype, token + + def skipwhite(self, whitematch=skipwhiteRE.match): + _, nextpos = whitematch(self.buf, self.pos).span() + self.pos = nextpos + + def starteexec(self): + self.pos = self.pos + 1 + #self.skipwhite() + self.dirtybuf = self.buf[self.pos:] + self.buf, R = eexec.decrypt(self.dirtybuf, 55665) + self.len = len(self.buf) + self.pos = 4 + + def stopeexec(self): + if not hasattr(self, 
'dirtybuf'): + return + self.buf = self.dirtybuf + del self.dirtybuf + + def flush(self): + if self.buflist: + self.buf = self.buf + "".join(self.buflist) + self.buflist = [] + + +class PSInterpreter(PSOperators): + + def __init__(self): + systemdict = {} + userdict = {} + self.dictstack = [systemdict, userdict] + self.stack = [] + self.proclevel = 0 + self.procmark = ps_procmark() + self.fillsystemdict() + + def fillsystemdict(self): + systemdict = self.dictstack[0] + systemdict['['] = systemdict['mark'] = self.mark = ps_mark() + systemdict[']'] = ps_operator(']', self.do_makearray) + systemdict['true'] = ps_boolean(1) + systemdict['false'] = ps_boolean(0) + systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding) + systemdict['FontDirectory'] = ps_dict({}) + self.suckoperators(systemdict, self.__class__) + + def suckoperators(self, systemdict, klass): + for name in dir(klass): + attr = getattr(self, name) + if isinstance(attr, collections.Callable) and name[:3] == 'ps_': + name = name[3:] + systemdict[name] = ps_operator(name, attr) + for baseclass in klass.__bases__: + self.suckoperators(systemdict, baseclass) + + def interpret(self, data, getattr=getattr): + tokenizer = self.tokenizer = PSTokenizer(data) + getnexttoken = tokenizer.getnexttoken + do_token = self.do_token + handle_object = self.handle_object + try: + while 1: + tokentype, token = getnexttoken() + #print token + if not token: + break + if tokentype: + handler = getattr(self, tokentype) + object = handler(token) + else: + object = do_token(token) + if object is not None: + handle_object(object) + tokenizer.close() + self.tokenizer = None + finally: + if self.tokenizer is not None: + if 0: + print('ps error:\n- - - - - - -') + print(self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos]) + print('>>>') + print(self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50]) + print('- - - - - - -') + + def handle_object(self, object): + if not (self.proclevel or object.literal or 
object.type == 'proceduretype'): + if object.type != 'operatortype': + object = self.resolve_name(object.value) + if object.literal: + self.push(object) + else: + if object.type == 'proceduretype': + self.call_procedure(object) + else: + object.function() + else: + self.push(object) + + def call_procedure(self, proc): + handle_object = self.handle_object + for item in proc.value: + handle_object(item) + + def resolve_name(self, name): + dictstack = self.dictstack + for i in range(len(dictstack)-1, -1, -1): + if name in dictstack[i]: + return dictstack[i][name] + raise PSError('name error: ' + str(name)) + + def do_token(self, token, + int=int, + float=float, + ps_name=ps_name, + ps_integer=ps_integer, + ps_real=ps_real): + try: + num = int(token) + except (ValueError, OverflowError): + try: + num = float(token) + except (ValueError, OverflowError): + if '#' in token: + hashpos = token.find('#') + try: + base = int(token[:hashpos]) + num = int(token[hashpos+1:], base) + except (ValueError, OverflowError): + return ps_name(token) + else: + return ps_integer(num) + else: + return ps_name(token) + else: + return ps_real(num) + else: + return ps_integer(num) + + def do_comment(self, token): + pass + + def do_literal(self, token): + return ps_literal(token[1:]) + + def do_string(self, token): + return ps_string(token[1:-1]) + + def do_hexstring(self, token): + hexStr = "".join(token[1:-1].split()) + if len(hexStr) % 2: + hexStr = hexStr + '0' + cleanstr = [] + for i in range(0, len(hexStr), 2): + cleanstr.append(chr(int(hexStr[i:i+2], 16))) + cleanstr = "".join(cleanstr) + return ps_string(cleanstr) + + def do_special(self, token): + if token == '{': + self.proclevel = self.proclevel + 1 + return self.procmark + elif token == '}': + proc = [] + while 1: + topobject = self.pop() + if topobject == self.procmark: + break + proc.append(topobject) + self.proclevel = self.proclevel - 1 + proc.reverse() + return ps_procedure(proc) + elif token == '[': + return self.mark + elif 
token == ']': + return ps_name(']') + else: + raise PSTokenError('huh?') + + def push(self, object): + self.stack.append(object) + + def pop(self, *types): + stack = self.stack + if not stack: + raise PSError('stack underflow') + object = stack[-1] + if types: + if object.type not in types: + raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type)) + del stack[-1] + return object + + def do_makearray(self): + array = [] + while 1: + topobject = self.pop() + if topobject == self.mark: + break + array.append(topobject) + array.reverse() + self.push(ps_array(array)) + + def close(self): + """Remove circular references.""" + del self.stack + del self.dictstack + + +def unpack_item(item): + tp = type(item.value) + if tp == dict: + newitem = {} + for key, value in item.value.items(): + newitem[key] = unpack_item(value) + elif tp == list: + newitem = [None] * len(item.value) + for i in range(len(item.value)): + newitem[i] = unpack_item(item.value[i]) + if item.type == 'proceduretype': + newitem = tuple(newitem) + else: + newitem = item.value + return newitem + +def suckfont(data): + m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data) + if m: + fontName = m.group(1) + else: + fontName = None + interpreter = PSInterpreter() + interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop") + interpreter.interpret(data) + fontdir = interpreter.dictstack[0]['FontDirectory'].value + if fontName in fontdir: + rawfont = fontdir[fontName] + else: + # fall back, in case fontName wasn't found + fontNames = list(fontdir.keys()) + if len(fontNames) > 1: + fontNames.remove("Helvetica") + fontNames.sort() + rawfont = fontdir[fontNames[0]] + interpreter.close() + return unpack_item(rawfont) + + +if __name__ == "__main__": + import EasyDialogs + path = EasyDialogs.AskFileForOpen() + if path: + from fontTools import t1Lib + data, kind = t1Lib.read(path) + font = suckfont(data) diff -Nru 
fonttools-2.4/Snippets/fontTools/misc/psOperators.py fonttools-3.0/Snippets/fontTools/misc/psOperators.py --- fonttools-2.4/Snippets/fontTools/misc/psOperators.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/psOperators.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,540 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +_accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"} + + +class ps_object(object): + + literal = 1 + access = 0 + value = None + + def __init__(self, value): + self.value = value + self.type = self.__class__.__name__[3:] + "type" + + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__[3:], repr(self.value)) + + +class ps_operator(ps_object): + + literal = 0 + + def __init__(self, name, function): + self.name = name + self.function = function + self.type = self.__class__.__name__[3:] + "type" + def __repr__(self): + return "<operator %s>" % self.name + +class ps_procedure(ps_object): + literal = 0 + def __repr__(self): + return "<procedure>" + def __str__(self): + psstring = '{' + for i in range(len(self.value)): + if i: + psstring = psstring + ' ' + str(self.value[i]) + else: + psstring = psstring + str(self.value[i]) + return psstring + '}' + +class ps_name(ps_object): + literal = 0 + def __str__(self): + if self.literal: + return '/' + self.value + else: + return self.value + +class ps_literal(ps_object): + def __str__(self): + return '/' + self.value + +class ps_array(ps_object): + def __str__(self): + psstring = '[' + for i in range(len(self.value)): + item = self.value[i] + access = _accessstrings[item.access] + if access: + access = ' ' + access + if i: + psstring = psstring + ' ' + str(item) + access + else: + psstring = psstring + str(item) + access + return psstring + ']' + def __repr__(self): + return "<array>" + +_type1_pre_eexec_order = [ + "FontInfo", + "FontName", + "Encoding", + "PaintType", + "FontType", + 
"FontMatrix", + "FontBBox", + "UniqueID", + "Metrics", + "StrokeWidth" + ] + +_type1_fontinfo_order = [ + "version", + "Notice", + "FullName", + "FamilyName", + "Weight", + "ItalicAngle", + "isFixedPitch", + "UnderlinePosition", + "UnderlineThickness" + ] + +_type1_post_eexec_order = [ + "Private", + "CharStrings", + "FID" + ] + +def _type1_item_repr(key, value): + psstring = "" + access = _accessstrings[value.access] + if access: + access = access + ' ' + if key == 'CharStrings': + psstring = psstring + "/%s %s def\n" % (key, _type1_CharString_repr(value.value)) + elif key == 'Encoding': + psstring = psstring + _type1_Encoding_repr(value, access) + else: + psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access) + return psstring + +def _type1_Encoding_repr(encoding, access): + encoding = encoding.value + psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n" + for i in range(256): + name = encoding[i].value + if name != '.notdef': + psstring = psstring + "dup %d /%s put\n" % (i, name) + return psstring + access + "def\n" + +def _type1_CharString_repr(charstrings): + items = sorted(charstrings.items()) + return 'xxx' + +class ps_font(ps_object): + def __str__(self): + psstring = "%d dict dup begin\n" % len(self.value) + for key in _type1_pre_eexec_order: + try: + value = self.value[key] + except KeyError: + pass + else: + psstring = psstring + _type1_item_repr(key, value) + items = sorted(self.value.items()) + for key, value in items: + if key not in _type1_pre_eexec_order + _type1_post_eexec_order: + psstring = psstring + _type1_item_repr(key, value) + psstring = psstring + "currentdict end\ncurrentfile eexec\ndup " + for key in _type1_post_eexec_order: + try: + value = self.value[key] + except KeyError: + pass + else: + psstring = psstring + _type1_item_repr(key, value) + return psstring + 'dup/FontName get exch definefont pop\nmark currentfile closefile\n' + \ + 8 * (64 * '0' + '\n') + 'cleartomark' + '\n' + def 
__repr__(self): + return '<font>' + +class ps_file(ps_object): + pass + +class ps_dict(ps_object): + def __str__(self): + psstring = "%d dict dup begin\n" % len(self.value) + items = sorted(self.value.items()) + for key, value in items: + access = _accessstrings[value.access] + if access: + access = access + ' ' + psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access) + return psstring + 'end ' + def __repr__(self): + return "<dict>" + +class ps_mark(ps_object): + def __init__(self): + self.value = 'mark' + self.type = self.__class__.__name__[3:] + "type" + +class ps_procmark(ps_object): + def __init__(self): + self.value = 'procmark' + self.type = self.__class__.__name__[3:] + "type" + +class ps_null(ps_object): + def __init__(self): + self.type = self.__class__.__name__[3:] + "type" + +class ps_boolean(ps_object): + def __str__(self): + if self.value: + return 'true' + else: + return 'false' + +class ps_string(ps_object): + def __str__(self): + return "(%s)" % repr(self.value)[1:-1] + +class ps_integer(ps_object): + def __str__(self): + return repr(self.value) + +class ps_real(ps_object): + def __str__(self): + return repr(self.value) + + +class PSOperators(object): + + def ps_def(self): + obj = self.pop() + name = self.pop() + self.dictstack[-1][name.value] = obj + + def ps_bind(self): + proc = self.pop('proceduretype') + self.proc_bind(proc) + self.push(proc) + + def proc_bind(self, proc): + for i in range(len(proc.value)): + item = proc.value[i] + if item.type == 'proceduretype': + self.proc_bind(item) + else: + if not item.literal: + try: + obj = self.resolve_name(item.value) + except: + pass + else: + if obj.type == 'operatortype': + proc.value[i] = obj + + def ps_exch(self): + if len(self.stack) < 2: + raise RuntimeError('stack underflow') + obj1 = self.pop() + obj2 = self.pop() + self.push(obj1) + self.push(obj2) + + def ps_dup(self): + if not self.stack: + raise RuntimeError('stack underflow') + self.push(self.stack[-1]) + + def 
ps_exec(self): + obj = self.pop() + if obj.type == 'proceduretype': + self.call_procedure(obj) + else: + self.handle_object(obj) + + def ps_count(self): + self.push(ps_integer(len(self.stack))) + + def ps_eq(self): + any1 = self.pop() + any2 = self.pop() + self.push(ps_boolean(any1.value == any2.value)) + + def ps_ne(self): + any1 = self.pop() + any2 = self.pop() + self.push(ps_boolean(any1.value != any2.value)) + + def ps_cvx(self): + obj = self.pop() + obj.literal = 0 + self.push(obj) + + def ps_matrix(self): + matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)] + self.push(ps_array(matrix)) + + def ps_string(self): + num = self.pop('integertype').value + self.push(ps_string('\0' * num)) + + def ps_type(self): + obj = self.pop() + self.push(ps_string(obj.type)) + + def ps_store(self): + value = self.pop() + key = self.pop() + name = key.value + for i in range(len(self.dictstack)-1, -1, -1): + if name in self.dictstack[i]: + self.dictstack[i][name] = value + break + self.dictstack[-1][name] = value + + def ps_where(self): + name = self.pop() + # XXX + self.push(ps_boolean(0)) + + def ps_systemdict(self): + self.push(ps_dict(self.dictstack[0])) + + def ps_userdict(self): + self.push(ps_dict(self.dictstack[1])) + + def ps_currentdict(self): + self.push(ps_dict(self.dictstack[-1])) + + def ps_currentfile(self): + self.push(ps_file(self.tokenizer)) + + def ps_eexec(self): + f = self.pop('filetype').value + f.starteexec() + + def ps_closefile(self): + f = self.pop('filetype').value + f.skipwhite() + f.stopeexec() + + def ps_cleartomark(self): + obj = self.pop() + while obj != self.mark: + obj = self.pop() + + def ps_readstring(self, + ps_boolean=ps_boolean, + len=len): + s = self.pop('stringtype') + oldstr = s.value + f = self.pop('filetype') + #pad = file.value.read(1) + # for StringIO, this is faster + f.value.pos = f.value.pos + 1 + newstr = f.value.read(len(oldstr)) + s.value = newstr + self.push(s) + 
self.push(ps_boolean(len(oldstr) == len(newstr))) + + def ps_known(self): + key = self.pop() + d = self.pop('dicttype', 'fonttype') + self.push(ps_boolean(key.value in d.value)) + + def ps_if(self): + proc = self.pop('proceduretype') + if self.pop('booleantype').value: + self.call_procedure(proc) + + def ps_ifelse(self): + proc2 = self.pop('proceduretype') + proc1 = self.pop('proceduretype') + if self.pop('booleantype').value: + self.call_procedure(proc1) + else: + self.call_procedure(proc2) + + def ps_readonly(self): + obj = self.pop() + if obj.access < 1: + obj.access = 1 + self.push(obj) + + def ps_executeonly(self): + obj = self.pop() + if obj.access < 2: + obj.access = 2 + self.push(obj) + + def ps_noaccess(self): + obj = self.pop() + if obj.access < 3: + obj.access = 3 + self.push(obj) + + def ps_not(self): + obj = self.pop('booleantype', 'integertype') + if obj.type == 'booleantype': + self.push(ps_boolean(not obj.value)) + else: + self.push(ps_integer(~obj.value)) + + def ps_print(self): + str = self.pop('stringtype') + print('PS output --->', str.value) + + def ps_anchorsearch(self): + seek = self.pop('stringtype') + s = self.pop('stringtype') + seeklen = len(seek.value) + if s.value[:seeklen] == seek.value: + self.push(ps_string(s.value[seeklen:])) + self.push(seek) + self.push(ps_boolean(1)) + else: + self.push(s) + self.push(ps_boolean(0)) + + def ps_array(self): + num = self.pop('integertype') + array = ps_array([None] * num.value) + self.push(array) + + def ps_astore(self): + array = self.pop('arraytype') + for i in range(len(array.value)-1, -1, -1): + array.value[i] = self.pop() + self.push(array) + + def ps_load(self): + name = self.pop() + self.push(self.resolve_name(name.value)) + + def ps_put(self): + obj1 = self.pop() + obj2 = self.pop() + obj3 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype') + tp = obj3.type + if tp == 'arraytype' or tp == 'proceduretype': + obj3.value[obj2.value] = obj1 + elif tp == 'dicttype': + 
obj3.value[obj2.value] = obj1 + elif tp == 'stringtype': + index = obj2.value + obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index+1:] + + def ps_get(self): + obj1 = self.pop() + if obj1.value == "Encoding": + pass + obj2 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype', 'fonttype') + tp = obj2.type + if tp in ('arraytype', 'proceduretype'): + self.push(obj2.value[obj1.value]) + elif tp in ('dicttype', 'fonttype'): + self.push(obj2.value[obj1.value]) + elif tp == 'stringtype': + self.push(ps_integer(ord(obj2.value[obj1.value]))) + else: + assert False, "shouldn't get here" + + def ps_getinterval(self): + obj1 = self.pop('integertype') + obj2 = self.pop('integertype') + obj3 = self.pop('arraytype', 'stringtype') + tp = obj3.type + if tp == 'arraytype': + self.push(ps_array(obj3.value[obj2.value:obj2.value + obj1.value])) + elif tp == 'stringtype': + self.push(ps_string(obj3.value[obj2.value:obj2.value + obj1.value])) + + def ps_putinterval(self): + obj1 = self.pop('arraytype', 'stringtype') + obj2 = self.pop('integertype') + obj3 = self.pop('arraytype', 'stringtype') + tp = obj3.type + if tp == 'arraytype': + obj3.value[obj2.value:obj2.value + len(obj1.value)] = obj1.value + elif tp == 'stringtype': + newstr = obj3.value[:obj2.value] + newstr = newstr + obj1.value + newstr = newstr + obj3.value[obj2.value + len(obj1.value):] + obj3.value = newstr + + def ps_cvn(self): + self.push(ps_name(self.pop('stringtype').value)) + + def ps_index(self): + n = self.pop('integertype').value + if n < 0: + raise RuntimeError('index may not be negative') + self.push(self.stack[-1-n]) + + def ps_for(self): + proc = self.pop('proceduretype') + limit = self.pop('integertype', 'realtype').value + increment = self.pop('integertype', 'realtype').value + i = self.pop('integertype', 'realtype').value + while 1: + if increment > 0: + if i > limit: + break + else: + if i < limit: + break + if type(i) == type(0.0): + self.push(ps_real(i)) + else: + 
self.push(ps_integer(i)) + self.call_procedure(proc) + i = i + increment + + def ps_forall(self): + proc = self.pop('proceduretype') + obj = self.pop('arraytype', 'stringtype', 'dicttype') + tp = obj.type + if tp == 'arraytype': + for item in obj.value: + self.push(item) + self.call_procedure(proc) + elif tp == 'stringtype': + for item in obj.value: + self.push(ps_integer(ord(item))) + self.call_procedure(proc) + elif tp == 'dicttype': + for key, value in obj.value.items(): + self.push(ps_name(key)) + self.push(value) + self.call_procedure(proc) + + def ps_definefont(self): + font = self.pop('dicttype') + name = self.pop() + font = ps_font(font.value) + self.dictstack[0]['FontDirectory'].value[name.value] = font + self.push(font) + + def ps_findfont(self): + name = self.pop() + font = self.dictstack[0]['FontDirectory'].value[name.value] + self.push(font) + + def ps_pop(self): + self.pop() + + def ps_dict(self): + self.pop('integertype') + self.push(ps_dict({})) + + def ps_begin(self): + self.dictstack.append(self.pop('dicttype').value) + + def ps_end(self): + if len(self.dictstack) > 2: + del self.dictstack[-1] + else: + raise RuntimeError('dictstack underflow') + +notdef = '.notdef' +from fontTools.encodings.StandardEncoding import StandardEncoding +ps_StandardEncoding = list(map(ps_name, StandardEncoding)) diff -Nru fonttools-2.4/Snippets/fontTools/misc/py23.py fonttools-3.0/Snippets/fontTools/misc/py23.py --- fonttools-2.4/Snippets/fontTools/misc/py23.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/py23.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,161 @@ +"""Python 2/3 compat layer.""" + +from __future__ import print_function, division, absolute_import +import sys + +try: + basestring +except NameError: + basestring = str + +try: + unicode +except NameError: + unicode = str + +try: + unichr + + if sys.maxunicode < 0x10FFFF: + # workarounds for Python 2 "narrow" builds with UCS2-only support. 
+ + _narrow_unichr = unichr + + def unichr(i): + """ + Return the unicode character whose Unicode code is the integer 'i'. + The valid range is 0 to 0x10FFFF inclusive. + + >>> _narrow_unichr(0xFFFF + 1) + Traceback (most recent call last): + File "<stdin>", line 1, in ? + ValueError: unichr() arg not in range(0x10000) (narrow Python build) + >>> unichr(0xFFFF + 1) == u'\U00010000' + True + >>> unichr(1114111) == u'\U0010FFFF' + True + >>> unichr(0x10FFFF + 1) + Traceback (most recent call last): + File "<stdin>", line 1, in ? + ValueError: unichr() arg not in range(0x110000) + """ + try: + return _narrow_unichr(i) + except ValueError: + try: + padded_hex_str = hex(i)[2:].zfill(8) + escape_str = "\\U" + padded_hex_str + return escape_str.decode("unicode-escape") + except UnicodeDecodeError: + raise ValueError('unichr() arg not in range(0x110000)') + + import re + _unicode_escape_RE = re.compile(r'\\U[A-Fa-f0-9]{8}') + + def byteord(c): + """ + Given a 8-bit or unicode character, return an integer representing the + Unicode code point of the character. If a unicode argument is given, the + character's code point must be in the range 0 to 0x10FFFF inclusive. + + >>> ord(u'\U00010000') + Traceback (most recent call last): + File "<stdin>", line 1, in ? + TypeError: ord() expected a character, but string of length 2 found + >>> byteord(u'\U00010000') == 0xFFFF + 1 + True + >>> byteord(u'\U0010FFFF') == 1114111 + True + """ + try: + return ord(c) + except TypeError as e: + try: + escape_str = c.encode('unicode-escape') + if not _unicode_escape_RE.match(escape_str): + raise + hex_str = escape_str[3:] + return int(hex_str, 16) + except: + raise TypeError(e) + + else: + byteord = ord + bytechr = chr + +except NameError: + unichr = chr + def bytechr(n): + return bytes([n]) + def byteord(c): + return c if isinstance(c, int) else ord(c) + + +# the 'io' module provides the same I/O interface on both 2 and 3. 
+# here we define an alias of io.StringIO to disambiguate it eternally... +from io import BytesIO +from io import StringIO as UnicodeIO +try: + # in python 2, by 'StringIO' we still mean a stream of *byte* strings + from StringIO import StringIO +except ImportError: + # in Python 3, we mean instead a stream of *unicode* strings + StringIO = UnicodeIO + + +def strjoin(iterable, joiner=''): + return tostr(joiner).join(iterable) + +def tobytes(s, encoding='ascii', errors='strict'): + if not isinstance(s, bytes): + return s.encode(encoding, errors) + else: + return s +def tounicode(s, encoding='ascii', errors='strict'): + if not isinstance(s, unicode): + return s.decode(encoding, errors) + else: + return s + +if str == bytes: + class Tag(str): + def tobytes(self): + if isinstance(self, bytes): + return self + else: + return self.encode('latin1') + + tostr = tobytes + + bytesjoin = strjoin +else: + class Tag(str): + + @staticmethod + def transcode(blob): + if not isinstance(blob, str): + blob = blob.decode('latin-1') + return blob + + def __new__(self, content): + return str.__new__(self, self.transcode(content)) + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + return str.__eq__(self, self.transcode(other)) + + def __hash__(self): + return str.__hash__(self) + + def tobytes(self): + return self.encode('latin-1') + + tostr = tounicode + + def bytesjoin(iterable, joiner=b''): + return tobytes(joiner).join(tobytes(item) for item in iterable) + + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Snippets/fontTools/misc/sstruct.py fonttools-3.0/Snippets/fontTools/misc/sstruct.py --- fonttools-2.4/Snippets/fontTools/misc/sstruct.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/sstruct.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,211 @@ +"""sstruct.py -- SuperStruct + +Higher level layer on top of the struct module, enabling to +bind names to 
struct elements. The interface is similar to +struct, except the objects passed and returned are not tuples +(or argument lists), but dictionaries or instances. + +Just like struct, we use fmt strings to describe a data +structure, except we use one line per element. Lines are +separated by newlines or semi-colons. Each line contains +either one of the special struct characters ('@', '=', '<', +'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f'). +Repetitions, like the struct module offers them are not useful +in this context, except for fixed length strings (eg. 'myInt:5h' +is not allowed but 'myString:5s' is). The 'x' fmt character +(pad byte) is treated as 'special', since it is by definition +anonymous. Extra whitespace is allowed everywhere. + +The sstruct module offers one feature that the "normal" struct +module doesn't: support for fixed point numbers. These are spelled +as "n.mF", where n is the number of bits before the point, and m +the number of bits after the point. Fixed point numbers get +converted to floats. + +pack(fmt, object): + 'object' is either a dictionary or an instance (or actually + anything that has a __dict__ attribute). If it is a dictionary, + its keys are used for names. If it is an instance, it's + attributes are used to grab struct elements from. Returns + a string containing the data. + +unpack(fmt, data, object=None) + If 'object' is omitted (or None), a new dictionary will be + returned. If 'object' is a dictionary, it will be used to add + struct elements to. If it is an instance (or in fact anything + that has a __dict__ attribute), an attribute will be added for + each struct element. In the latter two cases, 'object' itself + is returned. + +unpack2(fmt, data, object=None) + Convenience function. Same as unpack, except data may be longer + than needed. The returned value is a tuple: (object, leftoverdata). 
+ +calcsize(fmt) + like struct.calcsize(), but uses our own fmt strings: + it returns the size of the data in bytes. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +import struct +import re + +__version__ = "1.2" +__copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>" + + +class Error(Exception): + pass + +def pack(fmt, obj): + formatstring, names, fixes = getformat(fmt) + elements = [] + if not isinstance(obj, dict): + obj = obj.__dict__ + for name in names: + value = obj[name] + if name in fixes: + # fixed point conversion + value = fl2fi(value, fixes[name]) + elif isinstance(value, basestring): + value = tobytes(value) + elements.append(value) + data = struct.pack(*(formatstring,) + tuple(elements)) + return data + +def unpack(fmt, data, obj=None): + if obj is None: + obj = {} + data = tobytes(data) + formatstring, names, fixes = getformat(fmt) + if isinstance(obj, dict): + d = obj + else: + d = obj.__dict__ + elements = struct.unpack(formatstring, data) + for i in range(len(names)): + name = names[i] + value = elements[i] + if name in fixes: + # fixed point conversion + value = fi2fl(value, fixes[name]) + elif isinstance(value, bytes): + try: + value = tostr(value) + except UnicodeDecodeError: + pass + d[name] = value + return obj + +def unpack2(fmt, data, obj=None): + length = calcsize(fmt) + return unpack(fmt, data[:length], obj), data[length:] + +def calcsize(fmt): + formatstring, names, fixes = getformat(fmt) + return struct.calcsize(formatstring) + + +# matches "name:formatchar" (whitespace is allowed) +_elementRE = re.compile( + "\s*" # whitespace + "([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier) + "\s*:\s*" # whitespace : whitespace + "([cbBhHiIlLqQfd]|[0-9]+[ps]|" # formatchar... 
+ "([0-9]+)\.([0-9]+)(F))" # ...formatchar + "\s*" # whitespace + "(#.*)?$" # [comment] + end of string + ) + +# matches the special struct fmt chars and 'x' (pad byte) +_extraRE = re.compile("\s*([x@=<>!])\s*(#.*)?$") + +# matches an "empty" string, possibly containing whitespace and/or a comment +_emptyRE = re.compile("\s*(#.*)?$") + +_fixedpointmappings = { + 8: "b", + 16: "h", + 32: "l"} + +_formatcache = {} + +def getformat(fmt): + try: + formatstring, names, fixes = _formatcache[fmt] + except KeyError: + lines = re.split("[\n;]", fmt) + formatstring = "" + names = [] + fixes = {} + for line in lines: + if _emptyRE.match(line): + continue + m = _extraRE.match(line) + if m: + formatchar = m.group(1) + if formatchar != 'x' and formatstring: + raise Error("a special fmt char must be first") + else: + m = _elementRE.match(line) + if not m: + raise Error("syntax error in fmt: '%s'" % line) + name = m.group(1) + names.append(name) + formatchar = m.group(2) + if m.group(3): + # fixed point + before = int(m.group(3)) + after = int(m.group(4)) + bits = before + after + if bits not in [8, 16, 32]: + raise Error("fixed point must be 8, 16 or 32 bits long") + formatchar = _fixedpointmappings[bits] + assert m.group(5) == "F" + fixes[name] = after + formatstring = formatstring + formatchar + _formatcache[fmt] = formatstring, names, fixes + return formatstring, names, fixes + +def _test(): + fmt = """ + # comments are allowed + > # big endian (see documentation for struct) + # empty lines are allowed: + + ashort: h + along: l + abyte: b # a byte + achar: c + astr: 5s + afloat: f; adouble: d # multiple "statements" are allowed + afixed: 16.16F + """ + + print('size:', calcsize(fmt)) + + class foo(object): + pass + + i = foo() + + i.ashort = 0x7fff + i.along = 0x7fffffff + i.abyte = 0x7f + i.achar = "a" + i.astr = "12345" + i.afloat = 0.5 + i.adouble = 0.5 + i.afixed = 1.5 + + data = pack(fmt, i) + print('data:', repr(data)) + print(unpack(fmt, data)) + i2 = foo() + 
unpack(fmt, data, i2) + print(vars(i2)) + +if __name__ == "__main__": + _test() diff -Nru fonttools-2.4/Snippets/fontTools/misc/textTools.py fonttools-3.0/Snippets/fontTools/misc/textTools.py --- fonttools-2.4/Snippets/fontTools/misc/textTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/textTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,101 @@ +"""fontTools.misc.textTools.py -- miscellaneous routines.""" + + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import string + + +def safeEval(data, eval=eval): + """A (kindof) safe replacement for eval.""" + return eval(data, {"__builtins__":{"True":True,"False":False}}) + + +def readHex(content): + """Convert a list of hex strings to binary data.""" + return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, basestring))) + +def deHexStr(hexdata): + """Convert a hex string to binary data.""" + hexdata = strjoin(hexdata.split()) + if len(hexdata) % 2: + hexdata = hexdata + "0" + data = [] + for i in range(0, len(hexdata), 2): + data.append(bytechr(int(hexdata[i:i+2], 16))) + return bytesjoin(data) + + +def hexStr(data): + """Convert binary data to a hex string.""" + h = string.hexdigits + r = '' + for c in data: + i = byteord(c) + r = r + h[(i >> 4) & 0xF] + h[i & 0xF] + return r + + +def num2binary(l, bits=32): + items = [] + binary = "" + for i in range(bits): + if l & 0x1: + binary = "1" + binary + else: + binary = "0" + binary + l = l >> 1 + if not ((i+1) % 8): + items.append(binary) + binary = "" + if binary: + items.append(binary) + items.reverse() + assert l in (0, -1), "number doesn't fit in number of bits" + return ' '.join(items) + + +def binary2num(bin): + bin = strjoin(bin.split()) + l = 0 + for digit in bin: + l = l << 1 + if digit != "0": + l = l | 0x1 + return l + + +def caselessSort(alist): + """Return a sorted copy of a list. If there are only strings + in the list, it will not consider case. 
+ """ + + try: + return sorted(alist, key=lambda a: (a.lower(), a)) + except TypeError: + return sorted(alist) + + +def pad(data, size): + r""" Pad byte string 'data' with null bytes until its length is a + multiple of 'size'. + + >>> len(pad(b'abcd', 4)) + 4 + >>> len(pad(b'abcde', 2)) + 6 + >>> len(pad(b'abcde', 4)) + 8 + >>> pad(b'abcdef', 4) == b'abcdef\x00\x00' + True + """ + data = tobytes(data) + if size > 1: + while len(data) % size != 0: + data += b"\0" + return data + + +if __name__ == "__main__": + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Snippets/fontTools/misc/timeTools.py fonttools-3.0/Snippets/fontTools/misc/timeTools.py --- fonttools-2.4/Snippets/fontTools/misc/timeTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/timeTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,22 @@ +"""fontTools.misc.timeTools.py -- tools for working with OpenType timestamps. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import time +import calendar + + +epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0)) + +def timestampToString(value): + return time.asctime(time.gmtime(max(0, value + epoch_diff))) + +def timestampFromString(value): + return calendar.timegm(time.strptime(value)) - epoch_diff + +def timestampNow(): + return int(time.time() - epoch_diff) + +def timestampSinceEpoch(value): + return int(value - epoch_diff) diff -Nru fonttools-2.4/Snippets/fontTools/misc/transform.py fonttools-3.0/Snippets/fontTools/misc/transform.py --- fonttools-2.4/Snippets/fontTools/misc/transform.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/transform.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,357 @@ +"""Affine 2D transformation matrix class. + +The Transform class implements various transformation matrix operations, +both on the matrix itself, as well as on 2D coordinates. 
+ +Transform instances are effectively immutable: all methods that operate on the +transformation itself always return a new instance. This has as the +interesting side effect that Transform instances are hashable, ie. they can be +used as dictionary keys. + +This module exports the following symbols: + + Transform -- this is the main class + Identity -- Transform instance set to the identity transformation + Offset -- Convenience function that returns a translating transformation + Scale -- Convenience function that returns a scaling transformation + +Examples: + + >>> t = Transform(2, 0, 0, 3, 0, 0) + >>> t.transformPoint((100, 100)) + (200, 300) + >>> t = Scale(2, 3) + >>> t.transformPoint((100, 100)) + (200, 300) + >>> t.transformPoint((0, 0)) + (0, 0) + >>> t = Offset(2, 3) + >>> t.transformPoint((100, 100)) + (102, 103) + >>> t.transformPoint((0, 0)) + (2, 3) + >>> t2 = t.scale(0.5) + >>> t2.transformPoint((100, 100)) + (52.0, 53.0) + >>> import math + >>> t3 = t2.rotate(math.pi / 2) + >>> t3.transformPoint((0, 0)) + (2.0, 3.0) + >>> t3.transformPoint((100, 100)) + (-48.0, 53.0) + >>> t = Identity.scale(0.5).translate(100, 200).skew(0.1, 0.2) + >>> t.transformPoints([(0, 0), (1, 1), (100, 100)]) + [(50.0, 100.0), (50.550167336042726, 100.60135501775433), (105.01673360427253, 160.13550177543362)] + >>> +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = ["Transform", "Identity", "Offset", "Scale"] + + +_EPSILON = 1e-15 +_ONE_EPSILON = 1 - _EPSILON +_MINUS_ONE_EPSILON = -1 + _EPSILON + + +def _normSinCos(v): + if abs(v) < _EPSILON: + v = 0 + elif v > _ONE_EPSILON: + v = 1 + elif v < _MINUS_ONE_EPSILON: + v = -1 + return v + + +class Transform(object): + + """2x2 transformation matrix plus offset, a.k.a. Affine transform. + Transform instances are immutable: all transforming methods, eg. + rotate(), return a new Transform instance. 
+ + Examples: + >>> t = Transform() + >>> t + <Transform [1 0 0 1 0 0]> + >>> t.scale(2) + <Transform [2 0 0 2 0 0]> + >>> t.scale(2.5, 5.5) + <Transform [2.5 0 0 5.5 0 0]> + >>> + >>> t.scale(2, 3).transformPoint((100, 100)) + (200, 300) + """ + + def __init__(self, xx=1, xy=0, yx=0, yy=1, dx=0, dy=0): + """Transform's constructor takes six arguments, all of which are + optional, and can be used as keyword arguments: + >>> Transform(12) + <Transform [12 0 0 1 0 0]> + >>> Transform(dx=12) + <Transform [1 0 0 1 12 0]> + >>> Transform(yx=12) + <Transform [1 0 12 1 0 0]> + >>> + """ + self.__affine = xx, xy, yx, yy, dx, dy + + def transformPoint(self, p): + """Transform a point. + + Example: + >>> t = Transform() + >>> t = t.scale(2.5, 5.5) + >>> t.transformPoint((100, 100)) + (250.0, 550.0) + """ + (x, y) = p + xx, xy, yx, yy, dx, dy = self.__affine + return (xx*x + yx*y + dx, xy*x + yy*y + dy) + + def transformPoints(self, points): + """Transform a list of points. + + Example: + >>> t = Scale(2, 3) + >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)]) + [(0, 0), (0, 300), (200, 300), (200, 0)] + >>> + """ + xx, xy, yx, yy, dx, dy = self.__affine + return [(xx*x + yx*y + dx, xy*x + yy*y + dy) for x, y in points] + + def translate(self, x=0, y=0): + """Return a new transformation, translated (offset) by x, y. + + Example: + >>> t = Transform() + >>> t.translate(20, 30) + <Transform [1 0 0 1 20 30]> + >>> + """ + return self.transform((1, 0, 0, 1, x, y)) + + def scale(self, x=1, y=None): + """Return a new transformation, scaled by x, y. The 'y' argument + may be None, which implies to use the x value for y as well. + + Example: + >>> t = Transform() + >>> t.scale(5) + <Transform [5 0 0 5 0 0]> + >>> t.scale(5, 6) + <Transform [5 0 0 6 0 0]> + >>> + """ + if y is None: + y = x + return self.transform((x, 0, 0, y, 0, 0)) + + def rotate(self, angle): + """Return a new transformation, rotated by 'angle' (radians). 
+ + Example: + >>> import math + >>> t = Transform() + >>> t.rotate(math.pi / 2) + <Transform [0 1 -1 0 0 0]> + >>> + """ + import math + c = _normSinCos(math.cos(angle)) + s = _normSinCos(math.sin(angle)) + return self.transform((c, s, -s, c, 0, 0)) + + def skew(self, x=0, y=0): + """Return a new transformation, skewed by x and y. + + Example: + >>> import math + >>> t = Transform() + >>> t.skew(math.pi / 4) + <Transform [1 0 1 1 0 0]> + >>> + """ + import math + return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0)) + + def transform(self, other): + """Return a new transformation, transformed by another + transformation. + + Example: + >>> t = Transform(2, 0, 0, 3, 1, 6) + >>> t.transform((4, 3, 2, 1, 5, 6)) + <Transform [8 9 4 3 11 24]> + >>> + """ + xx1, xy1, yx1, yy1, dx1, dy1 = other + xx2, xy2, yx2, yy2, dx2, dy2 = self.__affine + return self.__class__( + xx1*xx2 + xy1*yx2, + xx1*xy2 + xy1*yy2, + yx1*xx2 + yy1*yx2, + yx1*xy2 + yy1*yy2, + xx2*dx1 + yx2*dy1 + dx2, + xy2*dx1 + yy2*dy1 + dy2) + + def reverseTransform(self, other): + """Return a new transformation, which is the other transformation + transformed by self. self.reverseTransform(other) is equivalent to + other.transform(self). + + Example: + >>> t = Transform(2, 0, 0, 3, 1, 6) + >>> t.reverseTransform((4, 3, 2, 1, 5, 6)) + <Transform [8 6 6 3 21 15]> + >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6)) + <Transform [8 6 6 3 21 15]> + >>> + """ + xx1, xy1, yx1, yy1, dx1, dy1 = self.__affine + xx2, xy2, yx2, yy2, dx2, dy2 = other + return self.__class__( + xx1*xx2 + xy1*yx2, + xx1*xy2 + xy1*yy2, + yx1*xx2 + yy1*yx2, + yx1*xy2 + yy1*yy2, + xx2*dx1 + yx2*dy1 + dx2, + xy2*dx1 + yy2*dy1 + dy2) + + def inverse(self): + """Return the inverse transformation. 
+ + Example: + >>> t = Identity.translate(2, 3).scale(4, 5) + >>> t.transformPoint((10, 20)) + (42, 103) + >>> it = t.inverse() + >>> it.transformPoint((42, 103)) + (10.0, 20.0) + >>> + """ + if self.__affine == (1, 0, 0, 1, 0, 0): + return self + xx, xy, yx, yy, dx, dy = self.__affine + det = xx*yy - yx*xy + xx, xy, yx, yy = yy/det, -xy/det, -yx/det, xx/det + dx, dy = -xx*dx - yx*dy, -xy*dx - yy*dy + return self.__class__(xx, xy, yx, yy, dx, dy) + + def toPS(self): + """Return a PostScript representation: + >>> t = Identity.scale(2, 3).translate(4, 5) + >>> t.toPS() + '[2 0 0 3 8 15]' + >>> + """ + return "[%s %s %s %s %s %s]" % self.__affine + + def __len__(self): + """Transform instances also behave like sequences of length 6: + >>> len(Identity) + 6 + >>> + """ + return 6 + + def __getitem__(self, index): + """Transform instances also behave like sequences of length 6: + >>> list(Identity) + [1, 0, 0, 1, 0, 0] + >>> tuple(Identity) + (1, 0, 0, 1, 0, 0) + >>> + """ + return self.__affine[index] + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + """Transform instances are comparable: + >>> t1 = Identity.scale(2, 3).translate(4, 6) + >>> t2 = Identity.translate(8, 18).scale(2, 3) + >>> t1 == t2 + 1 + >>> + + But beware of floating point rounding errors: + >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) + >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) + >>> t1 + <Transform [0.2 0 0 0.3 0.08 0.18]> + >>> t2 + <Transform [0.2 0 0 0.3 0.08 0.18]> + >>> t1 == t2 + 0 + >>> + """ + xx1, xy1, yx1, yy1, dx1, dy1 = self.__affine + xx2, xy2, yx2, yy2, dx2, dy2 = other + return (xx1, xy1, yx1, yy1, dx1, dy1) == \ + (xx2, xy2, yx2, yy2, dx2, dy2) + + def __hash__(self): + """Transform instances are hashable, meaning you can use them as + keys in dictionaries: + >>> d = {Scale(12, 13): None} + >>> d + {<Transform [12 0 0 13 0 0]>: None} + >>> + + But again, beware of floating point rounding errors: + >>> t1 = 
Identity.scale(0.2, 0.3).translate(0.4, 0.6) + >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) + >>> t1 + <Transform [0.2 0 0 0.3 0.08 0.18]> + >>> t2 + <Transform [0.2 0 0 0.3 0.08 0.18]> + >>> d = {t1: None} + >>> d + {<Transform [0.2 0 0 0.3 0.08 0.18]>: None} + >>> d[t2] + Traceback (most recent call last): + File "<stdin>", line 1, in ? + KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]> + >>> + """ + return hash(self.__affine) + + def __repr__(self): + return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) \ + + self.__affine) + + +Identity = Transform() + +def Offset(x=0, y=0): + """Return the identity transformation offset by x, y. + + Example: + >>> Offset(2, 3) + <Transform [1 0 0 1 2 3]> + >>> + """ + return Transform(1, 0, 0, 1, x, y) + +def Scale(x, y=None): + """Return the identity transformation scaled by x, y. The 'y' argument + may be None, which implies to use the x value for y as well. + + Example: + >>> Scale(2, 3) + <Transform [2 0 0 3 0 0]> + >>> + """ + if y is None: + y = x + return Transform(x, 0, 0, y, 0, 0) + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Snippets/fontTools/misc/xmlReader.py fonttools-3.0/Snippets/fontTools/misc/xmlReader.py --- fonttools-2.4/Snippets/fontTools/misc/xmlReader.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/xmlReader.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,131 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.misc.textTools import safeEval +from fontTools.ttLib.tables.DefaultTable import DefaultTable +import os + + +class TTXParseError(Exception): pass + +BUFSIZE = 0x4000 + + +class XMLReader(object): + + def __init__(self, fileName, ttFont, progress=None, quiet=False): + self.ttFont = ttFont + self.fileName = fileName + self.progress = progress + self.quiet = quiet + self.root = 
None + self.contentStack = [] + self.stackSize = 0 + + def read(self): + if self.progress: + import stat + self.progress.set(0, os.stat(self.fileName)[stat.ST_SIZE] // 100 or 1) + file = open(self.fileName, 'rb') + self._parseFile(file) + file.close() + + def _parseFile(self, file): + from xml.parsers.expat import ParserCreate + parser = ParserCreate() + parser.StartElementHandler = self._startElementHandler + parser.EndElementHandler = self._endElementHandler + parser.CharacterDataHandler = self._characterDataHandler + + pos = 0 + while True: + chunk = file.read(BUFSIZE) + if not chunk: + parser.Parse(chunk, 1) + break + pos = pos + len(chunk) + if self.progress: + self.progress.set(pos // 100) + parser.Parse(chunk, 0) + + def _startElementHandler(self, name, attrs): + stackSize = self.stackSize + self.stackSize = stackSize + 1 + if not stackSize: + if name != "ttFont": + raise TTXParseError("illegal root tag: %s" % name) + sfntVersion = attrs.get("sfntVersion") + if sfntVersion is not None: + if len(sfntVersion) != 4: + sfntVersion = safeEval('"' + sfntVersion + '"') + self.ttFont.sfntVersion = sfntVersion + self.contentStack.append([]) + elif stackSize == 1: + subFile = attrs.get("src") + if subFile is not None: + subFile = os.path.join(os.path.dirname(self.fileName), subFile) + subReader = XMLReader(subFile, self.ttFont, self.progress, self.quiet) + subReader.read() + self.contentStack.append([]) + return + tag = ttLib.xmlToTag(name) + msg = "Parsing '%s' table..." 
% tag + if self.progress: + self.progress.setlabel(msg) + elif self.ttFont.verbose: + ttLib.debugmsg(msg) + else: + if not self.quiet: + print(msg) + if tag == "GlyphOrder": + tableClass = ttLib.GlyphOrder + elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])): + tableClass = DefaultTable + else: + tableClass = ttLib.getTableClass(tag) + if tableClass is None: + tableClass = DefaultTable + if tag == 'loca' and tag in self.ttFont: + # Special-case the 'loca' table as we need the + # original if the 'glyf' table isn't recompiled. + self.currentTable = self.ttFont[tag] + else: + self.currentTable = tableClass(tag) + self.ttFont[tag] = self.currentTable + self.contentStack.append([]) + elif stackSize == 2: + self.contentStack.append([]) + self.root = (name, attrs, self.contentStack[-1]) + else: + l = [] + self.contentStack[-1].append((name, attrs, l)) + self.contentStack.append(l) + + def _characterDataHandler(self, data): + if self.stackSize > 1: + self.contentStack[-1].append(data) + + def _endElementHandler(self, name): + self.stackSize = self.stackSize - 1 + del self.contentStack[-1] + if self.stackSize == 1: + self.root = None + elif self.stackSize == 2: + name, attrs, content = self.root + self.currentTable.fromXML(name, attrs, content, self.ttFont) + self.root = None + + +class ProgressPrinter(object): + + def __init__(self, title, maxval=100): + print(title) + + def set(self, val, maxval=None): + pass + + def increment(self, val=1): + pass + + def setLabel(self, text): + print(text) diff -Nru fonttools-2.4/Snippets/fontTools/misc/xmlReader_test.py fonttools-3.0/Snippets/fontTools/misc/xmlReader_test.py --- fonttools-2.4/Snippets/fontTools/misc/xmlReader_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/xmlReader_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 
import * +import os +import unittest +from fontTools.ttLib import TTFont +from .xmlReader import XMLReader +import tempfile + + +class TestXMLReader(unittest.TestCase): + + def test_decode_utf8(self): + + class DebugXMLReader(XMLReader): + + def __init__(self, fileName, ttFont, progress=None, quiet=False): + super(DebugXMLReader, self).__init__( + fileName, ttFont, progress, quiet) + self.contents = [] + + def _endElementHandler(self, name): + if self.stackSize == 3: + name, attrs, content = self.root + self.contents.append(content) + super(DebugXMLReader, self)._endElementHandler(name) + + expected = 'fôôbär' + data = '''\ +<?xml version="1.0" encoding="UTF-8"?> +<ttFont> + <name> + <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409"> + %s + </namerecord> + </name> +</ttFont> +''' % expected + + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.write(data.encode('utf-8')) + reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) + reader.read() + os.remove(tmp.name) + content = strjoin(reader.contents[0]).strip() + self.assertEqual(expected, content) + + def test_normalise_newlines(self): + + class DebugXMLReader(XMLReader): + + def __init__(self, fileName, ttFont, progress=None, quiet=False): + super(DebugXMLReader, self).__init__( + fileName, ttFont, progress, quiet) + self.newlines = [] + + def _characterDataHandler(self, data): + self.newlines.extend([c for c in data if c in ('\r', '\n')]) + + # notice how when CR is escaped, it is not normalised by the XML parser + data = ( + '<ttFont>\r' # \r -> \n + ' <test>\r\n' # \r\n -> \n + ' a line of text\n' # \n + ' escaped CR and unix newline &#13;\n' # &#13;\n -> \r\n + ' escaped CR and macintosh newline &#13;\r' # &#13;\r -> \r\n + ' escaped CR and windows newline &#13;\r\n' # &#13;\r\n -> \r\n + ' </test>\n' # \n + '</ttFont>') + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.write(data.encode('utf-8')) + reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) + 
reader.read() + os.remove(tmp.name) + expected = ['\n'] * 3 + ['\r', '\n'] * 3 + ['\n'] + self.assertEqual(expected, reader.newlines) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/misc/xmlWriter.py fonttools-3.0/Snippets/fontTools/misc/xmlWriter.py --- fonttools-2.4/Snippets/fontTools/misc/xmlWriter.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/xmlWriter.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,180 @@ +"""xmlWriter.py -- Simple XML authoring class""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +import os +import string + +INDENT = " " + + +class XMLWriter(object): + + def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8"): + if encoding.lower().replace('-','').replace('_','') != 'utf8': + raise Exception('Only UTF-8 encoding is supported.') + if fileOrPath == '-': + fileOrPath = sys.stdout + if not hasattr(fileOrPath, "write"): + self.file = open(fileOrPath, "wb") + else: + # assume writable file object + self.file = fileOrPath + + # Figure out if writer expects bytes or unicodes + try: + # The bytes check should be first. See: + # https://github.com/behdad/fonttools/pull/233 + self.file.write(b'') + self.totype = tobytes + except TypeError: + # This better not fail. 
+ self.file.write(tounicode('')) + self.totype = tounicode + self.indentwhite = self.totype(indentwhite) + self.newlinestr = self.totype(os.linesep) + self.indentlevel = 0 + self.stack = [] + self.needindent = 1 + self.idlefunc = idlefunc + self.idlecounter = 0 + self._writeraw('<?xml version="1.0" encoding="UTF-8"?>') + self.newline() + + def close(self): + self.file.close() + + def write(self, string, indent=True): + """Writes text.""" + self._writeraw(escape(string), indent=indent) + + def writecdata(self, string): + """Writes text in a CDATA section.""" + self._writeraw("<![CDATA[" + string + "") + + def write8bit(self, data, strip=False): + """Writes a bytes() sequence into the XML, escaping + non-ASCII bytes. When this is read in xmlReader, + the original bytes can be recovered by encoding to + 'latin-1'.""" + self._writeraw(escape8bit(data), strip=strip) + + def write_noindent(self, string): + """Writes text without indentation.""" + self._writeraw(escape(string), indent=False) + + def _writeraw(self, data, indent=True, strip=False): + """Writes bytes, possibly indented.""" + if indent and self.needindent: + self.file.write(self.indentlevel * self.indentwhite) + self.needindent = 0 + s = self.totype(data, encoding="utf_8") + if (strip): + s = s.strip() + self.file.write(s) + + def newline(self): + self.file.write(self.newlinestr) + self.needindent = 1 + idlecounter = self.idlecounter + if not idlecounter % 100 and self.idlefunc is not None: + self.idlefunc() + self.idlecounter = idlecounter + 1 + + def comment(self, data): + data = escape(data) + lines = data.split("\n") + self._writeraw("") + + def simpletag(self, _TAG_, *args, **kwargs): + attrdata = self.stringifyattrs(*args, **kwargs) + data = "<%s%s/>" % (_TAG_, attrdata) + self._writeraw(data) + + def begintag(self, _TAG_, *args, **kwargs): + attrdata = self.stringifyattrs(*args, **kwargs) + data = "<%s%s>" % (_TAG_, attrdata) + self._writeraw(data) + self.stack.append(_TAG_) + self.indent() + + def 
endtag(self, _TAG_): + assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag" + del self.stack[-1] + self.dedent() + data = "" % _TAG_ + self._writeraw(data) + + def dumphex(self, data): + linelength = 16 + hexlinelength = linelength * 2 + chunksize = 8 + for i in range(0, len(data), linelength): + hexline = hexStr(data[i:i+linelength]) + line = "" + white = "" + for j in range(0, hexlinelength, chunksize): + line = line + white + hexline[j:j+chunksize] + white = " " + self._writeraw(line) + self.newline() + + def indent(self): + self.indentlevel = self.indentlevel + 1 + + def dedent(self): + assert self.indentlevel > 0 + self.indentlevel = self.indentlevel - 1 + + def stringifyattrs(self, *args, **kwargs): + if kwargs: + assert not args + attributes = sorted(kwargs.items()) + elif args: + assert len(args) == 1 + attributes = args[0] + else: + return "" + data = "" + for attr, value in attributes: + if not isinstance(value, (bytes, unicode)): + value = str(value) + data = data + ' %s="%s"' % (attr, escapeattr(value)) + return data + + +def escape(data): + data = tostr(data, 'utf_8') + data = data.replace("&", "&") + data = data.replace("<", "<") + data = data.replace(">", ">") + data = data.replace("\r", " ") + return data + +def escapeattr(data): + data = escape(data) + data = data.replace('"', """) + return data + +def escape8bit(data): + """Input is Unicode string.""" + def escapechar(c): + n = ord(c) + if 32 <= n <= 127 and c not in "<&>": + return c + else: + return "&#" + repr(n) + ";" + return strjoin(map(escapechar, data.decode('latin-1'))) + +def hexStr(s): + h = string.hexdigits + r = '' + for c in s: + i = byteord(c) + r = r + h[(i >> 4) & 0xF] + h[i & 0xF] + return r diff -Nru fonttools-2.4/Snippets/fontTools/misc/xmlWriter_test.py fonttools-3.0/Snippets/fontTools/misc/xmlWriter_test.py --- fonttools-2.4/Snippets/fontTools/misc/xmlWriter_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/misc/xmlWriter_test.py 
2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,111 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import os +import unittest +from .xmlWriter import XMLWriter + +linesep = tobytes(os.linesep) +HEADER = b'' + linesep + +class TestXMLWriter(unittest.TestCase): + + def test_comment_escaped(self): + writer = XMLWriter(BytesIO()) + writer.comment("This&that are ") + self.assertEqual(HEADER + b"", writer.file.getvalue()) + + def test_comment_multiline(self): + writer = XMLWriter(BytesIO()) + writer.comment("Hello world\nHow are you?") + self.assertEqual(HEADER + b"", + writer.file.getvalue()) + + def test_encoding_default(self): + writer = XMLWriter(BytesIO()) + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_utf8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="utf8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_UTF_8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="UTF-8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_UTF8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="UTF8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_other(self): + self.assertRaises(Exception, XMLWriter, BytesIO(), + encoding="iso-8859-1") + + def test_write(self): + writer = XMLWriter(BytesIO()) + writer.write("foo&bar") + self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue()) + + def test_indent_dedent(self): + writer = XMLWriter(BytesIO()) + writer.write("foo") + writer.newline() + writer.indent() + writer.write("bar") + writer.newline() + writer.dedent() + writer.write("baz") + self.assertEqual(HEADER + bytesjoin(["foo", " bar", "baz"], linesep), + writer.file.getvalue()) + + def test_writecdata(self): + writer = XMLWriter(BytesIO()) + 
writer.writecdata("foo&bar") + self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue()) + + def test_simpletag(self): + writer = XMLWriter(BytesIO()) + writer.simpletag("tag", a="1", b="2") + self.assertEqual(HEADER + b'', writer.file.getvalue()) + + def test_begintag_endtag(self): + writer = XMLWriter(BytesIO()) + writer.begintag("tag", attr="value") + writer.write("content") + writer.endtag("tag") + self.assertEqual(HEADER + b'content', writer.file.getvalue()) + + def test_dumphex(self): + writer = XMLWriter(BytesIO()) + writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.") + self.assertEqual(HEADER + bytesjoin([ + "54797065 20697320 61206265 61757469", + "66756c20 67726f75 70206f66 206c6574", + "74657273 2c206e6f 74206120 67726f75", + "70206f66 20626561 75746966 756c206c", + "65747465 72732e ", ""], joiner=linesep), writer.file.getvalue()) + + def test_stringifyattrs(self): + writer = XMLWriter(BytesIO()) + expected = ' attr="0"' + self.assertEqual(expected, writer.stringifyattrs(attr=0)) + self.assertEqual(expected, writer.stringifyattrs(attr=b'0')) + self.assertEqual(expected, writer.stringifyattrs(attr='0')) + self.assertEqual(expected, writer.stringifyattrs(attr=u'0')) + + def test_carriage_return_escaped(self): + writer = XMLWriter(BytesIO()) + writer.write("two lines\r\nseparated by Windows line endings") + self.assertEqual( + HEADER + b'two lines \nseparated by Windows line endings', + writer.file.getvalue()) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/pens/basePen.py fonttools-3.0/Snippets/fontTools/pens/basePen.py --- fonttools-2.4/Snippets/fontTools/pens/basePen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/basePen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,363 @@ +"""fontTools.pens.basePen.py -- Tools and base classes to build pen objects. 
+ +The Pen Protocol + +A Pen is a kind of object that standardizes the way how to "draw" outlines: +it is a middle man between an outline and a drawing. In other words: +it is an abstraction for drawing outlines, making sure that outline objects +don't need to know the details about how and where they're being drawn, and +that drawings don't need to know the details of how outlines are stored. + +The most basic pattern is this: + + outline.draw(pen) # 'outline' draws itself onto 'pen' + +Pens can be used to render outlines to the screen, but also to construct +new outlines. Eg. an outline object can be both a drawable object (it has a +draw() method) as well as a pen itself: you *build* an outline using pen +methods. + +The AbstractPen class defines the Pen protocol. It implements almost +nothing (only no-op closePath() and endPath() methods), but is useful +for documentation purposes. Subclassing it basically tells the reader: +"this class implements the Pen protocol.". An examples of an AbstractPen +subclass is fontTools.pens.transformPen.TransformPen. + +The BasePen class is a base implementation useful for pens that actually +draw (for example a pen renders outlines using a native graphics engine). +BasePen contains a lot of base functionality, making it very easy to build +a pen that fully conforms to the pen protocol. Note that if you subclass +BasePen, you _don't_ override moveTo(), lineTo(), etc., but _moveTo(), +_lineTo(), etc. See the BasePen doc string for details. Examples of +BasePen subclasses are fontTools.pens.boundsPen.BoundsPen and +fontTools.pens.cocoaPen.CocoaPen. + +Coordinates are usually expressed as (x, y) tuples, but generally any +sequence of length 2 will do. 
+""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = ["AbstractPen", "NullPen", "BasePen", + "decomposeSuperBezierSegment", "decomposeQuadraticSegment"] + + +class AbstractPen(object): + + def moveTo(self, pt): + """Begin a new sub path, set the current point to 'pt'. You must + end each sub path with a call to pen.closePath() or pen.endPath(). + """ + raise NotImplementedError + + def lineTo(self, pt): + """Draw a straight line from the current point to 'pt'.""" + raise NotImplementedError + + def curveTo(self, *points): + """Draw a cubic bezier with an arbitrary number of control points. + + The last point specified is on-curve, all others are off-curve + (control) points. If the number of control points is > 2, the + segment is split into multiple bezier segments. This works + like this: + + Let n be the number of control points (which is the number of + arguments to this call minus 1). If n==2, a plain vanilla cubic + bezier is drawn. If n==1, we fall back to a quadratic segment and + if n==0 we draw a straight line. It gets interesting when n>2: + n-1 PostScript-style cubic segments will be drawn as if it were + one curve. See decomposeSuperBezierSegment(). + + The conversion algorithm used for n>2 is inspired by NURB + splines, and is conceptually equivalent to the TrueType "implied + points" principle. See also decomposeQuadraticSegment(). + """ + raise NotImplementedError + + def qCurveTo(self, *points): + """Draw a whole string of quadratic curve segments. + + The last point specified is on-curve, all others are off-curve + points. + + This method implements TrueType-style curves, breaking up curves + using 'implied points': between each two consequtive off-curve points, + there is one implied point exactly in the middle between them. See + also decomposeQuadraticSegment(). + + The last argument (normally the on-curve point) may be None. 
+ This is to support contours that have NO on-curve points (a rarely + seen feature of TrueType outlines). + """ + raise NotImplementedError + + def closePath(self): + """Close the current sub path. You must call either pen.closePath() + or pen.endPath() after each sub path. + """ + pass + + def endPath(self): + """End the current sub path, but don't close it. You must call + either pen.closePath() or pen.endPath() after each sub path. + """ + pass + + def addComponent(self, glyphName, transformation): + """Add a sub glyph. The 'transformation' argument must be a 6-tuple + containing an affine transformation, or a Transform object from the + fontTools.misc.transform module. More precisely: it should be a + sequence containing 6 numbers. + """ + raise NotImplementedError + + +class NullPen(object): + + """A pen that does nothing. + """ + + def moveTo(self, pt): + pass + + def lineTo(self, pt): + pass + + def curveTo(self, *points): + pass + + def qCurveTo(self, *points): + pass + + def closePath(self): + pass + + def endPath(self): + pass + + def addComponent(self, glyphName, transformation): + pass + + +class BasePen(AbstractPen): + + """Base class for drawing pens. You must override _moveTo, _lineTo and + _curveToOne. You may additionally override _closePath, _endPath, + addComponent and/or _qCurveToOne. You should not override any other + methods. + """ + + def __init__(self, glyphSet): + self.glyphSet = glyphSet + self.__currentPoint = None + + # must override + + def _moveTo(self, pt): + raise NotImplementedError + + def _lineTo(self, pt): + raise NotImplementedError + + def _curveToOne(self, pt1, pt2, pt3): + raise NotImplementedError + + # may override + + def _closePath(self): + pass + + def _endPath(self): + pass + + def _qCurveToOne(self, pt1, pt2): + """This method implements the basic quadratic curve type. The + default implementation delegates the work to the cubic curve + function. Optionally override with a native implementation. 
+ """ + pt0x, pt0y = self.__currentPoint + pt1x, pt1y = pt1 + pt2x, pt2y = pt2 + mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x) + mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y) + mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x) + mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y) + self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2) + + def addComponent(self, glyphName, transformation): + """This default implementation simply transforms the points + of the base glyph and draws it onto self. + """ + from fontTools.pens.transformPen import TransformPen + try: + glyph = self.glyphSet[glyphName] + except KeyError: + pass + else: + tPen = TransformPen(self, transformation) + glyph.draw(tPen) + + # don't override + + def _getCurrentPoint(self): + """Return the current point. This is not part of the public + interface, yet is useful for subclasses. + """ + return self.__currentPoint + + def closePath(self): + self._closePath() + self.__currentPoint = None + + def endPath(self): + self._endPath() + self.__currentPoint = None + + def moveTo(self, pt): + self._moveTo(pt) + self.__currentPoint = pt + + def lineTo(self, pt): + self._lineTo(pt) + self.__currentPoint = pt + + def curveTo(self, *points): + n = len(points) - 1 # 'n' is the number of control points + assert n >= 0 + if n == 2: + # The common case, we have exactly two BCP's, so this is a standard + # cubic bezier. Even though decomposeSuperBezierSegment() handles + # this case just fine, we special-case it anyway since it's so + # common. + self._curveToOne(*points) + self.__currentPoint = points[-1] + elif n > 2: + # n is the number of control points; split curve into n-1 cubic + # bezier segments. The algorithm used here is inspired by NURB + # splines and the TrueType "implied point" principle, and ensures + # the smoothest possible connection between two curve segments, + # with no disruption in the curvature. 
It is practical since it + # allows one to construct multiple bezier segments with a much + # smaller amount of points. + _curveToOne = self._curveToOne + for pt1, pt2, pt3 in decomposeSuperBezierSegment(points): + _curveToOne(pt1, pt2, pt3) + self.__currentPoint = pt3 + elif n == 1: + self.qCurveTo(*points) + elif n == 0: + self.lineTo(points[0]) + else: + raise AssertionError("can't get there from here") + + def qCurveTo(self, *points): + n = len(points) - 1 # 'n' is the number of control points + assert n >= 0 + if points[-1] is None: + # Special case for TrueType quadratics: it is possible to + # define a contour with NO on-curve points. BasePen supports + # this by allowing the final argument (the expected on-curve + # point) to be None. We simulate the feature by making the implied + # on-curve point between the last and the first off-curve points + # explicit. + x, y = points[-2] # last off-curve point + nx, ny = points[0] # first off-curve point + impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny)) + self.__currentPoint = impliedStartPoint + self._moveTo(impliedStartPoint) + points = points[:-1] + (impliedStartPoint,) + if n > 0: + # Split the string of points into discrete quadratic curve + # segments. Between any two consecutive off-curve points + # there's an implied on-curve point exactly in the middle. + # This is where the segment splits. + _qCurveToOne = self._qCurveToOne + for pt1, pt2 in decomposeQuadraticSegment(points): + _qCurveToOne(pt1, pt2) + self.__currentPoint = pt2 + else: + self.lineTo(points[0]) + + +def decomposeSuperBezierSegment(points): + """Split the SuperBezier described by 'points' into a list of regular + bezier segments. The 'points' argument must be a sequence with length + 3 or greater, containing (x, y) coordinates. The last point is the + destination on-curve point, the rest of the points are off-curve points. + The start point should not be supplied. 
+ + This function returns a list of (pt1, pt2, pt3) tuples, which each + specify a regular curveto-style bezier segment. + """ + n = len(points) - 1 + assert n > 1 + bezierSegments = [] + pt1, pt2, pt3 = points[0], None, None + for i in range(2, n+1): + # calculate points in between control points. + nDivisions = min(i, 3, n-i+2) + for j in range(1, nDivisions): + factor = j / nDivisions + temp1 = points[i-1] + temp2 = points[i-2] + temp = (temp2[0] + factor * (temp1[0] - temp2[0]), + temp2[1] + factor * (temp1[1] - temp2[1])) + if pt2 is None: + pt2 = temp + else: + pt3 = (0.5 * (pt2[0] + temp[0]), + 0.5 * (pt2[1] + temp[1])) + bezierSegments.append((pt1, pt2, pt3)) + pt1, pt2, pt3 = temp, None, None + bezierSegments.append((pt1, points[-2], points[-1])) + return bezierSegments + + +def decomposeQuadraticSegment(points): + """Split the quadratic curve segment described by 'points' into a list + of "atomic" quadratic segments. The 'points' argument must be a sequence + with length 2 or greater, containing (x, y) coordinates. The last point + is the destination on-curve point, the rest of the points are off-curve + points. The start point should not be supplied. + + This function returns a list of (pt1, pt2) tuples, which each specify a + plain quadratic bezier segment. 
+ """ + n = len(points) - 1 + assert n > 0 + quadSegments = [] + for i in range(n - 1): + x, y = points[i] + nx, ny = points[i+1] + impliedPt = (0.5 * (x + nx), 0.5 * (y + ny)) + quadSegments.append((points[i], impliedPt)) + quadSegments.append((points[-2], points[-1])) + return quadSegments + + +class _TestPen(BasePen): + """Test class that prints PostScript to stdout.""" + def _moveTo(self, pt): + print("%s %s moveto" % (pt[0], pt[1])) + def _lineTo(self, pt): + print("%s %s lineto" % (pt[0], pt[1])) + def _curveToOne(self, bcp1, bcp2, pt): + print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1], + bcp2[0], bcp2[1], pt[0], pt[1])) + def _closePath(self): + print("closepath") + + +if __name__ == "__main__": + pen = _TestPen(None) + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) + pen.closePath() + + pen = _TestPen(None) + # testing the "no on-curve point" scenario + pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None) + pen.closePath() diff -Nru fonttools-2.4/Snippets/fontTools/pens/basePen_test.py fonttools-3.0/Snippets/fontTools/pens/basePen_test.py --- fonttools-2.4/Snippets/fontTools/pens/basePen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/basePen_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,171 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import \ + BasePen, decomposeSuperBezierSegment, decomposeQuadraticSegment +import unittest + + +class _TestPen(BasePen): + def __init__(self): + BasePen.__init__(self, glyphSet={}) + self._commands = [] + + def __repr__(self): + return " ".join(self._commands) + + def getCurrentPoint(self): + return self._getCurrentPoint() + + def _moveTo(self, pt): + self._commands.append("%s %s moveto" % (pt[0], pt[1])) + + def _lineTo(self, pt): + self._commands.append("%s %s lineto" % (pt[0], pt[1])) + + def _curveToOne(self, bcp1, bcp2, pt): + 
self._commands.append("%s %s %s %s %s %s curveto" % + (bcp1[0], bcp1[1], + bcp2[0], bcp2[1], + pt[0], pt[1])) + + def _closePath(self): + self._commands.append("closepath") + + def _endPath(self): + self._commands.append("endpath") + + +class _TestGlyph: + def draw(self, pen): + pen.moveTo((0.0, 0.0)) + pen.lineTo((0.0, 100.0)) + pen.curveTo((50.0, 75.0), (60.0, 50.0), (50.0, 25.0), (0.0, 0.0)) + pen.closePath() + + +class BasePenTest(unittest.TestCase): + def test_moveTo(self): + pen = _TestPen() + pen.moveTo((0.5, -4.3)) + self.assertEqual("0.5 -4.3 moveto", repr(pen)) + self.assertEqual((0.5, -4.3), pen.getCurrentPoint()) + + def test_lineTo(self): + pen = _TestPen() + pen.moveTo((4, 5)) + pen.lineTo((7, 8)) + self.assertEqual("4 5 moveto 7 8 lineto", repr(pen)) + self.assertEqual((7, 8), pen.getCurrentPoint()) + + def test_curveTo_zeroPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + self.assertRaises(AssertionError, pen.curveTo) + + def test_curveTo_onePoint(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((1.0, 1.1)) + self.assertEqual("0.0 0.0 moveto 1.0 1.1 lineto", repr(pen)) + self.assertEqual((1.0, 1.1), pen.getCurrentPoint()) + + def test_curveTo_twoPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((6.0, 3.0), (3.0, 6.0)) + self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", + repr(pen)) + self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) + + def test_curveTo_manyPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((1.0, 1.1), (2.0, 2.1), (3.0, 3.1), (4.0, 4.1)) + self.assertEqual("0.0 0.0 moveto " + "1.0 1.1 1.5 1.6 2.0 2.1 curveto " + "2.5 2.6 3.0 3.1 4.0 4.1 curveto", repr(pen)) + self.assertEqual((4.0, 4.1), pen.getCurrentPoint()) + + def test_qCurveTo_zeroPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + self.assertRaises(AssertionError, pen.qCurveTo) + + def test_qCurveTo_onePoint(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((77.7, 
99.9)) + self.assertEqual("0.0 0.0 moveto 77.7 99.9 lineto", repr(pen)) + self.assertEqual((77.7, 99.9), pen.getCurrentPoint()) + + def test_qCurveTo_manyPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((6.0, 3.0), (3.0, 6.0)) + self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", + repr(pen)) + self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) + + def test_qCurveTo_onlyOffCurvePoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((6.0, -6.0), (12.0, 12.0), (18.0, -18.0), None) + self.assertEqual("0.0 0.0 moveto " + "12.0 -12.0 moveto " + "8.0 -8.0 7.0 -3.0 9.0 3.0 curveto " + "11.0 9.0 13.0 7.0 15.0 -3.0 curveto " + "17.0 -13.0 16.0 -16.0 12.0 -12.0 curveto", repr(pen)) + self.assertEqual((12.0, -12.0), pen.getCurrentPoint()) + + def test_closePath(self): + pen = _TestPen() + pen.lineTo((3, 4)) + pen.closePath() + self.assertEqual("3 4 lineto closepath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + def test_endPath(self): + pen = _TestPen() + pen.lineTo((3, 4)) + pen.endPath() + self.assertEqual("3 4 lineto endpath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + def test_addComponent(self): + pen = _TestPen() + pen.glyphSet["oslash"] = _TestGlyph() + pen.addComponent("oslash", (2, 3, 0.5, 2, -10, 0)) + self.assertEqual("-10.0 0.0 moveto " + "40.0 200.0 lineto " + "127.5 300.0 131.25 290.0 125.0 265.0 curveto " + "118.75 240.0 102.5 200.0 -10.0 0.0 curveto " + "closepath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + +class DecomposeSegmentTest(unittest.TestCase): + def test_decomposeSuperBezierSegment(self): + decompose = decomposeSuperBezierSegment + self.assertRaises(AssertionError, decompose, []) + self.assertRaises(AssertionError, decompose, [(0, 0)]) + self.assertRaises(AssertionError, decompose, [(0, 0), (1, 1)]) + self.assertEqual([((0, 0), (1, 1), (2, 2))], + decompose([(0, 0), (1, 1), (2, 2)])) + self.assertEqual( + [((0, 0), (2, -2), (4, 0)), 
((6, 2), (8, 8), (12, -12))], + decompose([(0, 0), (4, -4), (8, 8), (12, -12)])) + + def test_decomposeQuadraticSegment(self): + decompose = decomposeQuadraticSegment + self.assertRaises(AssertionError, decompose, []) + self.assertRaises(AssertionError, decompose, [(0, 0)]) + self.assertEqual([((0,0), (4, 8))], decompose([(0, 0), (4, 8)])) + self.assertEqual([((0,0), (2, 4)), ((4, 8), (9, -9))], + decompose([(0, 0), (4, 8), (9, -9)])) + self.assertEqual( + [((0, 0), (2.0, 4.0)), ((4, 8), (6.5, -0.5)), ((9, -9), (10, 10))], + decompose([(0, 0), (4, 8), (9, -9), (10, 10)])) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/pens/boundsPen.py fonttools-3.0/Snippets/fontTools/pens/boundsPen.py --- fonttools-2.4/Snippets/fontTools/pens/boundsPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/boundsPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,78 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect +from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds +from fontTools.pens.basePen import BasePen + + +__all__ = ["BoundsPen", "ControlBoundsPen"] + + +class ControlBoundsPen(BasePen): + + """Pen to calculate the "control bounds" of a shape. This is the + bounding box of all control points, so may be larger than the + actual bounding box if there are curves that don't have points + on their extremes. + + When the shape has been drawn, the bounds are available as the + 'bounds' attribute of the pen object. 
It's a 4-tuple: + (xMin, yMin, xMax, yMax) + """ + + def __init__(self, glyphSet): + BasePen.__init__(self, glyphSet) + self.bounds = None + + def _moveTo(self, pt): + bounds = self.bounds + if bounds: + self.bounds = updateBounds(bounds, pt) + else: + x, y = pt + self.bounds = (x, y, x, y) + + def _lineTo(self, pt): + self.bounds = updateBounds(self.bounds, pt) + + def _curveToOne(self, bcp1, bcp2, pt): + bounds = self.bounds + bounds = updateBounds(bounds, bcp1) + bounds = updateBounds(bounds, bcp2) + bounds = updateBounds(bounds, pt) + self.bounds = bounds + + def _qCurveToOne(self, bcp, pt): + bounds = self.bounds + bounds = updateBounds(bounds, bcp) + bounds = updateBounds(bounds, pt) + self.bounds = bounds + + +class BoundsPen(ControlBoundsPen): + + """Pen to calculate the bounds of a shape. It calculates the + correct bounds even when the shape contains curves that don't + have points on their extremes. This is somewhat slower to compute + than the "control bounds". + + When the shape has been drawn, the bounds are available as the + 'bounds' attribute of the pen object. 
It's a 4-tuple: + (xMin, yMin, xMax, yMax) + """ + + def _curveToOne(self, bcp1, bcp2, pt): + bounds = self.bounds + bounds = updateBounds(bounds, pt) + if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds): + bounds = unionRect(bounds, calcCubicBounds( + self._getCurrentPoint(), bcp1, bcp2, pt)) + self.bounds = bounds + + def _qCurveToOne(self, bcp, pt): + bounds = self.bounds + bounds = updateBounds(bounds, pt) + if not pointInRect(bcp, bounds): + bounds = unionRect(bounds, calcQuadraticBounds( + self._getCurrentPoint(), bcp, pt)) + self.bounds = bounds diff -Nru fonttools-2.4/Snippets/fontTools/pens/boundsPen_test.py fonttools-3.0/Snippets/fontTools/pens/boundsPen_test.py --- fonttools-2.4/Snippets/fontTools/pens/boundsPen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/boundsPen_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,66 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen +import unittest + + +def draw_(pen): + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.qCurveTo((50, 75), (60, 50), (50, 25), (0, 0)) + pen.curveTo((-50, 25), (-60, 50), (-50, 75), (0, 100)) + pen.closePath() + + +def bounds_(pen): + return " ".join(["%.0f" % c for c in pen.bounds]) + + +class BoundsPenTest(unittest.TestCase): + def test_draw(self): + pen = BoundsPen(None) + draw_(pen) + self.assertEqual("-55 0 58 100", bounds_(pen)) + + def test_empty(self): + pen = BoundsPen(None) + self.assertEqual(None, pen.bounds) + + def test_curve(self): + pen = BoundsPen(None) + pen.moveTo((0, 0)) + pen.curveTo((20, 10), (90, 40), (0, 0)) + self.assertEqual("0 0 45 20", bounds_(pen)) + + def test_quadraticCurve(self): + pen = BoundsPen(None) + pen.moveTo((0, 0)) + pen.qCurveTo((6, 6), (10, 0)) + self.assertEqual("0 0 10 3", bounds_(pen)) + + +class ControlBoundsPenTest(unittest.TestCase): + def test_draw(self): + pen = 
ControlBoundsPen(None) + draw_(pen) + self.assertEqual("-55 0 60 100", bounds_(pen)) + + def test_empty(self): + pen = ControlBoundsPen(None) + self.assertEqual(None, pen.bounds) + + def test_curve(self): + pen = ControlBoundsPen(None) + pen.moveTo((0, 0)) + pen.curveTo((20, 10), (90, 40), (0, 0)) + self.assertEqual("0 0 90 40", bounds_(pen)) + + def test_quadraticCurve(self): + pen = ControlBoundsPen(None) + pen.moveTo((0, 0)) + pen.qCurveTo((6, 6), (10, 0)) + self.assertEqual("0 0 10 6", bounds_(pen)) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/pens/cocoaPen.py fonttools-3.0/Snippets/fontTools/pens/cocoaPen.py --- fonttools-2.4/Snippets/fontTools/pens/cocoaPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/cocoaPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,28 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["CocoaPen"] + + +class CocoaPen(BasePen): + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + from AppKit import NSBezierPath + path = NSBezierPath.bezierPath() + self.path = path + + def _moveTo(self, p): + self.path.moveToPoint_(p) + + def _lineTo(self, p): + self.path.lineToPoint_(p) + + def _curveToOne(self, p1, p2, p3): + self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2) + + def _closePath(self): + self.path.closePath() diff -Nru fonttools-2.4/Snippets/fontTools/pens/__init__.py fonttools-3.0/Snippets/fontTools/pens/__init__.py --- fonttools-2.4/Snippets/fontTools/pens/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +"""Empty __init__.py file to signal Python this directory is a package.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 
import * diff -Nru fonttools-2.4/Snippets/fontTools/pens/pointInsidePen.py fonttools-3.0/Snippets/fontTools/pens/pointInsidePen.py --- fonttools-2.4/Snippets/fontTools/pens/pointInsidePen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/pointInsidePen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,191 @@ +"""fontTools.pens.pointInsidePen -- Pen implementing "point inside" testing +for shapes. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen +from fontTools.misc.bezierTools import solveQuadratic, solveCubic + + +__all__ = ["PointInsidePen"] + + +# working around floating point errors +EPSILON = 1e-10 +ONE_PLUS_EPSILON = 1 + EPSILON +ZERO_MINUS_EPSILON = 0 - EPSILON + + +class PointInsidePen(BasePen): + + """This pen implements "point inside" testing: to test whether + a given point lies inside the shape (black) or outside (white). + Instances of this class can be recycled, as long as the + setTestPoint() method is used to set the new point to test. + + Typical usage: + + pen = PointInsidePen(glyphSet, (100, 200)) + outline.draw(pen) + isInside = pen.getResult() + + Both the even-odd algorithm and the non-zero-winding-rule + algorithm are implemented. The latter is the default, specify + True for the evenOdd argument of __init__ or setTestPoint + to use the even-odd algorithm. + """ + + # This class implements the classical "shoot a ray from the test point + # to infinity and count how many times it intersects the outline" (as well + # as the non-zero variant, where the counter is incremented if the outline + # intersects the ray in one direction and decremented if it intersects in + # the other direction). 
+ # I found an amazingly clear explanation of the subtleties involved in + # implementing this correctly for polygons here: + # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html + # I extended the principles outlined on that page to curves. + + def __init__(self, glyphSet, testPoint, evenOdd=0): + BasePen.__init__(self, glyphSet) + self.setTestPoint(testPoint, evenOdd) + + def setTestPoint(self, testPoint, evenOdd=0): + """Set the point to test. Call this _before_ the outline gets drawn.""" + self.testPoint = testPoint + self.evenOdd = evenOdd + self.firstPoint = None + self.intersectionCount = 0 + + def getResult(self): + """After the shape has been drawn, getResult() returns True if the test + point lies within the (black) shape, and False if it doesn't. + """ + if self.firstPoint is not None: + # always make sure the sub paths are closed; the algorithm only works + # for closed paths. + self.closePath() + if self.evenOdd: + result = self.intersectionCount % 2 + else: + result = self.intersectionCount + return not not result + + def _addIntersection(self, goingUp): + if self.evenOdd or goingUp: + self.intersectionCount += 1 + else: + self.intersectionCount -= 1 + + def _moveTo(self, point): + if self.firstPoint is not None: + # always make sure the sub paths are closed; the algorithm only works + # for closed paths. 
+ self.closePath() + self.firstPoint = point + + def _lineTo(self, point): + x, y = self.testPoint + x1, y1 = self._getCurrentPoint() + x2, y2 = point + + if x1 < x and x2 < x: + return + if y1 < y and y2 < y: + return + if y1 >= y and y2 >= y: + return + + dx = x2 - x1 + dy = y2 - y1 + t = (y - y1) / dy + ix = dx * t + x1 + if ix < x: + return + self._addIntersection(y2 > y1) + + def _curveToOne(self, bcp1, bcp2, point): + x, y = self.testPoint + x1, y1 = self._getCurrentPoint() + x2, y2 = bcp1 + x3, y3 = bcp2 + x4, y4 = point + + if x1 < x and x2 < x and x3 < x and x4 < x: + return + if y1 < y and y2 < y and y3 < y and y4 < y: + return + if y1 >= y and y2 >= y and y3 >= y and y4 >= y: + return + + dy = y1 + cy = (y2 - dy) * 3.0 + by = (y3 - y2) * 3.0 - cy + ay = y4 - dy - cy - by + solutions = sorted(solveCubic(ay, by, cy, dy - y)) + solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] + if not solutions: + return + + dx = x1 + cx = (x2 - dx) * 3.0 + bx = (x3 - x2) * 3.0 - cx + ax = x4 - dx - cx - bx + + above = y1 >= y + lastT = None + for t in solutions: + if t == lastT: + continue + lastT = t + t2 = t * t + t3 = t2 * t + + direction = 3*ay*t2 + 2*by*t + cy + if direction == 0.0: + direction = 6*ay*t + 2*by + if direction == 0.0: + direction = ay + goingUp = direction > 0.0 + + xt = ax*t3 + bx*t2 + cx*t + dx + if xt < x: + above = goingUp + continue + + if t == 0.0: + if not goingUp: + self._addIntersection(goingUp) + elif t == 1.0: + if not above: + self._addIntersection(goingUp) + else: + if above != goingUp: + self._addIntersection(goingUp) + #else: + # we're not really intersecting, merely touching the 'top' + above = goingUp + + def _qCurveToOne_unfinished(self, bcp, point): + # XXX need to finish this, for now doing it through a cubic + # (BasePen implements _qCurveTo in terms of a cubic) will + # have to do. 
+ x, y = self.testPoint + x1, y1 = self._getCurrentPoint() + x2, y2 = bcp + x3, y3 = point + c = y1 + b = (y2 - c) * 2.0 + a = y3 - c - b + solutions = sorted(solveQuadratic(a, b, c - y)) + solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] + if not solutions: + return + # XXX + + def _closePath(self): + if self._getCurrentPoint() != self.firstPoint: + self.lineTo(self.firstPoint) + self.firstPoint = None + + _endPath = _closePath diff -Nru fonttools-2.4/Snippets/fontTools/pens/pointInsidePen_test.py fonttools-3.0/Snippets/fontTools/pens/pointInsidePen_test.py --- fonttools-2.4/Snippets/fontTools/pens/pointInsidePen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/pointInsidePen_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,90 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.pointInsidePen import PointInsidePen +import unittest + + +class PointInsidePenTest(unittest.TestCase): + def test_line(self): + def draw_triangles(pen): + pen.moveTo((0,0)); pen.lineTo((10,5)); pen.lineTo((10,0)) + pen.moveTo((9,1)); pen.lineTo((4,1)); pen.lineTo((9,4)) + pen.closePath() + + self.assertEqual( + " *********" + " ** *" + " ** *" + " * *" + " *", + self.render(draw_triangles, even_odd=True)) + + self.assertEqual( + " *********" + " *******" + " *****" + " ***" + " *", + self.render(draw_triangles, even_odd=False)) + + def test_curve(self): + def draw_curves(pen): + pen.moveTo((0,0)); pen.curveTo((9,1), (9,4), (0,5)) + pen.moveTo((10,5)); pen.curveTo((1,4), (1,1), (10,0)) + pen.closePath() + + self.assertEqual( + "*** ***" + "**** ****" + "*** ***" + "**** ****" + "*** ***", + self.render(draw_curves, even_odd=True)) + + self.assertEqual( + "*** ***" + "**********" + "**********" + "**********" + "*** ***", + self.render(draw_curves, even_odd=False)) + + def test_qCurve(self): + def draw_qCurves(pen): + pen.moveTo((0,0)); 
pen.qCurveTo((15,2), (0,5)) + pen.moveTo((10,5)); pen.qCurveTo((-5,3), (10,0)) + pen.closePath() + + self.assertEqual( + "*** **" + "**** ***" + "*** ***" + "*** ****" + "** ***", + self.render(draw_qCurves, even_odd=True)) + + self.assertEqual( + "*** **" + "**********" + "**********" + "**********" + "** ***", + self.render(draw_qCurves, even_odd=False)) + + @staticmethod + def render(draw_function, even_odd): + result = BytesIO() + for y in range(5): + for x in range(10): + pen = PointInsidePen(None, (x + 0.5, y + 0.5), even_odd) + draw_function(pen) + if pen.getResult(): + result.write(b"*") + else: + result.write(b" ") + return tounicode(result.getvalue()) + + +if __name__ == "__main__": + unittest.main() + diff -Nru fonttools-2.4/Snippets/fontTools/pens/qtPen.py fonttools-3.0/Snippets/fontTools/pens/qtPen.py --- fonttools-2.4/Snippets/fontTools/pens/qtPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/qtPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,28 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["QtPen"] + + +class QtPen(BasePen): + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + from PyQt5.QtGui import QPainterPath + path = QPainterPath() + self.path = path + + def _moveTo(self, p): + self.path.moveTo(*p) + + def _lineTo(self, p): + self.path.lineTo(*p) + + def _curveToOne(self, p1, p2, p3): + self.path.cubicTo(*p1+p2+p3) + + def _closePath(self): + self.path.closeSubpath() diff -Nru fonttools-2.4/Snippets/fontTools/pens/reportLabPen.py fonttools-3.0/Snippets/fontTools/pens/reportLabPen.py --- fonttools-2.4/Snippets/fontTools/pens/reportLabPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/reportLabPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,72 @@ +from __future__ import print_function, division, 
absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen +from reportlab.graphics.shapes import Path + + +class ReportLabPen(BasePen): + + """A pen for drawing onto a reportlab.graphics.shapes.Path object.""" + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + path = Path() + self.path = path + + def _moveTo(self, p): + (x,y) = p + self.path.moveTo(x,y) + + def _lineTo(self, p): + (x,y) = p + self.path.lineTo(x,y) + + def _curveToOne(self, p1, p2, p3): + (x1,y1) = p1 + (x2,y2) = p2 + (x3,y3) = p3 + self.path.curveTo(x1, y1, x2, y2, x3, y3) + + def _closePath(self): + self.path.closePath() + + +if __name__=="__main__": + import sys + if len(sys.argv) < 3: + print("Usage: reportLabPen.py []") + print(" If no image file name is created, by default .png is created.") + print(" example: reportLabPen.py Arial.TTF R test.png") + print(" (The file format will be PNG, regardless of the image file name supplied)") + sys.exit(0) + + from fontTools.ttLib import TTFont + from reportlab.lib import colors + + path = sys.argv[1] + glyphName = sys.argv[2] + if (len(sys.argv) > 3): + imageFile = sys.argv[3] + else: + imageFile = "%s.png" % glyphName + + font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font + gs = font.getGlyphSet() + pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5)) + g = gs[glyphName] + g.draw(pen) + + w, h = g.width, 1000 + from reportlab.graphics import renderPM + from reportlab.graphics.shapes import Group, Drawing, scale + + # Everything is wrapped in a group to allow transformations. 
+ g = Group(pen.path) + g.translate(0, 200) + g.scale(0.3, 0.3) + + d = Drawing(w, h) + d.add(g) + + renderPM.drawToFile(d, imageFile, fmt="PNG") diff -Nru fonttools-2.4/Snippets/fontTools/pens/transformPen.py fonttools-3.0/Snippets/fontTools/pens/transformPen.py --- fonttools-2.4/Snippets/fontTools/pens/transformPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/pens/transformPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,65 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import AbstractPen + + +__all__ = ["TransformPen"] + + +class TransformPen(AbstractPen): + + """Pen that transforms all coordinates using a Affine transformation, + and passes them to another pen. + """ + + def __init__(self, outPen, transformation): + """The 'outPen' argument is another pen object. It will receive the + transformed coordinates. The 'transformation' argument can either + be a six-tuple, or a fontTools.misc.transform.Transform object. 
+ """ + if not hasattr(transformation, "transformPoint"): + from fontTools.misc.transform import Transform + transformation = Transform(*transformation) + self._transformation = transformation + self._transformPoint = transformation.transformPoint + self._outPen = outPen + self._stack = [] + + def moveTo(self, pt): + self._outPen.moveTo(self._transformPoint(pt)) + + def lineTo(self, pt): + self._outPen.lineTo(self._transformPoint(pt)) + + def curveTo(self, *points): + self._outPen.curveTo(*self._transformPoints(points)) + + def qCurveTo(self, *points): + if points[-1] is None: + points = self._transformPoints(points[:-1]) + [None] + else: + points = self._transformPoints(points) + self._outPen.qCurveTo(*points) + + def _transformPoints(self, points): + new = [] + transformPoint = self._transformPoint + for pt in points: + new.append(transformPoint(pt)) + return new + + def closePath(self): + self._outPen.closePath() + + def addComponent(self, glyphName, transformation): + transformation = self._transformation.transform(transformation) + self._outPen.addComponent(glyphName, transformation) + + +if __name__ == "__main__": + from fontTools.pens.basePen import _TestPen + pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0)) + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) + pen.closePath() diff -Nru fonttools-2.4/Snippets/fontTools/subset.py fonttools-3.0/Snippets/fontTools/subset.py --- fonttools-2.4/Snippets/fontTools/subset.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/subset.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,2742 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.ttLib.tables import otTables +from fontTools.misc import psCharStrings +import sys +import struct +import time +import array + +__usage__ = "pyftsubset font-file [glyph...] [--option=value]..." + +__doc__="""\ +pyftsubset -- OpenType font subsetter and optimizer + + pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. + It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) + font file. The subsetted glyph set is based on the specified glyphs + or characters, and specified OpenType layout features. + + The tool also performs some size-reducing optimizations, aimed for using + subset fonts as webfonts. Individual optimizations can be enabled or + disabled, and are enabled by default when they are safe. + +Usage: + """+__usage__+""" + + At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, + --text, --text-file, --unicodes, or --unicodes-file, must be specified. + +Arguments: + font-file + The input font file. + glyph + Specify one or more glyph identifiers to include in the subset. Must be + PS glyph names, or the special string '*' to keep the entire glyph set. + +Initial glyph set specification: + These options populate the initial glyph set. Same option can appear + multiple times, and the results are accummulated. + --gids=[,...] + Specify comma/whitespace-separated list of glyph IDs or ranges as + decimal numbers. For example, --gids=10-12,14 adds glyphs with + numbers 10, 11, 12, and 14. + --gids-file= + Like --gids but reads from a file. Anything after a '#' on any line + is ignored as comments. + --glyphs=[,...] + Specify comma/whitespace-separated PS glyph names to add to the subset. + Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc + that are accepted on the command line. 
The special string '*' wil keep + the entire glyph set. + --glyphs-file= + Like --glyphs but reads from a file. Anything after a '#' on any line + is ignored as comments. + --text= + Specify characters to include in the subset, as UTF-8 string. + --text-file= + Like --text but reads from a file. Newline character are not added to + the subset. + --unicodes=[,...] + Specify comma/whitespace-separated list of Unicode codepoints or + ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. + For example, --unicodes=41-5a,61-7a adds ASCII letters, so does + the more verbose --unicodes=U+0041-005A,U+0061-007A. + The special strings '*' will choose all Unicode characters mapped + by the font. + --unicodes-file= + Like --unicodes, but reads from a file. Anything after a '#' on any + line in the file is ignored as comments. + --ignore-missing-glyphs + Do not fail if some requested glyphs or gids are not available in + the font. + --no-ignore-missing-glyphs + Stop and fail if some requested glyphs or gids are not available + in the font. [default] + --ignore-missing-unicodes [default] + Do not fail if some requested Unicode characters (including those + indirectly specified using --text or --text-file) are not available + in the font. + --no-ignore-missing-unicodes + Stop and fail if some requested Unicode characters are not available + in the font. + Note the default discrepancy between ignoring missing glyphs versus + unicodes. This is for historical reasons and in the future + --no-ignore-missing-unicodes might become default. + +Other options: + For the other options listed below, to see the current value of the option, + pass a value of '?' to it, with or without a '='. + Examples: + $ pyftsubset --glyph-names? + Current setting for 'glyph-names' is: False + $ ./pyftsubset --name-IDs=? + Current setting for 'name-IDs' is: [1, 2] + $ ./pyftsubset --hinting? --no-hinting --hinting? 
+ Current setting for 'hinting' is: True + Current setting for 'hinting' is: False + +Output options: + --output-file= + The output font file. If not specified, the subsetted font + will be saved in as font-file.subset. + --flavor= + Specify flavor of output font file. May be 'woff' or 'woff2'. + Note that WOFF2 requires the Brotli Python extension, available + at https://github.com/google/brotli + +Glyph set expansion: + These options control how additional glyphs are added to the subset. + --notdef-glyph + Add the '.notdef' glyph to the subset (ie, keep it). [default] + --no-notdef-glyph + Drop the '.notdef' glyph unless specified in the glyph set. This + saves a few bytes, but is not possible for Postscript-flavored + fonts, as those require '.notdef'. For TrueType-flavored fonts, + this works fine as long as no unsupported glyphs are requested + from the font. + --notdef-outline + Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is + used when glyphs not supported by the font are to be shown. It is not + needed otherwise. + --no-notdef-outline + When including a '.notdef' glyph, remove its outline. This saves + a few bytes. [default] + --recommended-glyphs + Add glyphs 0, 1, 2, and 3 to the subset, as recommended for + TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. + Some legacy software might require this, but no modern system does. + --no-recommended-glyphs + Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in + glyph set. [default] + --layout-features[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of OpenType layout feature tags that will be preserved. + Glyph variants used by the preserved features are added to the + specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', + 'kern', 'liga', 'locl', 'mark', 'mkmk', 'rclt', 'rlig' and all features + required for script shaping are preserved. To see the full list, try + '--layout-features=?'. 
Use '*' to keep all features. + Multiple --layout-features options can be provided if necessary. + Examples: + --layout-features+=onum,pnum,ss01 + * Keep the default set of features and 'onum', 'pnum', 'ss01'. + --layout-features-='mark','mkmk' + * Keep the default set of features but drop 'mark' and 'mkmk'. + --layout-features='kern' + * Only keep the 'kern' feature, drop all others. + --layout-features='' + * Drop all features. + --layout-features='*' + * Keep all features. + --layout-features+=aalt --layout-features-=vrt2 + * Keep default set of features plus 'aalt', but drop 'vrt2'. + +Hinting options: + --hinting + Keep hinting [default] + --no-hinting + Drop glyph-specific hinting and font-wide hinting tables, as well + as remove hinting-related bits and pieces from other tables (eg. GPOS). + See --hinting-tables for list of tables that are dropped by default. + Instructions and hints are stripped from 'glyf' and 'CFF ' tables + respectively. This produces (sometimes up to 30%) smaller fonts that + are suitable for extremely high-resolution systems, like high-end + mobile devices and retina displays. + XXX Note: Currently there is a known bug in 'CFF ' hint stripping that + might make the font unusable as a webfont as they will be rejected by + OpenType Sanitizer used in common browsers. For more information see: + https://github.com/behdad/fonttools/issues/144 + The --desubroutinize options works around that bug. + +Optimization options: + --desubroutinize + Remove CFF use of subroutinizes. Subroutinization is a way to make CFF + fonts smaller. For small subsets however, desubroutinizing might make + the font smaller. It has even been reported that desubroutinized CFF + fonts compress better (produce smaller output) WOFF and WOFF2 fonts. + Also see note under --no-hinting. + --no-desubroutinize [default] + Leave CFF subroutinizes as is, only throw away unused subroutinizes. + +Font table options: + --drop-tables[+|-]=
[,
...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of tables that will be be dropped. + By default, the following tables are dropped: + 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH' + and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill' + and color tables: 'CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'. + The tool will attempt to subset the remaining tables. + Examples: + --drop-tables-='SVG ' + * Drop the default set of tables but keep 'SVG '. + --drop-tables+=GSUB + * Drop the default set of tables and 'GSUB'. + --drop-tables=DSIG + * Only drop the 'DSIG' table, keep all others. + --drop-tables= + * Keep all tables. + --no-subset-tables+=
[,
...] + Add to the set of tables that will not be subsetted. + By default, the following tables are included in this list, as + they do not need subsetting (ignore the fact that 'loca' is listed + here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', + 'name', 'cvt ', 'fpgm', 'prep', 'VMDX', and 'DSIG'. Tables that the tool + does not know how to subset and are not specified here will be dropped + from the font. + Example: + --no-subset-tables+=FFTM + * Keep 'FFTM' table in the font by preventing subsetting. + --hinting-tables[-]=
[,
...] + Specify (=), add to (+=) or exclude from (-=) the list of font-wide + hinting tables that will be dropped if --no-hinting is specified, + Examples: + --hinting-tables-='VDMX' + * Drop font-wide hinting tables except 'VDMX'. + --hinting-tables='' + * Keep all font-wide hinting tables (but strip hints from glyphs). + --legacy-kern + Keep TrueType 'kern' table even when OpenType 'GPOS' is available. + --no-legacy-kern + Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default] + +Font naming options: + These options control what is retained in the 'name' table. For numerical + codes, see: http://www.microsoft.com/typography/otspec/name.htm + --name-IDs[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the set of 'name' table + entry nameIDs that will be preserved. By default only nameID 1 (Family) + and nameID 2 (Style) are preserved. Use '*' to keep all entries. + Examples: + --name-IDs+=0,4,6 + * Also keep Copyright, Full name and PostScript name entry. + --name-IDs='' + * Drop all 'name' table entries. + --name-IDs='*' + * keep all 'name' table entries + --name-legacy + Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.). + XXX Note: This might be needed for some fonts that have no Unicode name + entires for English. See: https://github.com/behdad/fonttools/issues/146 + --no-name-legacy + Drop legacy (non-Unicode) 'name' table entries [default] + --name-languages[+|-]=[,] + Specify (=), add to (+=) or exclude from (-=) the set of 'name' table + langIDs that will be preserved. By default only records with langID + 0x0409 (English) are preserved. Use '*' to keep all langIDs. + --obfuscate-names + Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4, + and 6 with dummy strings (it is still fully functional as webfont). + +Glyph naming and encoding options: + --glyph-names + Keep PS glyph names in TT-flavored fonts. In general glyph names are + not needed for correct use of the font. 
However, some PDF generators + and PDF viewers might rely on glyph names to extract Unicode text + from PDF documents. + --no-glyph-names + Drop PS glyph names in TT-flavored fonts, by using 'post' table + version 3.0. [default] + --legacy-cmap + Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.). + --no-legacy-cmap + Drop the legacy 'cmap' subtables. [default] + --symbol-cmap + Keep the 3.0 symbol 'cmap'. + --no-symbol-cmap + Drop the 3.0 symbol 'cmap'. [default] + +Other font-specific options: + --recalc-bounds + Recalculate font bounding boxes. + --no-recalc-bounds + Keep original font bounding boxes. This is faster and still safe + for all practical purposes. [default] + --recalc-timestamp + Set font 'modified' timestamp to current time. + --no-recalc-timestamp + Do not modify font 'modified' timestamp. [default] + --canonical-order + Order tables as recommended in the OpenType standard. This is not + required by the standard, nor by any known implementation. + --no-canonical-order + Keep original order of font tables. This is faster. [default] + +Application options: + --verbose + Display verbose information of the subsetting process. + --timing + Display detailed timing information of the subsetting process. + --xml + Display the TTX XML representation of subsetted font. + +Example: + Produce a subset containing the characters ' !"#$%' without performing + size-reducing optimizations: + + $ pyftsubset font.ttf --unicodes="U+0020-0025" \\ + --layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\ + --notdef-glyph --notdef-outline --recommended-glyphs \\ + --name-IDs='*' --name-legacy --name-languages='*' +""" + + +def _add_method(*clazzes): + """Returns a decorator function that adds a new method to one or + more classes.""" + def wrapper(method): + for clazz in clazzes: + assert clazz.__name__ != 'DefaultTable', \ + 'Oops, table class not found.' + assert not hasattr(clazz, method.__name__), \ + "Oops, class '%s' has method '%s'." 
% (clazz.__name__, + method.__name__) + setattr(clazz, method.__name__, method) + return None + return wrapper + +def _uniq_sort(l): + return sorted(set(l)) + +def _set_update(s, *others): + # Jython's set.update only takes one other argument. + # Emulate real set.update... + for other in others: + s.update(other) + +def _dict_subset(d, glyphs): + return {g:d[g] for g in glyphs} + + +@_add_method(otTables.Coverage) +def intersect(self, glyphs): + """Returns ascending list of matching coverage values.""" + return [i for i,g in enumerate(self.glyphs) if g in glyphs] + +@_add_method(otTables.Coverage) +def intersect_glyphs(self, glyphs): + """Returns set of intersecting glyphs.""" + return set(g for g in self.glyphs if g in glyphs) + +@_add_method(otTables.Coverage) +def subset(self, glyphs): + """Returns ascending list of remaining coverage values.""" + indices = self.intersect(glyphs) + self.glyphs = [g for g in self.glyphs if g in glyphs] + return indices + +@_add_method(otTables.Coverage) +def remap(self, coverage_map): + """Remaps coverage.""" + self.glyphs = [self.glyphs[i] for i in coverage_map] + +@_add_method(otTables.ClassDef) +def intersect(self, glyphs): + """Returns ascending list of matching class values.""" + return _uniq_sort( + ([0] if any(g not in self.classDefs for g in glyphs) else []) + + [v for g,v in self.classDefs.items() if g in glyphs]) + +@_add_method(otTables.ClassDef) +def intersect_class(self, glyphs, klass): + """Returns set of glyphs matching class.""" + if klass == 0: + return set(g for g in glyphs if g not in self.classDefs) + return set(g for g,v in self.classDefs.items() + if v == klass and g in glyphs) + +@_add_method(otTables.ClassDef) +def subset(self, glyphs, remap=False): + """Returns ascending list of remaining classes.""" + self.classDefs = {g:v for g,v in self.classDefs.items() if g in glyphs} + # Note: while class 0 has the special meaning of "not matched", + # if no glyph will ever /not match/, we can optimize class 0 out 
too. + indices = _uniq_sort( + ([0] if any(g not in self.classDefs for g in glyphs) else []) + + list(self.classDefs.values())) + if remap: + self.remap(indices) + return indices + +@_add_method(otTables.ClassDef) +def remap(self, class_map): + """Remaps classes.""" + self.classDefs = {g:class_map.index(v) for g,v in self.classDefs.items()} + +@_add_method(otTables.SingleSubst) +def closure_glyphs(self, s, cur_glyphs): + s.glyphs.update(v for g,v in self.mapping.items() if g in cur_glyphs) + +@_add_method(otTables.SingleSubst) +def subset_glyphs(self, s): + self.mapping = {g:v for g,v in self.mapping.items() + if g in s.glyphs and v in s.glyphs} + return bool(self.mapping) + +@_add_method(otTables.MultipleSubst) +def closure_glyphs(self, s, cur_glyphs): + indices = self.Coverage.intersect(cur_glyphs) + _set_update(s.glyphs, *(self.Sequence[i].Substitute for i in indices)) + +@_add_method(otTables.MultipleSubst) +def subset_glyphs(self, s): + indices = self.Coverage.subset(s.glyphs) + self.Sequence = [self.Sequence[i] for i in indices] + # Now drop rules generating glyphs we don't want + indices = [i for i,seq in enumerate(self.Sequence) + if all(sub in s.glyphs for sub in seq.Substitute)] + self.Sequence = [self.Sequence[i] for i in indices] + self.Coverage.remap(indices) + self.SequenceCount = len(self.Sequence) + return bool(self.SequenceCount) + +@_add_method(otTables.AlternateSubst) +def closure_glyphs(self, s, cur_glyphs): + _set_update(s.glyphs, *(vlist for g,vlist in self.alternates.items() + if g in cur_glyphs)) + +@_add_method(otTables.AlternateSubst) +def subset_glyphs(self, s): + self.alternates = {g:vlist + for g,vlist in self.alternates.items() + if g in s.glyphs and + all(v in s.glyphs for v in vlist)} + return bool(self.alternates) + +@_add_method(otTables.LigatureSubst) +def closure_glyphs(self, s, cur_glyphs): + _set_update(s.glyphs, *([seq.LigGlyph for seq in seqs + if all(c in s.glyphs for c in seq.Component)] + for g,seqs in 
self.ligatures.items() + if g in cur_glyphs)) + +@_add_method(otTables.LigatureSubst) +def subset_glyphs(self, s): + self.ligatures = {g:v for g,v in self.ligatures.items() + if g in s.glyphs} + self.ligatures = {g:[seq for seq in seqs + if seq.LigGlyph in s.glyphs and + all(c in s.glyphs for c in seq.Component)] + for g,seqs in self.ligatures.items()} + self.ligatures = {g:v for g,v in self.ligatures.items() if v} + return bool(self.ligatures) + +@_add_method(otTables.ReverseChainSingleSubst) +def closure_glyphs(self, s, cur_glyphs): + if self.Format == 1: + indices = self.Coverage.intersect(cur_glyphs) + if(not indices or + not all(c.intersect(s.glyphs) + for c in self.LookAheadCoverage + self.BacktrackCoverage)): + return + s.glyphs.update(self.Substitute[i] for i in indices) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ReverseChainSingleSubst) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.Substitute = [self.Substitute[i] for i in indices] + # Now drop rules generating glyphs we don't want + indices = [i for i,sub in enumerate(self.Substitute) + if sub in s.glyphs] + self.Substitute = [self.Substitute[i] for i in indices] + self.Coverage.remap(indices) + self.GlyphCount = len(self.Substitute) + return bool(self.GlyphCount and + all(c.subset(s.glyphs) + for c in self.LookAheadCoverage+self.BacktrackCoverage)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.SinglePos) +def subset_glyphs(self, s): + if self.Format == 1: + return len(self.Coverage.subset(s.glyphs)) + elif self.Format == 2: + indices = self.Coverage.subset(s.glyphs) + self.Value = [self.Value[i] for i in indices] + self.ValueCount = len(self.Value) + return bool(self.ValueCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.SinglePos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables + self.ValueFormat &= 
~0x00F0 + return True + +@_add_method(otTables.PairPos) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.PairSet = [self.PairSet[i] for i in indices] + for p in self.PairSet: + p.PairValueRecord = [r for r in p.PairValueRecord + if r.SecondGlyph in s.glyphs] + p.PairValueCount = len(p.PairValueRecord) + # Remove empty pairsets + indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount] + self.Coverage.remap(indices) + self.PairSet = [self.PairSet[i] for i in indices] + self.PairSetCount = len(self.PairSet) + return bool(self.PairSetCount) + elif self.Format == 2: + class1_map = self.ClassDef1.subset(s.glyphs, remap=True) + class2_map = self.ClassDef2.subset(s.glyphs, remap=True) + self.Class1Record = [self.Class1Record[i] for i in class1_map] + for c in self.Class1Record: + c.Class2Record = [c.Class2Record[i] for i in class2_map] + self.Class1Count = len(class1_map) + self.Class2Count = len(class2_map) + return bool(self.Class1Count and + self.Class2Count and + self.Coverage.subset(s.glyphs)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.PairPos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables + self.ValueFormat1 &= ~0x00F0 + self.ValueFormat2 &= ~0x00F0 + return True + +@_add_method(otTables.CursivePos) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.EntryExitRecord = [self.EntryExitRecord[i] for i in indices] + self.EntryExitCount = len(self.EntryExitRecord) + return bool(self.EntryExitCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Anchor) +def prune_hints(self): + # Drop device tables / contour anchor point + self.ensureDecompiled() + self.Format = 1 + +@_add_method(otTables.CursivePos) +def prune_post_subset(self, options): + if not options.hinting: + for rec in self.EntryExitRecord: + if rec.EntryAnchor: 
rec.EntryAnchor.prune_hints() + if rec.ExitAnchor: rec.ExitAnchor.prune_hints() + return True + +@_add_method(otTables.MarkBasePos) +def subset_glyphs(self, s): + if self.Format == 1: + mark_indices = self.MarkCoverage.subset(s.glyphs) + self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] + for i in mark_indices] + self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) + base_indices = self.BaseCoverage.subset(s.glyphs) + self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i] + for i in base_indices] + self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) + self.ClassCount = len(class_indices) + for m in self.MarkArray.MarkRecord: + m.Class = class_indices.index(m.Class) + for b in self.BaseArray.BaseRecord: + b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices] + return bool(self.ClassCount and + self.MarkArray.MarkCount and + self.BaseArray.BaseCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkBasePos) +def prune_post_subset(self, options): + if not options.hinting: + for m in self.MarkArray.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for b in self.BaseArray.BaseRecord: + for a in b.BaseAnchor: + if a: + a.prune_hints() + return True + +@_add_method(otTables.MarkLigPos) +def subset_glyphs(self, s): + if self.Format == 1: + mark_indices = self.MarkCoverage.subset(s.glyphs) + self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] + for i in mark_indices] + self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) + ligature_indices = self.LigatureCoverage.subset(s.glyphs) + self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i] + for i in ligature_indices] + self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) + self.ClassCount = len(class_indices) + for m 
in self.MarkArray.MarkRecord: + m.Class = class_indices.index(m.Class) + for l in self.LigatureArray.LigatureAttach: + for c in l.ComponentRecord: + c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices] + return bool(self.ClassCount and + self.MarkArray.MarkCount and + self.LigatureArray.LigatureCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkLigPos) +def prune_post_subset(self, options): + if not options.hinting: + for m in self.MarkArray.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for l in self.LigatureArray.LigatureAttach: + for c in l.ComponentRecord: + for a in c.LigatureAnchor: + if a: + a.prune_hints() + return True + +@_add_method(otTables.MarkMarkPos) +def subset_glyphs(self, s): + if self.Format == 1: + mark1_indices = self.Mark1Coverage.subset(s.glyphs) + self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i] + for i in mark1_indices] + self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord) + mark2_indices = self.Mark2Coverage.subset(s.glyphs) + self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i] + for i in mark2_indices] + self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord) + self.ClassCount = len(class_indices) + for m in self.Mark1Array.MarkRecord: + m.Class = class_indices.index(m.Class) + for b in self.Mark2Array.Mark2Record: + b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices] + return bool(self.ClassCount and + self.Mark1Array.MarkCount and + self.Mark2Array.MarkCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkMarkPos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables or contour anchor point + for m in self.Mark1Array.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for b in self.Mark2Array.Mark2Record: + for m in b.Mark2Anchor: + if m: + m.prune_hints() 
+ return True + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def subset_lookups(self, lookup_indices): + pass + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def collect_lookups(self): + return [] + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def prune_post_subset(self, options): + return True + +@_add_method(otTables.SingleSubst, + otTables.AlternateSubst, + otTables.ReverseChainSingleSubst) +def may_have_non_1to1(self): + return False + +@_add_method(otTables.MultipleSubst, + otTables.LigatureSubst, + otTables.ContextSubst, + otTables.ChainContextSubst) +def may_have_non_1to1(self): + return True + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def __subset_classify_context(self): + + class ContextHelper(object): + def __init__(self, klass, Format): + if klass.__name__.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klass.__name__.startswith('Chain'): + Chain = 'Chain' + else: + Chain = '' + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(None,) 
+ ChainContextData = lambda r:(None, None, None) + RuleData = lambda r:(r.Input,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + SetRuleData = None + ChainSetRuleData = None + elif Format == 2: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(r.ClassDef,) + ChainContextData = lambda r:(r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) + RuleData = lambda r:(r.Class,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d):(r.Class,) = d + def ChainSetRuleData(r, d):(r.Backtrack, r.Input, r.LookAhead) = d + elif Format == 3: + Coverage = lambda r: r.Coverage[0] + ChainCoverage = lambda r: r.InputCoverage[0] + ContextData = None + ChainContextData = None + RuleData = lambda r: r.Coverage + ChainRuleData = lambda r:(r.BacktrackCoverage + + r.InputCoverage + + r.LookAheadCoverage) + SetRuleData = None + ChainSetRuleData = None + else: + assert 0, "unknown format: %s" % Format + + if Chain: + self.Coverage = ChainCoverage + self.ContextData = ChainContextData + self.RuleData = ChainRuleData + self.SetRuleData = ChainSetRuleData + else: + self.Coverage = Coverage + self.ContextData = ContextData + self.RuleData = RuleData + self.SetRuleData = SetRuleData + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleCount = ChainTyp+'RuleCount' + self.RuleSet = ChainTyp+'RuleSet' + self.RuleSetCount = ChainTyp+'RuleSetCount' + self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleCount = ChainTyp+'ClassRuleCount' + self.RuleSet = ChainTyp+'ClassSet' + self.RuleSetCount = ChainTyp+'ClassSetCount' + self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c + else (set(glyphs) if r == 0 else set())) + + self.ClassDef = 'InputClassDef' if Chain else 'ClassDef' + self.ClassDefIndex = 1 if Chain else 0 + self.Input = 'Input' if Chain else 'Class' + + if self.Format not in [1, 2, 
3]: + return None # Don't shoot the messenger; let it go + if not hasattr(self.__class__, "__ContextHelpers"): + self.__class__.__ContextHelpers = {} + if self.Format not in self.__class__.__ContextHelpers: + helper = ContextHelper(self.__class__, self.Format) + self.__class__.__ContextHelpers[self.Format] = helper + return self.__class__.__ContextHelpers[self.Format] + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst) +def closure_glyphs(self, s, cur_glyphs): + c = self.__subset_classify_context() + + indices = c.Coverage(self).intersect(cur_glyphs) + if not indices: + return [] + cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs) + + if self.Format == 1: + ContextData = c.ContextData(self) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + for i in indices: + if i >= rssCount or not rss[i]: continue + for r in getattr(rss[i], c.Rule): + if not r: continue + if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) + for cd,klist in zip(ContextData, c.RuleData(r))): + continue + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? 
+ pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset([c.Coverage(self).glyphs[i]]) + else: + pos_glyphs = frozenset([r.Input[seqi - 1]]) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(r.Input)+2)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + elif self.Format == 2: + ClassDef = getattr(self, c.ClassDef) + indices = ClassDef.intersect(cur_glyphs) + ContextData = c.ContextData(self) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + for i in indices: + if i >= rssCount or not rss[i]: continue + for r in getattr(rss[i], c.Rule): + if not r: continue + if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) + for cd,klist in zip(ContextData, c.RuleData(r))): + continue + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? + pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i)) + else: + pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1])) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(getattr(r, c.Input))+2)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + elif self.Format == 3: + if not all(x.intersect(s.glyphs) for x in c.RuleData(self)): + return [] + r = self + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? 
+ pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset(cur_glyphs) + else: + pos_glyphs = frozenset(r.InputCoverage[seqi].intersect_glyphs(s.glyphs)) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(r.InputCoverage)+1)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ContextPos, + otTables.ChainContextSubst, + otTables.ChainContextPos) +def subset_glyphs(self, s): + c = self.__subset_classify_context() + + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + rss = [rss[i] for i in indices if i < rssCount] + for rs in rss: + if not rs: continue + ss = getattr(rs, c.Rule) + ss = [r for r in ss + if r and all(all(g in s.glyphs for g in glist) + for glist in c.RuleData(r))] + setattr(rs, c.Rule, ss) + setattr(rs, c.RuleCount, len(ss)) + # Prune empty rulesets + indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)] + self.Coverage.remap(indices) + rss = [rss[i] for i in indices] + setattr(self, c.RuleSet, rss) + setattr(self, c.RuleSetCount, len(rss)) + return bool(rss) + elif self.Format == 2: + if not self.Coverage.subset(s.glyphs): + return False + ContextData = c.ContextData(self) + klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData] + + # Keep rulesets for class numbers that survived. + indices = klass_maps[c.ClassDefIndex] + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + rss = [rss[i] for i in indices if i < rssCount] + del rssCount + # Delete, but not renumber, unreachable rulesets. 
+ indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs) + rss = [rss if i in indices else None for i,rss in enumerate(rss)] + + for rs in rss: + if not rs: continue + ss = getattr(rs, c.Rule) + ss = [r for r in ss + if r and all(all(k in klass_map for k in klist) + for klass_map,klist in zip(klass_maps, c.RuleData(r)))] + setattr(rs, c.Rule, ss) + setattr(rs, c.RuleCount, len(ss)) + + # Remap rule classes + for r in ss: + c.SetRuleData(r, [[klass_map.index(k) for k in klist] + for klass_map,klist in zip(klass_maps, c.RuleData(r))]) + + # Prune empty rulesets + rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss] + while rss and rss[-1] is None: + del rss[-1] + setattr(self, c.RuleSet, rss) + setattr(self, c.RuleSetCount, len(rss)) + + # TODO: We can do a second round of remapping class values based + # on classes that are actually used in at least one rule. Right + # now we subset classes to c.glyphs only. Or better, rewrite + # the above to do that. + + return bool(rss) + elif self.Format == 3: + return all(x.subset(s.glyphs) for x in c.RuleData(self)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def subset_lookups(self, lookup_indices): + c = self.__subset_classify_context() + + if self.Format in [1, 2]: + for rs in getattr(self, c.RuleSet): + if not rs: continue + for r in getattr(rs, c.Rule): + if not r: continue + setattr(r, c.LookupRecord, + [ll for ll in getattr(r, c.LookupRecord) + if ll and ll.LookupListIndex in lookup_indices]) + for ll in getattr(r, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex) + elif self.Format == 3: + setattr(self, c.LookupRecord, + [ll for ll in getattr(self, c.LookupRecord) + if ll and ll.LookupListIndex in lookup_indices]) + for ll in getattr(self, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = 
lookup_indices.index(ll.LookupListIndex) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def collect_lookups(self): + c = self.__subset_classify_context() + + if self.Format in [1, 2]: + return [ll.LookupListIndex + for rs in getattr(self, c.RuleSet) if rs + for r in getattr(rs, c.Rule) if r + for ll in getattr(r, c.LookupRecord) if ll] + elif self.Format == 3: + return [ll.LookupListIndex + for ll in getattr(self, c.LookupRecord) if ll] + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst) +def closure_glyphs(self, s, cur_glyphs): + if self.Format == 1: + self.ExtSubTable.closure_glyphs(s, cur_glyphs) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst) +def may_have_non_1to1(self): + if self.Format == 1: + return self.ExtSubTable.may_have_non_1to1() + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def subset_glyphs(self, s): + if self.Format == 1: + return self.ExtSubTable.subset_glyphs(s) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def prune_post_subset(self, options): + if self.Format == 1: + return self.ExtSubTable.prune_post_subset(options) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def subset_lookups(self, lookup_indices): + if self.Format == 1: + return self.ExtSubTable.subset_lookups(lookup_indices) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def collect_lookups(self): + if self.Format == 1: + return self.ExtSubTable.collect_lookups() + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Lookup) +def 
closure_glyphs(self, s, cur_glyphs=None): + if cur_glyphs is None: + cur_glyphs = frozenset(s.glyphs) + + # Memoize + if (id(self), cur_glyphs) in s._doneLookups: + return + s._doneLookups.add((id(self), cur_glyphs)) + + if self in s._activeLookups: + raise Exception("Circular loop in lookup recursion") + s._activeLookups.append(self) + for st in self.SubTable: + if not st: continue + st.closure_glyphs(s, cur_glyphs) + assert(s._activeLookups[-1] == self) + del s._activeLookups[-1] + +@_add_method(otTables.Lookup) +def subset_glyphs(self, s): + self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)] + self.SubTableCount = len(self.SubTable) + return bool(self.SubTableCount) + +@_add_method(otTables.Lookup) +def prune_post_subset(self, options): + ret = False + for st in self.SubTable: + if not st: continue + if st.prune_post_subset(options): ret = True + return ret + +@_add_method(otTables.Lookup) +def subset_lookups(self, lookup_indices): + for s in self.SubTable: + s.subset_lookups(lookup_indices) + +@_add_method(otTables.Lookup) +def collect_lookups(self): + return _uniq_sort(sum((st.collect_lookups() for st in self.SubTable + if st), [])) + +@_add_method(otTables.Lookup) +def may_have_non_1to1(self): + return any(st.may_have_non_1to1() for st in self.SubTable if st) + +@_add_method(otTables.LookupList) +def subset_glyphs(self, s): + """Returns the indices of nonempty lookups.""" + return [i for i,l in enumerate(self.Lookup) if l and l.subset_glyphs(s)] + +@_add_method(otTables.LookupList) +def prune_post_subset(self, options): + ret = False + for l in self.Lookup: + if not l: continue + if l.prune_post_subset(options): ret = True + return ret + +@_add_method(otTables.LookupList) +def subset_lookups(self, lookup_indices): + self.ensureDecompiled() + self.Lookup = [self.Lookup[i] for i in lookup_indices + if i < self.LookupCount] + self.LookupCount = len(self.Lookup) + for l in self.Lookup: + l.subset_lookups(lookup_indices) + 
+@_add_method(otTables.LookupList) +def neuter_lookups(self, lookup_indices): + """Sets lookups not in lookup_indices to None.""" + self.ensureDecompiled() + self.Lookup = [l if i in lookup_indices else None for i,l in enumerate(self.Lookup)] + +@_add_method(otTables.LookupList) +def closure_lookups(self, lookup_indices): + lookup_indices = _uniq_sort(lookup_indices) + recurse = lookup_indices + while True: + recurse_lookups = sum((self.Lookup[i].collect_lookups() + for i in recurse if i < self.LookupCount), []) + recurse_lookups = [l for l in recurse_lookups + if l not in lookup_indices and l < self.LookupCount] + if not recurse_lookups: + return _uniq_sort(lookup_indices) + recurse_lookups = _uniq_sort(recurse_lookups) + lookup_indices.extend(recurse_lookups) + recurse = recurse_lookups + +@_add_method(otTables.Feature) +def subset_lookups(self, lookup_indices): + self.LookupListIndex = [l for l in self.LookupListIndex + if l in lookup_indices] + # Now map them. + self.LookupListIndex = [lookup_indices.index(l) + for l in self.LookupListIndex] + self.LookupCount = len(self.LookupListIndex) + return self.LookupCount or self.FeatureParams + +@_add_method(otTables.Feature) +def collect_lookups(self): + return self.LookupListIndex[:] + +@_add_method(otTables.FeatureList) +def subset_lookups(self, lookup_indices): + """Returns the indices of nonempty features.""" + # Note: Never ever drop feature 'pref', even if it's empty. + # HarfBuzz chooses shaper for Khmer based on presence of this + # feature. 
See thread at: + # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html + feature_indices = [i for i,f in enumerate(self.FeatureRecord) + if (f.Feature.subset_lookups(lookup_indices) or + f.FeatureTag == 'pref')] + self.subset_features(feature_indices) + return feature_indices + +@_add_method(otTables.FeatureList) +def collect_lookups(self, feature_indices): + return _uniq_sort(sum((self.FeatureRecord[i].Feature.collect_lookups() + for i in feature_indices + if i < self.FeatureCount), [])) + +@_add_method(otTables.FeatureList) +def subset_features(self, feature_indices): + self.ensureDecompiled() + self.FeatureRecord = [self.FeatureRecord[i] for i in feature_indices] + self.FeatureCount = len(self.FeatureRecord) + return bool(self.FeatureCount) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def subset_features(self, feature_indices): + if self.ReqFeatureIndex in feature_indices: + self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex) + else: + self.ReqFeatureIndex = 65535 + self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices] + # Now map them. 
+ self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex + if f in feature_indices] + self.FeatureCount = len(self.FeatureIndex) + return bool(self.FeatureCount or self.ReqFeatureIndex != 65535) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def collect_features(self): + feature_indices = self.FeatureIndex[:] + if self.ReqFeatureIndex != 65535: + feature_indices.append(self.ReqFeatureIndex) + return _uniq_sort(feature_indices) + +@_add_method(otTables.Script) +def subset_features(self, feature_indices): + if(self.DefaultLangSys and + not self.DefaultLangSys.subset_features(feature_indices)): + self.DefaultLangSys = None + self.LangSysRecord = [l for l in self.LangSysRecord + if l.LangSys.subset_features(feature_indices)] + self.LangSysCount = len(self.LangSysRecord) + return bool(self.LangSysCount or self.DefaultLangSys) + +@_add_method(otTables.Script) +def collect_features(self): + feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord] + if self.DefaultLangSys: + feature_indices.append(self.DefaultLangSys.collect_features()) + return _uniq_sort(sum(feature_indices, [])) + +@_add_method(otTables.ScriptList) +def subset_features(self, feature_indices): + self.ScriptRecord = [s for s in self.ScriptRecord + if s.Script.subset_features(feature_indices)] + self.ScriptCount = len(self.ScriptRecord) + return bool(self.ScriptCount) + +@_add_method(otTables.ScriptList) +def collect_features(self): + return _uniq_sort(sum((s.Script.collect_features() + for s in self.ScriptRecord), [])) + +@_add_method(ttLib.getTableClass('GSUB')) +def closure_glyphs(self, s): + s.table = self.table + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) + else: + lookup_indices = [] + if self.table.LookupList: + while True: + orig_glyphs = frozenset(s.glyphs) + 
s._activeLookups = [] + s._doneLookups = set() + for i in lookup_indices: + if i >= self.table.LookupList.LookupCount: continue + if not self.table.LookupList.Lookup[i]: continue + self.table.LookupList.Lookup[i].closure_glyphs(s) + del s._activeLookups, s._doneLookups + if orig_glyphs == s.glyphs: + break + del s.table + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_glyphs(self, s): + s.glyphs = s.glyphs_gsubed + if self.table.LookupList: + lookup_indices = self.table.LookupList.subset_glyphs(s) + else: + lookup_indices = [] + self.subset_lookups(lookup_indices) + return True + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_lookups(self, lookup_indices): + """Retains specified lookups, then removes empty features, language + systems, and scripts.""" + if self.table.LookupList: + self.table.LookupList.subset_lookups(lookup_indices) + if self.table.FeatureList: + feature_indices = self.table.FeatureList.subset_lookups(lookup_indices) + else: + feature_indices = [] + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def neuter_lookups(self, lookup_indices): + """Sets lookups not in lookup_indices to None.""" + if self.table.LookupList: + self.table.LookupList.neuter_lookups(lookup_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_lookups(self, remap=True): + """Remove (default) or neuter unreferenced lookups""" + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) + else: + lookup_indices = [] + if self.table.LookupList: + lookup_indices = self.table.LookupList.closure_lookups(lookup_indices) + else: + lookup_indices = [] + if remap: + 
self.subset_lookups(lookup_indices) + else: + self.neuter_lookups(lookup_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_feature_tags(self, feature_tags): + if self.table.FeatureList: + feature_indices = \ + [i for i,f in enumerate(self.table.FeatureList.FeatureRecord) + if f.FeatureTag in feature_tags] + self.table.FeatureList.subset_features(feature_indices) + else: + feature_indices = [] + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_features(self): + """Remove unreferenced features""" + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + self.table.FeatureList.subset_features(feature_indices) + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_pre_subset(self, options): + # Drop undesired features + if '*' not in options.layout_features: + self.subset_feature_tags(options.layout_features) + # Neuter unreferenced lookups + self.prune_lookups(remap=False) + return True + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def remove_redundant_langsys(self): + table = self.table + if not table.ScriptList or not table.FeatureList: + return + + features = table.FeatureList.FeatureRecord + + for s in table.ScriptList.ScriptRecord: + d = s.Script.DefaultLangSys + if not d: + continue + for lr in s.Script.LangSysRecord[:]: + l = lr.LangSys + # Compare d and l + if len(d.FeatureIndex) != len(l.FeatureIndex): + continue + if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535): + continue + + if d.ReqFeatureIndex != 65535: + if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]: + continue + + for i in range(len(d.FeatureIndex)): + if 
features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]: + break + else: + # LangSys and default are equal; delete LangSys + s.Script.LangSysRecord.remove(lr) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_post_subset(self, options): + table = self.table + + self.prune_lookups() # XXX Is this actually needed?! + + if table.LookupList: + table.LookupList.prune_post_subset(options) + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. + #if not table.LookupList.Lookup: + # table.LookupList = None + + if not table.LookupList: + table.FeatureList = None + + if table.FeatureList: + self.remove_redundant_langsys() + # Remove unreferenced features + self.prune_features() + + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. + #if table.FeatureList and not table.FeatureList.FeatureRecord: + # table.FeatureList = None + + # Never drop scripts themselves as them just being available + # holds semantic significance. + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. 
+ #if table.ScriptList and not table.ScriptList.ScriptRecord: + # table.ScriptList = None + + return True + +@_add_method(ttLib.getTableClass('GDEF')) +def subset_glyphs(self, s): + glyphs = s.glyphs_gsubed + table = self.table + if table.LigCaretList: + indices = table.LigCaretList.Coverage.subset(glyphs) + table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i] + for i in indices] + table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph) + if table.MarkAttachClassDef: + table.MarkAttachClassDef.classDefs = \ + {g:v for g,v in table.MarkAttachClassDef.classDefs.items() + if g in glyphs} + if table.GlyphClassDef: + table.GlyphClassDef.classDefs = \ + {g:v for g,v in table.GlyphClassDef.classDefs.items() + if g in glyphs} + if table.AttachList: + indices = table.AttachList.Coverage.subset(glyphs) + GlyphCount = table.AttachList.GlyphCount + table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i] + for i in indices + if i < GlyphCount] + table.AttachList.GlyphCount = len(table.AttachList.AttachPoint) + if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef: + for coverage in table.MarkGlyphSetsDef.Coverage: + coverage.subset(glyphs) + # TODO: The following is disabled. If enabling, we need to go fixup all + # lookups that use MarkFilteringSet and map their set. 
+ # indices = table.MarkGlyphSetsDef.Coverage = \ + # [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs] + return True + +@_add_method(ttLib.getTableClass('GDEF')) +def prune_post_subset(self, options): + table = self.table + # XXX check these against OTS + if table.LigCaretList and not table.LigCaretList.LigGlyphCount: + table.LigCaretList = None + if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs: + table.MarkAttachClassDef = None + if table.GlyphClassDef and not table.GlyphClassDef.classDefs: + table.GlyphClassDef = None + if table.AttachList and not table.AttachList.GlyphCount: + table.AttachList = None + if (hasattr(table, "MarkGlyphSetsDef") and + table.MarkGlyphSetsDef and + not table.MarkGlyphSetsDef.Coverage): + table.MarkGlyphSetsDef = None + if table.Version == 0x00010002/0x10000: + table.Version = 1.0 + return bool(table.LigCaretList or + table.MarkAttachClassDef or + table.GlyphClassDef or + table.AttachList or + (table.Version >= 0x00010002/0x10000 and table.MarkGlyphSetsDef)) + +@_add_method(ttLib.getTableClass('kern')) +def prune_pre_subset(self, options): + # Prune unknown kern table types + self.kernTables = [t for t in self.kernTables if hasattr(t, 'kernTable')] + return bool(self.kernTables) + +@_add_method(ttLib.getTableClass('kern')) +def subset_glyphs(self, s): + glyphs = s.glyphs_gsubed + for t in self.kernTables: + t.kernTable = {(a,b):v for (a,b),v in t.kernTable.items() + if a in glyphs and b in glyphs} + self.kernTables = [t for t in self.kernTables if t.kernTable] + return bool(self.kernTables) + +@_add_method(ttLib.getTableClass('vmtx')) +def subset_glyphs(self, s): + self.metrics = _dict_subset(self.metrics, s.glyphs) + return bool(self.metrics) + +@_add_method(ttLib.getTableClass('hmtx')) +def subset_glyphs(self, s): + self.metrics = _dict_subset(self.metrics, s.glyphs) + return True # Required table + +@_add_method(ttLib.getTableClass('hdmx')) +def subset_glyphs(self, s): + self.hdmx = {sz:_dict_subset(l, 
s.glyphs) for sz,l in self.hdmx.items()} + return bool(self.hdmx) + +@_add_method(ttLib.getTableClass('VORG')) +def subset_glyphs(self, s): + self.VOriginRecords = {g:v for g,v in self.VOriginRecords.items() + if g in s.glyphs} + self.numVertOriginYMetrics = len(self.VOriginRecords) + return True # Never drop; has default metrics + +@_add_method(ttLib.getTableClass('post')) +def prune_pre_subset(self, options): + if not options.glyph_names: + self.formatType = 3.0 + return True # Required table + +@_add_method(ttLib.getTableClass('post')) +def subset_glyphs(self, s): + self.extraNames = [] # This seems to do it + return True # Required table + +@_add_method(ttLib.getTableModule('glyf').Glyph) +def remapComponentsFast(self, indices): + if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: + return # Not composite + data = array.array("B", self.data) + i = 10 + more = 1 + while more: + flags =(data[i] << 8) | data[i+1] + glyphID =(data[i+2] << 8) | data[i+3] + # Remap + glyphID = indices.index(glyphID) + data[i+2] = glyphID >> 8 + data[i+3] = glyphID & 0xFF + i += 4 + flags = int(flags) + + if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS + else: i += 2 + if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE + elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE + elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO + more = flags & 0x0020 # MORE_COMPONENTS + + self.data = data.tostring() + +@_add_method(ttLib.getTableClass('glyf')) +def closure_glyphs(self, s): + decompose = s.glyphs + while True: + components = set() + for g in decompose: + if g not in self.glyphs: + continue + gl = self.glyphs[g] + for c in gl.getComponentNames(self): + if c not in s.glyphs: + components.add(c) + components = set(c for c in components if c not in s.glyphs) + if not components: + break + decompose = components + s.glyphs.update(components) + +@_add_method(ttLib.getTableClass('glyf')) +def prune_pre_subset(self, options): + if options.notdef_glyph and not options.notdef_outline: + 
g = self[self.glyphOrder[0]] + # Yay, easy! + g.__dict__.clear() + g.data = "" + return True + +@_add_method(ttLib.getTableClass('glyf')) +def subset_glyphs(self, s): + self.glyphs = _dict_subset(self.glyphs, s.glyphs) + indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs] + for v in self.glyphs.values(): + if hasattr(v, "data"): + v.remapComponentsFast(indices) + else: + pass # No need + self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs] + # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset. + return True + +@_add_method(ttLib.getTableClass('glyf')) +def prune_post_subset(self, options): + remove_hinting = not options.hinting + for v in self.glyphs.values(): + v.trim(remove_hinting=remove_hinting) + return True + +@_add_method(ttLib.getTableClass('CFF ')) +def prune_pre_subset(self, options): + cff = self.cff + # CFF table must have one font only + cff.fontNames = cff.fontNames[:1] + + if options.notdef_glyph and not options.notdef_outline: + for fontname in cff.keys(): + font = cff[fontname] + c,_ = font.CharStrings.getItemAndSelector('.notdef') + # XXX we should preserve the glyph width + c.bytecode = '\x0e' # endchar + c.program = None + + return True # bool(cff.fontNames) + +@_add_method(ttLib.getTableClass('CFF ')) +def subset_glyphs(self, s): + cff = self.cff + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + + # Load all glyphs + for g in font.charset: + if g not in s.glyphs: continue + c,sel = cs.getItemAndSelector(g) + + if cs.charStringsAreIndexed: + indices = [i for i,g in enumerate(font.charset) if g in s.glyphs] + csi = cs.charStringsIndex + csi.items = [csi.items[i] for i in indices] + del csi.file, csi.offsets + if hasattr(font, "FDSelect"): + sel = font.FDSelect + # XXX We want to set sel.format to None, such that the + # most compact format is selected. However, OTS was + # broken and couldn't parse a FDSelect format 0 that + # happened before CharStrings. 
As such, always force + # format 3 until we fix cffLib to always generate + # FDSelect after CharStrings. + # https://github.com/khaledhosny/ots/pull/31 + #sel.format = None + sel.format = 3 + sel.gidArray = [sel.gidArray[i] for i in indices] + cs.charStrings = {g:indices.index(v) + for g,v in cs.charStrings.items() + if g in s.glyphs} + else: + cs.charStrings = {g:v + for g,v in cs.charStrings.items() + if g in s.glyphs} + font.charset = [g for g in font.charset if g in s.glyphs] + font.numGlyphs = len(font.charset) + + return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) + +@_add_method(psCharStrings.T2CharString) +def subset_subroutines(self, subrs, gsubrs): + p = self.program + assert len(p) + for i in range(1, len(p)): + if p[i] == 'callsubr': + assert isinstance(p[i-1], int) + p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias + elif p[i] == 'callgsubr': + assert isinstance(p[i-1], int) + p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias + +@_add_method(psCharStrings.T2CharString) +def drop_hints(self): + hints = self._hints + + if hints.has_hint: + self.program = self.program[hints.last_hint:] + if hasattr(self, 'width'): + # Insert width back if needed + if self.width != self.private.defaultWidthX: + self.program.insert(0, self.width - self.private.nominalWidthX) + + if hints.has_hintmask: + i = 0 + p = self.program + while i < len(p): + if p[i] in ['hintmask', 'cntrmask']: + assert i + 1 <= len(p) + del p[i:i+2] + continue + i += 1 + + # TODO: we currently don't drop calls to "empty" subroutines. 
+ + assert len(self.program) + + del self._hints + +class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs): + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + for subrs in [localSubrs, globalSubrs]: + if subrs and not hasattr(subrs, "_used"): + subrs._used = set() + + def op_callsubr(self, index): + self.localSubrs._used.add(self.operandStack[-1]+self.localBias) + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + + def op_callgsubr(self, index): + self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias) + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + +class _DehintingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + class Hints(object): + def __init__(self): + # Whether calling this charstring produces any hint stems + self.has_hint = False + # Index to start at to drop all hints + self.last_hint = 0 + # Index up to which we know more hints are possible. + # Only relevant if status is 0 or 1. + self.last_checked = 0 + # The status means: + # 0: after dropping hints, this charstring is empty + # 1: after dropping hints, there may be more hints + # continuing after this + # 2: no more hints possible after this charstring + self.status = 0 + # Has hintmask instructions; not recursive + self.has_hintmask = False + pass + + def __init__(self, css, localSubrs, globalSubrs): + self._css = css + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + + def execute(self, charString): + old_hints = charString._hints if hasattr(charString, '_hints') else None + charString._hints = self.Hints() + + psCharStrings.SimpleT2Decompiler.execute(self, charString) + + hints = charString._hints + + if hints.has_hint or hints.has_hintmask: + self._css.add(charString) + + if hints.status != 2: + # Check from last_check, make sure we didn't have any operators. 
+ for i in range(hints.last_checked, len(charString.program) - 1): + if isinstance(charString.program[i], str): + hints.status = 2 + break + else: + hints.status = 1 # There's *something* here + hints.last_checked = len(charString.program) + + if old_hints: + assert hints.__dict__ == old_hints.__dict__ + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + self.processSubr(index, subr) + + def op_hstem(self, index): + psCharStrings.SimpleT2Decompiler.op_hstem(self, index) + self.processHint(index) + def op_vstem(self, index): + psCharStrings.SimpleT2Decompiler.op_vstem(self, index) + self.processHint(index) + def op_hstemhm(self, index): + psCharStrings.SimpleT2Decompiler.op_hstemhm(self, index) + self.processHint(index) + def op_vstemhm(self, index): + psCharStrings.SimpleT2Decompiler.op_vstemhm(self, index) + self.processHint(index) + def op_hintmask(self, index): + psCharStrings.SimpleT2Decompiler.op_hintmask(self, index) + self.processHintmask(index) + def op_cntrmask(self, index): + psCharStrings.SimpleT2Decompiler.op_cntrmask(self, index) + self.processHintmask(index) + + def processHintmask(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hintmask = True + if hints.status != 2 and hints.has_hint: + # Check from last_check, see if we may be an implicit vstem + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + if hints.status != 2: + # We are an implicit vstem + hints.last_hint = index + 1 + hints.status = 0 + hints.last_checked = index + 1 + + def processHint(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hint = True + hints.last_hint = index + 
hints.last_checked = index + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + hints = cs._hints + subr_hints = subr._hints + + if subr_hints.has_hint: + if hints.status != 2: + hints.has_hint = True + hints.last_checked = index + hints.status = subr_hints.status + # Decide where to chop off from + if subr_hints.status == 0: + hints.last_hint = index + else: + hints.last_hint = index - 2 # Leave the subr call in + else: + # In my understanding, this is a font bug. + # I.e., it has hint stems *after* path construction. + # I've seen this in widespread fonts. + # Best to ignore the hints I suppose... + pass + #assert 0 + else: + hints.status = max(hints.status, subr_hints.status) + if hints.status != 2: + # Check from last_check, make sure we didn't have + # any operators. + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + hints.last_checked = index + if hints.status != 2: + # Decide where to chop off from + if subr_hints.status == 0: + hints.last_hint = index + else: + hints.last_hint = index - 2 # Leave the subr call in + +class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs): + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + + def execute(self, charString): + # Note: Currently we recompute _desubroutinized each time. + # This is more robust in some cases, but in other places we assume + # that each subroutine always expands to the same code, so + # maybe it doesn't matter. To speed up we can just not + # recompute _desubroutinized if it's there. For now I just + # double-check that it desubroutinized to the same thing. 
+ old_desubroutinized = charString._desubroutinized if hasattr(charString, '_desubroutinized') else None + + charString._patches = [] + psCharStrings.SimpleT2Decompiler.execute(self, charString) + desubroutinized = charString.program[:] + for idx,expansion in reversed (charString._patches): + assert idx >= 2 + assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1] + assert type(desubroutinized[idx - 2]) == int + if expansion[-1] == 'return': + expansion = expansion[:-1] + desubroutinized[idx-2:idx] = expansion + if 'endchar' in desubroutinized: + # Cut off after first endchar + desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1] + else: + if not len(desubroutinized) or desubroutinized[-1] != 'return': + desubroutinized.append('return') + + charString._desubroutinized = desubroutinized + del charString._patches + + if old_desubroutinized: + assert desubroutinized == old_desubroutinized + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + self.processSubr(index, subr) + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + cs._patches.append((index, subr._desubroutinized)) + + +@_add_method(ttLib.getTableClass('CFF ')) +def prune_post_subset(self, options): + cff = self.cff + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + + # Drop unused FontDictionaries + if hasattr(font, "FDSelect"): + sel = font.FDSelect + indices = _uniq_sort(sel.gidArray) + sel.gidArray = [indices.index (ss) for ss in sel.gidArray] + arr = font.FDArray + arr.items = [arr[i] for i in indices] + del arr.file, arr.offsets + + # Desubroutinize if asked for + if options.desubroutinize: + for g in font.charset: 
+ c,sel = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs) + decompiler.execute(c) + c.program = c._desubroutinized + + # Drop hints if not needed + if not options.hinting: + + # This can be tricky, but doesn't have to. What we do is: + # + # - Run all used glyph charstrings and recurse into subroutines, + # - For each charstring (including subroutines), if it has any + # of the hint stem operators, we mark it as such. + # Upon returning, for each charstring we note all the + # subroutine calls it makes that (recursively) contain a stem, + # - Dropping hinting then consists of the following two ops: + # * Drop the piece of the program in each charstring before the + # last call to a stem op or a stem-calling subroutine, + # * Drop all hintmask operations. + # - It's trickier... A hintmask right after hints and a few numbers + # will act as an implicit vstemhm. As such, we track whether + # we have seen any non-hint operators so far and do the right + # thing, recursively... 
Good luck understanding that :( + css = set() + for g in font.charset: + c,sel = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs) + decompiler.execute(c) + for charstring in css: + charstring.drop_hints() + del css + + # Drop font-wide hinting values + all_privs = [] + if hasattr(font, 'FDSelect'): + all_privs.extend(fd.Private for fd in font.FDArray) + else: + all_privs.append(font.Private) + for priv in all_privs: + for k in ['BlueValues', 'OtherBlues', + 'FamilyBlues', 'FamilyOtherBlues', + 'BlueScale', 'BlueShift', 'BlueFuzz', + 'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW']: + if hasattr(priv, k): + setattr(priv, k, None) + + # Renumber subroutines to remove unused ones + + # Mark all used subroutines + for g in font.charset: + c,sel = cs.getItemAndSelector(g) + subrs = getattr(c.private, "Subrs", []) + decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs) + decompiler.execute(c) + + all_subrs = [font.GlobalSubrs] + if hasattr(font, 'FDSelect'): + all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs) + elif hasattr(font.Private, 'Subrs') and font.Private.Subrs: + all_subrs.append(font.Private.Subrs) + + subrs = set(subrs) # Remove duplicates + + # Prepare + for subrs in all_subrs: + if not hasattr(subrs, '_used'): + subrs._used = set() + subrs._used = _uniq_sort(subrs._used) + subrs._old_bias = psCharStrings.calcSubrBias(subrs) + subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) + + # Renumber glyph charstrings + for g in font.charset: + c,sel = cs.getItemAndSelector(g) + subrs = getattr(c.private, "Subrs", []) + c.subset_subroutines (subrs, font.GlobalSubrs) + + # Renumber subroutines themselves + for subrs in all_subrs: + if subrs == font.GlobalSubrs: + if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'): + local_subrs = font.Private.Subrs + else: + local_subrs = [] + else: + 
local_subrs = subrs + + subrs.items = [subrs.items[i] for i in subrs._used] + del subrs.file + if hasattr(subrs, 'offsets'): + del subrs.offsets + + for subr in subrs.items: + subr.subset_subroutines (local_subrs, font.GlobalSubrs) + + # Cleanup + for subrs in all_subrs: + del subrs._used, subrs._old_bias, subrs._new_bias + + return True + +@_add_method(ttLib.getTableClass('cmap')) +def closure_glyphs(self, s): + tables = [t for t in self.tables if t.isUnicode()] + + # Close glyphs + for table in tables: + if table.format == 14: + for cmap in table.uvsDict.values(): + glyphs = {g for u,g in cmap if u in s.unicodes_requested} + if None in glyphs: + glyphs.remove(None) + s.glyphs.update(glyphs) + else: + cmap = table.cmap + intersection = s.unicodes_requested.intersection(cmap.keys()) + s.glyphs.update(cmap[u] for u in intersection) + + # Calculate unicodes_missing + s.unicodes_missing = s.unicodes_requested.copy() + for table in tables: + s.unicodes_missing.difference_update(table.cmap) + +@_add_method(ttLib.getTableClass('cmap')) +def prune_pre_subset(self, options): + if not options.legacy_cmap: + # Drop non-Unicode / non-Symbol cmaps + self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()] + if not options.symbol_cmap: + self.tables = [t for t in self.tables if not t.isSymbol()] + # TODO(behdad) Only keep one subtable? + # For now, drop format=0 which can't be subset_glyphs easily? + self.tables = [t for t in self.tables if t.format != 0] + self.numSubTables = len(self.tables) + return True # Required table + +@_add_method(ttLib.getTableClass('cmap')) +def subset_glyphs(self, s): + s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only + for t in self.tables: + if t.format == 14: + # TODO(behdad) We drop all the default-UVS mappings + # for glyphs_requested. So it's the caller's responsibility to make + # sure those are included. 
+ t.uvsDict = {v:[(u,g) for u,g in l + if g in s.glyphs_requested or u in s.unicodes_requested] + for v,l in t.uvsDict.items()} + t.uvsDict = {v:l for v,l in t.uvsDict.items() if l} + elif t.isUnicode(): + t.cmap = {u:g for u,g in t.cmap.items() + if g in s.glyphs_requested or u in s.unicodes_requested} + else: + t.cmap = {u:g for u,g in t.cmap.items() + if g in s.glyphs_requested} + self.tables = [t for t in self.tables + if (t.cmap if t.format != 14 else t.uvsDict)] + self.numSubTables = len(self.tables) + # TODO(behdad) Convert formats when needed. + # In particular, if we have a format=12 without non-BMP + # characters, either drop format=12 one or convert it + # to format=4 if there's not one. + return True # Required table + +@_add_method(ttLib.getTableClass('DSIG')) +def prune_pre_subset(self, options): + # Drop all signatures since they will be invalid + self.usNumSigs = 0 + self.signatureRecords = [] + return True + +@_add_method(ttLib.getTableClass('maxp')) +def prune_pre_subset(self, options): + if not options.hinting: + if self.tableVersion == 0x00010000: + self.maxZones = 1 + self.maxTwilightPoints = 0 + self.maxFunctionDefs = 0 + self.maxInstructionDefs = 0 + self.maxStackElements = 0 + self.maxSizeOfInstructions = 0 + return True + +@_add_method(ttLib.getTableClass('name')) +def prune_pre_subset(self, options): + if '*' not in options.name_IDs: + self.names = [n for n in self.names if n.nameID in options.name_IDs] + if not options.name_legacy: + # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman + # entry for Latin and no Unicode names. + self.names = [n for n in self.names if n.isUnicode()] + # TODO(behdad) Option to keep only one platform's + if '*' not in options.name_languages: + # TODO(behdad) This is Windows-platform specific! 
+ self.names = [n for n in self.names + if n.langID in options.name_languages] + if options.obfuscate_names: + namerecs = [] + for n in self.names: + if n.nameID in [1, 4]: + n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f" + elif n.nameID in [2, 6]: + n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f" + elif n.nameID == 3: + n.string = "" + elif n.nameID in [16, 17, 18]: + continue + namerecs.append(n) + self.names = namerecs + return True # Required table + + +# TODO(behdad) OS/2 ulUnicodeRange / ulCodePageRange? +# TODO(behdad) Drop AAT tables. +# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries. +# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left +# TODO(behdad) Drop GDEF subitems if unused by lookups +# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF) +# TODO(behdad) Text direction considerations. +# TODO(behdad) Text script / language considerations. +# TODO(behdad) Optionally drop 'kern' table if GPOS available +# TODO(behdad) Implement --unicode='*' to choose all cmap'ed +# TODO(behdad) Drop old-spec Indic scripts + + +class Options(object): + + class OptionError(Exception): pass + class UnknownOptionError(OptionError): pass + + _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', + 'EBSC', 'SVG ', 'PCLT', 'LTSH'] + _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite + _drop_tables_default += ['CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'] # Color + _no_subset_tables_default = ['gasp', 'head', 'hhea', 'maxp', + 'vhea', 'OS/2', 'loca', 'name', 'cvt ', + 'fpgm', 'prep', 'VDMX', 'DSIG'] + _hinting_tables_default = ['cvt ', 'fpgm', 'prep', 'hdmx', 'VDMX'] + + # Based on HarfBuzz shapers + _layout_features_groups = { + # Default shaper + 'common': ['ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'], + 'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'], + 'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'], + 'ltr': ['ltra', 'ltrm'], + 'rtl': ['rtla', 
'rtlm'], + # Complex shapers + 'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3', + 'cswh', 'mset'], + 'hangul': ['ljmo', 'vjmo', 'tjmo'], + 'tibetan': ['abvs', 'blws', 'abvm', 'blwm'], + 'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half', + 'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres', + 'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'], + } + _layout_features_default = _uniq_sort(sum( + iter(_layout_features_groups.values()), [])) + + drop_tables = _drop_tables_default + no_subset_tables = _no_subset_tables_default + hinting_tables = _hinting_tables_default + legacy_kern = False # drop 'kern' table if GPOS available + layout_features = _layout_features_default + ignore_missing_glyphs = False + ignore_missing_unicodes = True + hinting = True + glyph_names = False + legacy_cmap = False + symbol_cmap = False + name_IDs = [1, 2] # Family and Style + name_legacy = False + name_languages = [0x0409] # English + obfuscate_names = False # to make webfont unusable as a system font + notdef_glyph = True # gid0 for TrueType / .notdef for CFF + notdef_outline = False # No need for notdef to have an outline really + recommended_glyphs = False # gid1, gid2, gid3 for TrueType + recalc_bounds = False # Recalculate font bounding boxes + recalc_timestamp = False # Recalculate font modified timestamp + canonical_order = False # Order tables as recommended + flavor = None # May be 'woff' or 'woff2' + desubroutinize = False # Desubroutinize CFF CharStrings + + def __init__(self, **kwargs): + self.set(**kwargs) + + def set(self, **kwargs): + for k,v in kwargs.items(): + if not hasattr(self, k): + raise self.UnknownOptionError("Unknown option '%s'" % k) + setattr(self, k, v) + + def parse_opts(self, argv, ignore_unknown=False): + ret = [] + for a in argv: + orig_a = a + if not a.startswith('--'): + ret.append(a) + continue + a = a[2:] + i = a.find('=') + op = '=' + if i == -1: + if a.startswith("no-"): + k = a[3:] + v = False + else: + k = a 
+ v = True + if k.endswith("?"): + k = k[:-1] + v = '?' + else: + k = a[:i] + if k[-1] in "-+": + op = k[-1]+'=' # Op is '-=' or '+=' now. + k = k[:-1] + v = a[i+1:] + ok = k + k = k.replace('-', '_') + if not hasattr(self, k): + if ignore_unknown is True or ok in ignore_unknown: + ret.append(orig_a) + continue + else: + raise self.UnknownOptionError("Unknown option '%s'" % a) + + ov = getattr(self, k) + if v == '?': + print("Current setting for '%s' is: %s" % (ok, ov)) + continue + if isinstance(ov, bool): + v = bool(v) + elif isinstance(ov, int): + v = int(v) + elif isinstance(ov, str): + v = str(v) # redundant + elif isinstance(ov, list): + if isinstance(v, bool): + raise self.OptionError("Option '%s' requires values to be specified using '='" % a) + vv = v.replace(',', ' ').split() + if vv == ['']: + vv = [] + vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] + if op == '=': + v = vv + elif op == '+=': + v = ov + v.extend(vv) + elif op == '-=': + v = ov + for x in vv: + if x in v: + v.remove(x) + else: + assert False + + setattr(self, k, v) + + return ret + + +class Subsetter(object): + + class SubsettingError(Exception): pass + class MissingGlyphsSubsettingError(SubsettingError): pass + class MissingUnicodesSubsettingError(SubsettingError): pass + + def __init__(self, options=None, log=None): + + if not log: + log = Logger() + if not options: + options = Options() + + self.options = options + self.log = log + self.unicodes_requested = set() + self.glyph_names_requested = set() + self.glyph_ids_requested = set() + + def populate(self, glyphs=[], gids=[], unicodes=[], text=""): + self.unicodes_requested.update(unicodes) + if isinstance(text, bytes): + text = text.decode("utf_8") + for u in text: + self.unicodes_requested.add(ord(u)) + self.glyph_names_requested.update(glyphs) + self.glyph_ids_requested.update(gids) + + def _prune_pre_subset(self, font): + + for tag in font.keys(): + if tag == 'GlyphOrder': continue + + if(tag in 
self.options.drop_tables or + (tag in self.options.hinting_tables and not self.options.hinting) or + (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))): + self.log(tag, "dropped") + del font[tag] + continue + + clazz = ttLib.getTableClass(tag) + + if hasattr(clazz, 'prune_pre_subset'): + table = font[tag] + self.log.lapse("load '%s'" % tag) + retain = table.prune_pre_subset(self.options) + self.log.lapse("prune '%s'" % tag) + if not retain: + self.log(tag, "pruned to empty; dropped") + del font[tag] + continue + else: + self.log(tag, "pruned") + + def _closure_glyphs(self, font): + + realGlyphs = set(font.getGlyphOrder()) + glyph_order = font.getGlyphOrder() + + self.glyphs_requested = set() + self.glyphs_requested.update(self.glyph_names_requested) + self.glyphs_requested.update(glyph_order[i] + for i in self.glyph_ids_requested + if i < len(glyph_order)) + + self.glyphs_missing = set() + self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs)) + self.glyphs_missing.update(i for i in self.glyph_ids_requested + if i >= len(glyph_order)) + if self.glyphs_missing: + self.log("Missing requested glyphs: %s" % self.glyphs_missing) + if not self.options.ignore_missing_glyphs: + raise self.MissingGlyphsSubsettingError(self.glyphs_missing) + + self.glyphs = self.glyphs_requested.copy() + + self.unicodes_missing = set() + if 'cmap' in font: + font['cmap'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.log.lapse("close glyph list over 'cmap'") + self.glyphs_cmaped = frozenset(self.glyphs) + if self.unicodes_missing: + missing = ["U+%04X" % u for u in self.unicodes_missing] + self.log("Missing glyphs for requested Unicodes: %s" % missing) + if not self.options.ignore_missing_unicodes: + raise self.MissingUnicodesSubsettingError(missing) + del missing + + if self.options.notdef_glyph: + if 'glyf' in font: + self.glyphs.add(font.getGlyphName(0)) + self.log("Added gid0 to subset") + else: + self.glyphs.add('.notdef') 
+ self.log("Added .notdef to subset") + if self.options.recommended_glyphs: + if 'glyf' in font: + for i in range(min(4, len(font.getGlyphOrder()))): + self.glyphs.add(font.getGlyphName(i)) + self.log("Added first four glyphs to subset") + + if 'GSUB' in font: + self.log("Closing glyph list over 'GSUB': %d glyphs before" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + font['GSUB'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.log("Closed glyph list over 'GSUB': %d glyphs after" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + self.log.lapse("close glyph list over 'GSUB'") + self.glyphs_gsubed = frozenset(self.glyphs) + + if 'glyf' in font: + self.log("Closing glyph list over 'glyf': %d glyphs before" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + font['glyf'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.log("Closed glyph list over 'glyf': %d glyphs after" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + self.log.lapse("close glyph list over 'glyf'") + self.glyphs_glyfed = frozenset(self.glyphs) + + self.glyphs_all = frozenset(self.glyphs) + + self.log("Retaining %d glyphs: " % len(self.glyphs_all)) + + del self.glyphs + + def _subset_glyphs(self, font): + for tag in font.keys(): + if tag == 'GlyphOrder': continue + clazz = ttLib.getTableClass(tag) + + if tag in self.options.no_subset_tables: + self.log(tag, "subsetting not needed") + elif hasattr(clazz, 'subset_glyphs'): + table = font[tag] + self.glyphs = self.glyphs_all + retain = table.subset_glyphs(self) + del self.glyphs + self.log.lapse("subset '%s'" % tag) + if not retain: + self.log(tag, "subsetted to empty; dropped") + del font[tag] + else: + self.log(tag, "subsetted") + else: + self.log(tag, "NOT subset; don't know how to subset; dropped") + del font[tag] + + glyphOrder = font.getGlyphOrder() + glyphOrder = [g for g in glyphOrder if g in self.glyphs_all] + 
font.setGlyphOrder(glyphOrder) + font._buildReverseGlyphOrderDict() + self.log.lapse("subset GlyphOrder") + + def _prune_post_subset(self, font): + for tag in font.keys(): + if tag == 'GlyphOrder': continue + clazz = ttLib.getTableClass(tag) + if hasattr(clazz, 'prune_post_subset'): + table = font[tag] + retain = table.prune_post_subset(self.options) + self.log.lapse("prune '%s'" % tag) + if not retain: + self.log(tag, "pruned to empty; dropped") + del font[tag] + else: + self.log(tag, "pruned") + + def subset(self, font): + + self._prune_pre_subset(font) + self._closure_glyphs(font) + self._subset_glyphs(font) + self._prune_post_subset(font) + + +class Logger(object): + + def __init__(self, verbose=False, xml=False, timing=False): + self.verbose = verbose + self.xml = xml + self.timing = timing + self.last_time = self.start_time = time.time() + + def parse_opts(self, argv): + argv = argv[:] + for v in ['verbose', 'xml', 'timing']: + if "--"+v in argv: + setattr(self, v, True) + argv.remove("--"+v) + return argv + + def __call__(self, *things): + if not self.verbose: + return + print(' '.join(str(x) for x in things)) + + def lapse(self, *things): + if not self.timing: + return + new_time = time.time() + print("Took %0.3fs to %s" %(new_time - self.last_time, + ' '.join(str(x) for x in things))) + self.last_time = new_time + + def glyphs(self, glyphs, font=None): + if not self.verbose: + return + self("Glyph names:", sorted(glyphs)) + if font: + reverseGlyphMap = font.getReverseGlyphMap() + self("Glyph IDs: ", sorted(reverseGlyphMap[g] for g in glyphs)) + + def font(self, font, file=sys.stdout): + if not self.xml: + return + from fontTools.misc import xmlWriter + writer = xmlWriter.XMLWriter(file) + for tag in font.keys(): + writer.begintag(tag) + writer.newline() + font[tag].toXML(writer, font) + writer.endtag(tag) + writer.newline() + + +def load_font(fontFile, + options, + allowVID=False, + checkChecksums=False, + dontLoadGlyphNames=False, + lazy=True): + + font = 
ttLib.TTFont(fontFile, + allowVID=allowVID, + checkChecksums=checkChecksums, + recalcBBoxes=options.recalc_bounds, + recalcTimestamp=options.recalc_timestamp, + lazy=lazy) + + # Hack: + # + # If we don't need glyph names, change 'post' class to not try to + # load them. It avoid lots of headache with broken fonts as well + # as loading time. + # + # Ideally ttLib should provide a way to ask it to skip loading + # glyph names. But it currently doesn't provide such a thing. + # + if dontLoadGlyphNames: + post = ttLib.getTableClass('post') + saved = post.decode_format_2_0 + post.decode_format_2_0 = post.decode_format_3_0 + f = font['post'] + if f.formatType == 2.0: + f.formatType = 3.0 + post.decode_format_2_0 = saved + + return font + +def save_font(font, outfile, options): + if options.flavor and not hasattr(font, 'flavor'): + raise Exception("fonttools version does not support flavors.") + font.flavor = options.flavor + font.save(outfile, reorderTables=options.canonical_order) + +def parse_unicodes(s): + import re + s = re.sub (r"0[xX]", " ", s) + s = re.sub (r"[<+>,;&#\\xXuU\n ]", " ", s) + l = [] + for item in s.split(): + fields = item.split('-') + if len(fields) == 1: + l.append(int(item, 16)) + else: + start,end = fields + l.extend(range(int(start, 16), int(end, 16)+1)) + return l + +def parse_gids(s): + l = [] + for item in s.replace(',', ' ').split(): + fields = item.split('-') + if len(fields) == 1: + l.append(int(fields[0])) + else: + l.extend(range(int(fields[0]), int(fields[1])+1)) + return l + +def parse_glyphs(s): + return s.replace(',', ' ').split() + +def main(args=None): + + if args is None: + args = sys.argv[1:] + + if '--help' in args: + print(__doc__) + sys.exit(0) + + log = Logger() + args = log.parse_opts(args) + + options = Options() + args = options.parse_opts(args, + ignore_unknown=['gids', 'gids-file', + 'glyphs', 'glyphs-file', + 'text', 'text-file', + 'unicodes', 'unicodes-file', + 'output-file']) + + if len(args) < 2: + print("usage:", 
__usage__, file=sys.stderr) + print("Try pyftsubset --help for more information.", file=sys.stderr) + sys.exit(1) + + fontfile = args[0] + args = args[1:] + + subsetter = Subsetter(options=options, log=log) + outfile = fontfile + '.subset' + glyphs = [] + gids = [] + unicodes = [] + wildcard_glyphs = False + wildcard_unicodes = False + text = "" + for g in args: + if g == '*': + wildcard_glyphs = True + continue + if g.startswith('--output-file='): + outfile = g[14:] + continue + if g.startswith('--text='): + text += g[7:] + continue + if g.startswith('--text-file='): + text += open(g[12:]).read().replace('\n', '') + continue + if g.startswith('--unicodes='): + if g[11:] == '*': + wildcard_unicodes = True + else: + unicodes.extend(parse_unicodes(g[11:])) + continue + if g.startswith('--unicodes-file='): + for line in open(g[16:]).readlines(): + unicodes.extend(parse_unicodes(line.split('#')[0])) + continue + if g.startswith('--gids='): + gids.extend(parse_gids(g[7:])) + continue + if g.startswith('--gids-file='): + for line in open(g[12:]).readlines(): + gids.extend(parse_gids(line.split('#')[0])) + continue + if g.startswith('--glyphs='): + if g[9:] == '*': + wildcard_glyphs = True + else: + glyphs.extend(parse_glyphs(g[9:])) + continue + if g.startswith('--glyphs-file='): + for line in open(g[14:]).readlines(): + glyphs.extend(parse_glyphs(line.split('#')[0])) + continue + glyphs.append(g) + + dontLoadGlyphNames = not options.glyph_names and not glyphs + font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames) + log.lapse("load font") + if wildcard_glyphs: + glyphs.extend(font.getGlyphOrder()) + if wildcard_unicodes: + for t in font['cmap'].tables: + if t.isUnicode(): + unicodes.extend(t.cmap.keys()) + assert '' not in glyphs + + log.lapse("compile glyph list") + log("Text: '%s'" % text) + log("Unicodes:", unicodes) + log("Glyphs:", glyphs) + log("Gids:", gids) + + subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text) + 
subsetter.subset(font) + + save_font (font, outfile, options) + log.lapse("compile and save font") + + log.last_time = log.start_time + log.lapse("make one with everything(TOTAL TIME)") + + if log.verbose: + import os + log("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile)) + log("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile)) + + log.font(font) + + font.close() + + +__all__ = [ + 'Options', + 'Subsetter', + 'Logger', + 'load_font', + 'save_font', + 'parse_gids', + 'parse_glyphs', + 'parse_unicodes', + 'main' +] + +if __name__ == '__main__': + main() diff -Nru fonttools-2.4/Snippets/fontTools/t1Lib.py fonttools-3.0/Snippets/fontTools/t1Lib.py --- fonttools-2.4/Snippets/fontTools/t1Lib.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/t1Lib.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,371 @@ +"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts + +Functions for reading and writing raw Type 1 data: + +read(path) + reads any Type 1 font file, returns the raw data and a type indicator: + 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed + to by 'path'. + Raises an error when the file does not contain valid Type 1 data. + +write(path, data, kind='OTHER', dohex=False) + writes raw Type 1 data to the file pointed to by 'path'. + 'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'. + 'dohex' is a flag which determines whether the eexec encrypted + part should be written as hexadecimal or binary, but only if kind + is 'LWFN' or 'PFB'. 
+""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import eexec +from fontTools.misc.macCreatorType import getMacCreatorAndType +import os +import re + +__author__ = "jvr" +__version__ = "1.0b2" +DEBUG = 0 + + +try: + try: + from Carbon import Res + except ImportError: + import Res # MacPython < 2.2 +except ImportError: + haveMacSupport = 0 +else: + haveMacSupport = 1 + import MacOS + + +class T1Error(Exception): pass + + +class T1Font(object): + + """Type 1 font class. + + Uses a minimal interpeter that supports just about enough PS to parse + Type 1 fonts. + """ + + def __init__(self, path=None): + if path is not None: + self.data, type = read(path) + else: + pass # XXX + + def saveAs(self, path, type): + write(path, self.getData(), type) + + def getData(self): + # XXX Todo: if the data has been converted to Python object, + # recreate the PS stream + return self.data + + def getGlyphSet(self): + """Return a generic GlyphSet, which is a dict-like object + mapping glyph names to glyph objects. The returned glyph objects + have a .draw() method that supports the Pen protocol, and will + have an attribute named 'width', but only *after* the .draw() method + has been called. + + In the case of Type 1, the GlyphSet is simply the CharStrings dict. 
+ """ + return self["CharStrings"] + + def __getitem__(self, key): + if not hasattr(self, "font"): + self.parse() + return self.font[key] + + def parse(self): + from fontTools.misc import psLib + from fontTools.misc import psCharStrings + self.font = psLib.suckfont(self.data) + charStrings = self.font["CharStrings"] + lenIV = self.font["Private"].get("lenIV", 4) + assert lenIV >= 0 + subrs = self.font["Private"]["Subrs"] + for glyphName, charString in charStrings.items(): + charString, R = eexec.decrypt(charString, 4330) + charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:], + subrs=subrs) + for i in range(len(subrs)): + charString, R = eexec.decrypt(subrs[i], 4330) + subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs) + del self.data + + +# low level T1 data read and write functions + +def read(path, onlyHeader=False): + """reads any Type 1 font file, returns raw data""" + normpath = path.lower() + creator, typ = getMacCreatorAndType(path) + if typ == 'LWFN': + return readLWFN(path, onlyHeader), 'LWFN' + if normpath[-4:] == '.pfb': + return readPFB(path, onlyHeader), 'PFB' + else: + return readOther(path), 'OTHER' + +def write(path, data, kind='OTHER', dohex=False): + assertType1(data) + kind = kind.upper() + try: + os.remove(path) + except os.error: + pass + err = 1 + try: + if kind == 'LWFN': + writeLWFN(path, data) + elif kind == 'PFB': + writePFB(path, data) + else: + writeOther(path, data, dohex) + err = 0 + finally: + if err and not DEBUG: + try: + os.remove(path) + except os.error: + pass + + +# -- internal -- + +LWFNCHUNKSIZE = 2000 +HEXLINELENGTH = 80 + + +def readLWFN(path, onlyHeader=False): + """reads an LWFN font file, returns raw data""" + resRef = Res.FSOpenResFile(path, 1) # read-only + try: + Res.UseResFile(resRef) + n = Res.Count1Resources('POST') + data = [] + for i in range(501, 501 + n): + res = Res.Get1Resource('POST', i) + code = byteord(res.data[0]) + if byteord(res.data[1]) != 0: + raise 
T1Error('corrupt LWFN file') + if code in [1, 2]: + if onlyHeader and code == 2: + break + data.append(res.data[2:]) + elif code in [3, 5]: + break + elif code == 4: + f = open(path, "rb") + data.append(f.read()) + f.close() + elif code == 0: + pass # comment, ignore + else: + raise T1Error('bad chunk code: ' + repr(code)) + finally: + Res.CloseResFile(resRef) + data = bytesjoin(data) + assertType1(data) + return data + +def readPFB(path, onlyHeader=False): + """reads a PFB font file, returns raw data""" + f = open(path, "rb") + data = [] + while True: + if f.read(1) != bytechr(128): + raise T1Error('corrupt PFB file') + code = byteord(f.read(1)) + if code in [1, 2]: + chunklen = stringToLong(f.read(4)) + chunk = f.read(chunklen) + assert len(chunk) == chunklen + data.append(chunk) + elif code == 3: + break + else: + raise T1Error('bad chunk code: ' + repr(code)) + if onlyHeader: + break + f.close() + data = bytesjoin(data) + assertType1(data) + return data + +def readOther(path): + """reads any (font) file, returns raw data""" + f = open(path, "rb") + data = f.read() + f.close() + assertType1(data) + + chunks = findEncryptedChunks(data) + data = [] + for isEncrypted, chunk in chunks: + if isEncrypted and isHex(chunk[:4]): + data.append(deHexString(chunk)) + else: + data.append(chunk) + return bytesjoin(data) + +# file writing tools + +def writeLWFN(path, data): + Res.FSpCreateResFile(path, "just", "LWFN", 0) + resRef = Res.FSOpenResFile(path, 2) # write-only + try: + Res.UseResFile(resRef) + resID = 501 + chunks = findEncryptedChunks(data) + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + while chunk: + res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2]) + res.AddResource('POST', resID, '') + chunk = chunk[LWFNCHUNKSIZE - 2:] + resID = resID + 1 + res = Res.Resource(bytechr(5) + '\0') + res.AddResource('POST', resID, '') + finally: + Res.CloseResFile(resRef) + +def writePFB(path, data): + chunks = 
findEncryptedChunks(data) + f = open(path, "wb") + try: + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + f.write(bytechr(128) + bytechr(code)) + f.write(longToString(len(chunk))) + f.write(chunk) + f.write(bytechr(128) + bytechr(3)) + finally: + f.close() + +def writeOther(path, data, dohex=False): + chunks = findEncryptedChunks(data) + f = open(path, "wb") + try: + hexlinelen = HEXLINELENGTH // 2 + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + if code == 2 and dohex: + while chunk: + f.write(eexec.hexString(chunk[:hexlinelen])) + f.write('\r') + chunk = chunk[hexlinelen:] + else: + f.write(chunk) + finally: + f.close() + + +# decryption tools + +EEXECBEGIN = "currentfile eexec" +EEXECEND = '0' * 64 +EEXECINTERNALEND = "currentfile closefile" +EEXECBEGINMARKER = "%-- eexec start\r" +EEXECENDMARKER = "%-- eexec end\r" + +_ishexRE = re.compile('[0-9A-Fa-f]*$') + +def isHex(text): + return _ishexRE.match(text) is not None + + +def decryptType1(data): + chunks = findEncryptedChunks(data) + data = [] + for isEncrypted, chunk in chunks: + if isEncrypted: + if isHex(chunk[:4]): + chunk = deHexString(chunk) + decrypted, R = eexec.decrypt(chunk, 55665) + decrypted = decrypted[4:] + if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \ + and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND: + raise T1Error("invalid end of eexec part") + decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + '\r' + data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER) + else: + if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN: + data.append(chunk[:-len(EEXECBEGIN)-1]) + else: + data.append(chunk) + return bytesjoin(data) + +def findEncryptedChunks(data): + chunks = [] + while True: + eBegin = data.find(EEXECBEGIN) + if eBegin < 0: + break + eBegin = eBegin + len(EEXECBEGIN) + 1 + eEnd = data.find(EEXECEND, eBegin) + if eEnd < 0: + raise T1Error("can't find end of eexec part") + cypherText = 
data[eBegin:eEnd + 2] + if isHex(cypherText[:4]): + cypherText = deHexString(cypherText) + plainText, R = eexec.decrypt(cypherText, 55665) + eEndLocal = plainText.find(EEXECINTERNALEND) + if eEndLocal < 0: + raise T1Error("can't find end of eexec part") + chunks.append((0, data[:eBegin])) + chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1])) + data = data[eEnd:] + chunks.append((0, data)) + return chunks + +def deHexString(hexstring): + return eexec.deHexString(strjoin(hexstring.split())) + + +# Type 1 assertion + +_fontType1RE = re.compile(br"/FontType\s+1\s+def") + +def assertType1(data): + for head in [b'%!PS-AdobeFont', b'%!FontType1']: + if data[:len(head)] == head: + break + else: + raise T1Error("not a PostScript font") + if not _fontType1RE.search(data): + raise T1Error("not a Type 1 font") + if data.find(b"currentfile eexec") < 0: + raise T1Error("not an encrypted Type 1 font") + # XXX what else? + return data + + +# pfb helpers + +def longToString(long): + s = "" + for i in range(4): + s += bytechr((long & (0xff << (i * 8))) >> i * 8) + return s + +def stringToLong(s): + if len(s) != 4: + raise ValueError('string must be 4 bytes long') + l = 0 + for i in range(4): + l += byteord(s[i]) << (i * 8) + return l diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/__init__.py fonttools-3.0/Snippets/fontTools/ttLib/__init__.py --- fonttools-2.4/Snippets/fontTools/ttLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,991 @@ +"""fontTools.ttLib -- a package for dealing with TrueType fonts. + +This package offers translators to convert TrueType fonts to Python +objects and vice versa, and additionally from Python to TTX (an XML-based +text format) and vice versa. 
+ +Example interactive session: + +Python 1.5.2c1 (#43, Mar 9 1999, 13:06:43) [CW PPC w/GUSI w/MSL] +Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam +>>> from fontTools import ttLib +>>> tt = ttLib.TTFont("afont.ttf") +>>> tt['maxp'].numGlyphs +242 +>>> tt['OS/2'].achVendID +'B&H\000' +>>> tt['head'].unitsPerEm +2048 +>>> tt.saveXML("afont.ttx") +Dumping 'LTSH' table... +Dumping 'OS/2' table... +Dumping 'VDMX' table... +Dumping 'cmap' table... +Dumping 'cvt ' table... +Dumping 'fpgm' table... +Dumping 'glyf' table... +Dumping 'hdmx' table... +Dumping 'head' table... +Dumping 'hhea' table... +Dumping 'hmtx' table... +Dumping 'loca' table... +Dumping 'maxp' table... +Dumping 'name' table... +Dumping 'post' table... +Dumping 'prep' table... +>>> tt2 = ttLib.TTFont() +>>> tt2.importXML("afont.ttx") +>>> tt2['maxp'].numGlyphs +242 +>>> + +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import os +import sys + +haveMacSupport = 0 +if sys.platform == "mac": + haveMacSupport = 1 +elif sys.platform == "darwin": + if sys.version_info[:3] != (2, 2, 0) and sys.version_info[:1] < (3,): + # Python 2.2's Mac support is broken, so don't enable it there. + # Python 3 does not have Res used by macUtils + haveMacSupport = 1 + + +class TTLibError(Exception): pass + + +class TTFont(object): + + """The main font object. It manages file input and output, and offers + a convenient way of accessing tables. + Tables will be only decompiled when necessary, ie. when they're actually + accessed. This means that simple operations can be extremely fast. + """ + + def __init__(self, file=None, res_name_or_index=None, + sfntVersion="\000\001\000\000", flavor=None, checkChecksums=False, + verbose=False, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False, + recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=False): + + """The constructor can be called with a few different arguments. 
+ When reading a font from disk, 'file' should be either a pathname + pointing to a file, or a readable file object. + + It we're running on a Macintosh, 'res_name_or_index' maybe an sfnt + resource name or an sfnt resource index number or zero. The latter + case will cause TTLib to autodetect whether the file is a flat file + or a suitcase. (If it's a suitcase, only the first 'sfnt' resource + will be read!) + + The 'checkChecksums' argument is used to specify how sfnt + checksums are treated upon reading a file from disk: + 0: don't check (default) + 1: check, print warnings if a wrong checksum is found + 2: check, raise an exception if a wrong checksum is found. + + The TTFont constructor can also be called without a 'file' + argument: this is the way to create a new empty font. + In this case you can optionally supply the 'sfntVersion' argument, + and a 'flavor' which can be None, or 'woff'. + + If the recalcBBoxes argument is false, a number of things will *not* + be recalculated upon save/compile: + 1) glyph bounding boxes + 2) maxp font bounding box + 3) hhea min/max values + (1) is needed for certain kinds of CJK fonts (ask Werner Lemberg ;-). + Additionally, upon importing an TTX file, this option cause glyphs + to be compiled right away. This should reduce memory consumption + greatly, and therefore should have some impact on the time needed + to parse/compile large fonts. + + If the recalcTimestamp argument is false, the modified timestamp in the + 'head' table will *not* be recalculated upon save/compile. + + If the allowVID argument is set to true, then virtual GID's are + supported. Asking for a glyph ID with a glyph name or GID that is not in + the font will return a virtual GID. This is valid for GSUB and cmap + tables. For SING glyphlets, the cmap table is used to specify Unicode + values for virtual GI's used in GSUB/GPOS rules. 
If the gid N is requested + and does not exist in the font, or the glyphname has the form glyphN + and does not exist in the font, then N is used as the virtual GID. + Else, the first virtual GID is assigned as 0x1000 -1; for subsequent new + virtual GIDs, the next is one less than the previous. + + If ignoreDecompileErrors is set to True, exceptions raised in + individual tables during decompilation will be ignored, falling + back to the DefaultTable implementation, which simply keeps the + binary data. + + If lazy is set to True, many data structures are loaded lazily, upon + access only. If it is set to False, many data structures are loaded + immediately. The default is lazy=None which is somewhere in between. + """ + + from fontTools.ttLib import sfnt + self.verbose = verbose + self.quiet = quiet + self.lazy = lazy + self.recalcBBoxes = recalcBBoxes + self.recalcTimestamp = recalcTimestamp + self.tables = {} + self.reader = None + + # Permit the user to reference glyphs that are not int the font. + self.last_vid = 0xFFFE # Can't make it be 0xFFFF, as the world is full unsigned short integer counters that get incremented after the last seen GID value. + self.reverseVIDDict = {} + self.VIDDict = {} + self.allowVID = allowVID + self.ignoreDecompileErrors = ignoreDecompileErrors + + if not file: + self.sfntVersion = sfntVersion + self.flavor = flavor + self.flavorData = None + return + if not hasattr(file, "read"): + closeStream = True + # assume file is a string + if haveMacSupport and res_name_or_index is not None: + # on the mac, we deal with sfnt resources as well as flat files + from . import macUtils + if res_name_or_index == 0: + if macUtils.getSFNTResIndices(file): + # get the first available sfnt font. 
+ file = macUtils.SFNTResourceReader(file, 1) + else: + file = open(file, "rb") + else: + file = macUtils.SFNTResourceReader(file, res_name_or_index) + else: + file = open(file, "rb") + + else: + # assume "file" is a readable file object + closeStream = False + # read input file in memory and wrap a stream around it to allow overwriting + tmp = BytesIO(file.read()) + if hasattr(file, 'name'): + # save reference to input file name + tmp.name = file.name + if closeStream: + file.close() + self.reader = sfnt.SFNTReader(tmp, checkChecksums, fontNumber=fontNumber) + self.sfntVersion = self.reader.sfntVersion + self.flavor = self.reader.flavor + self.flavorData = self.reader.flavorData + + def close(self): + """If we still have a reader object, close it.""" + if self.reader is not None: + self.reader.close() + + def save(self, file, makeSuitcase=False, reorderTables=True): + """Save the font to disk. Similarly to the constructor, + the 'file' argument can be either a pathname or a writable + file object. + + On the Mac, if makeSuitcase is true, a suitcase (resource fork) + file will we made instead of a flat .ttf file. + """ + from fontTools.ttLib import sfnt + if not hasattr(file, "write"): + closeStream = 1 + if os.name == "mac" and makeSuitcase: + from . 
import macUtils + file = macUtils.SFNTResourceWriter(file, self) + else: + file = open(file, "wb") + if os.name == "mac": + from fontTools.misc.macCreator import setMacCreatorAndType + setMacCreatorAndType(file.name, 'mdos', 'BINA') + else: + # assume "file" is a writable file object + closeStream = 0 + + tags = list(self.keys()) + if "GlyphOrder" in tags: + tags.remove("GlyphOrder") + numTables = len(tags) + # write to a temporary stream to allow saving to unseekable streams + tmp = BytesIO() + writer = sfnt.SFNTWriter(tmp, numTables, self.sfntVersion, self.flavor, self.flavorData) + + done = [] + for tag in tags: + self._writeTable(tag, writer, done) + + writer.close() + + if (reorderTables is None or writer.reordersTables() or + (reorderTables is False and self.reader is None)): + # don't reorder tables and save as is + file.write(tmp.getvalue()) + tmp.close() + else: + if reorderTables is False: + # sort tables using the original font's order + tableOrder = list(self.reader.keys()) + else: + # use the recommended order from the OpenType specification + tableOrder = None + tmp.flush() + tmp.seek(0) + tmp2 = BytesIO() + reorderFontTables(tmp, tmp2, tableOrder) + file.write(tmp2.getvalue()) + tmp.close() + tmp2.close() + + if closeStream: + file.close() + + def saveXML(self, fileOrPath, progress=None, quiet=False, + tables=None, skipTables=None, splitTables=False, disassembleInstructions=True, + bitmapGlyphDataFormat='raw'): + """Export the font as TTX (an XML-based text file), or as a series of text + files when splitTables is true. In the latter case, the 'fileOrPath' + argument should be a path to a directory. + The 'tables' argument must either be false (dump all tables) or a + list of tables to dump. The 'skipTables' argument may be a list of tables + to skip, but only when the 'tables' argument is false. 
+ """ + from fontTools import version + from fontTools.misc import xmlWriter + + self.disassembleInstructions = disassembleInstructions + self.bitmapGlyphDataFormat = bitmapGlyphDataFormat + if not tables: + tables = list(self.keys()) + if "GlyphOrder" not in tables: + tables = ["GlyphOrder"] + tables + if skipTables: + for tag in skipTables: + if tag in tables: + tables.remove(tag) + numTables = len(tables) + if progress: + progress.set(0, numTables) + idlefunc = getattr(progress, "idle", None) + else: + idlefunc = None + + writer = xmlWriter.XMLWriter(fileOrPath, idlefunc=idlefunc) + writer.begintag("ttFont", sfntVersion=repr(self.sfntVersion)[1:-1], + ttLibVersion=version) + writer.newline() + + if not splitTables: + writer.newline() + else: + # 'fileOrPath' must now be a path + path, ext = os.path.splitext(fileOrPath) + fileNameTemplate = path + ".%s" + ext + + for i in range(numTables): + if progress: + progress.set(i) + tag = tables[i] + if splitTables: + tablePath = fileNameTemplate % tagToIdentifier(tag) + tableWriter = xmlWriter.XMLWriter(tablePath, idlefunc=idlefunc) + tableWriter.begintag("ttFont", ttLibVersion=version) + tableWriter.newline() + tableWriter.newline() + writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath)) + writer.newline() + else: + tableWriter = writer + self._tableToXML(tableWriter, tag, progress, quiet) + if splitTables: + tableWriter.endtag("ttFont") + tableWriter.newline() + tableWriter.close() + if progress: + progress.set((i + 1)) + writer.endtag("ttFont") + writer.newline() + writer.close() + if self.verbose: + debugmsg("Done dumping TTX") + + def _tableToXML(self, writer, tag, progress, quiet): + if tag in self: + table = self[tag] + report = "Dumping '%s' table..." % tag + else: + report = "No '%s' table found." 
% tag + if progress: + progress.setLabel(report) + elif self.verbose: + debugmsg(report) + else: + if not quiet: + print(report) + if tag not in self: + return + xmlTag = tagToXML(tag) + attrs = dict() + if hasattr(table, "ERROR"): + attrs['ERROR'] = "decompilation error" + from .tables.DefaultTable import DefaultTable + if table.__class__ == DefaultTable: + attrs['raw'] = True + writer.begintag(xmlTag, **attrs) + writer.newline() + if tag in ("glyf", "CFF "): + table.toXML(writer, self, progress) + else: + table.toXML(writer, self) + writer.endtag(xmlTag) + writer.newline() + writer.newline() + + def importXML(self, file, progress=None, quiet=False): + """Import a TTX file (an XML-based text format), so as to recreate + a font object. + """ + if "maxp" in self and "post" in self: + # Make sure the glyph order is loaded, as it otherwise gets + # lost if the XML doesn't contain the glyph order, yet does + # contain the table which was originally used to extract the + # glyph names from (ie. 'post', 'cmap' or 'CFF '). 
+ self.getGlyphOrder() + + from fontTools.misc import xmlReader + + reader = xmlReader.XMLReader(file, self, progress, quiet) + reader.read() + + def isLoaded(self, tag): + """Return true if the table identified by 'tag' has been + decompiled and loaded into memory.""" + return tag in self.tables + + def has_key(self, tag): + if self.isLoaded(tag): + return True + elif self.reader and tag in self.reader: + return True + elif tag == "GlyphOrder": + return True + else: + return False + + __contains__ = has_key + + def keys(self): + keys = list(self.tables.keys()) + if self.reader: + for key in list(self.reader.keys()): + if key not in keys: + keys.append(key) + + if "GlyphOrder" in keys: + keys.remove("GlyphOrder") + keys = sortedTagList(keys) + return ["GlyphOrder"] + keys + + def __len__(self): + return len(list(self.keys())) + + def __getitem__(self, tag): + tag = Tag(tag) + try: + return self.tables[tag] + except KeyError: + if tag == "GlyphOrder": + table = GlyphOrder(tag) + self.tables[tag] = table + return table + if self.reader is not None: + import traceback + if self.verbose: + debugmsg("Reading '%s' table from disk" % tag) + data = self.reader[tag] + tableClass = getTableClass(tag) + table = tableClass(tag) + self.tables[tag] = table + if self.verbose: + debugmsg("Decompiling '%s' table" % tag) + try: + table.decompile(data, self) + except: + if not self.ignoreDecompileErrors: + raise + # fall back to DefaultTable, retaining the binary table data + print("An exception occurred during the decompilation of the '%s' table" % tag) + from .tables.DefaultTable import DefaultTable + file = StringIO() + traceback.print_exc(file=file) + table = DefaultTable(tag) + table.ERROR = file.getvalue() + self.tables[tag] = table + table.decompile(data, self) + return table + else: + raise KeyError("'%s' table not found" % tag) + + def __setitem__(self, tag, table): + self.tables[Tag(tag)] = table + + def __delitem__(self, tag): + if tag not in self: + raise KeyError("'%s' 
table not found" % tag) + if tag in self.tables: + del self.tables[tag] + if self.reader and tag in self.reader: + del self.reader[tag] + + def get(self, tag, default=None): + try: + return self[tag] + except KeyError: + return default + + def setGlyphOrder(self, glyphOrder): + self.glyphOrder = glyphOrder + + def getGlyphOrder(self): + try: + return self.glyphOrder + except AttributeError: + pass + if 'CFF ' in self: + cff = self['CFF '] + self.glyphOrder = cff.getGlyphOrder() + elif 'post' in self: + # TrueType font + glyphOrder = self['post'].getGlyphOrder() + if glyphOrder is None: + # + # No names found in the 'post' table. + # Try to create glyph names from the unicode cmap (if available) + # in combination with the Adobe Glyph List (AGL). + # + self._getGlyphNamesFromCmap() + else: + self.glyphOrder = glyphOrder + else: + self._getGlyphNamesFromCmap() + return self.glyphOrder + + def _getGlyphNamesFromCmap(self): + # + # This is rather convoluted, but then again, it's an interesting problem: + # - we need to use the unicode values found in the cmap table to + # build glyph names (eg. because there is only a minimal post table, + # or none at all). + # - but the cmap parser also needs glyph names to work with... + # So here's what we do: + # - make up glyph names based on glyphID + # - load a temporary cmap table based on those names + # - extract the unicode values, build the "real" glyph names + # - unload the temporary cmap table + # + if self.isLoaded("cmap"): + # Bootstrapping: we're getting called by the cmap parser + # itself. This means self.tables['cmap'] contains a partially + # loaded cmap, making it impossible to get at a unicode + # subtable here. We remove the partially loaded cmap and + # restore it later. + # This only happens if the cmap table is loaded before any + # other table that does f.getGlyphOrder() or f.getGlyphName(). 
+ cmapLoading = self.tables['cmap'] + del self.tables['cmap'] + else: + cmapLoading = None + # Make up glyph names based on glyphID, which will be used by the + # temporary cmap and by the real cmap in case we don't find a unicode + # cmap. + numGlyphs = int(self['maxp'].numGlyphs) + glyphOrder = [None] * numGlyphs + glyphOrder[0] = ".notdef" + for i in range(1, numGlyphs): + glyphOrder[i] = "glyph%.5d" % i + # Set the glyph order, so the cmap parser has something + # to work with (so we don't get called recursively). + self.glyphOrder = glyphOrder + # Get a (new) temporary cmap (based on the just invented names) + try: + tempcmap = self['cmap'].getcmap(3, 1) + except KeyError: + tempcmap = None + if tempcmap is not None: + # we have a unicode cmap + from fontTools import agl + cmap = tempcmap.cmap + # create a reverse cmap dict + reversecmap = {} + for unicode, name in list(cmap.items()): + reversecmap[name] = unicode + allNames = {} + for i in range(numGlyphs): + tempName = glyphOrder[i] + if tempName in reversecmap: + unicode = reversecmap[tempName] + if unicode in agl.UV2AGL: + # get name from the Adobe Glyph List + glyphName = agl.UV2AGL[unicode] + else: + # create uni name + glyphName = "uni%04X" % unicode + tempName = glyphName + n = allNames.get(tempName, 0) + if n: + tempName = glyphName + "#" + str(n) + glyphOrder[i] = tempName + allNames[tempName] = n + 1 + # Delete the temporary cmap table from the cache, so it can + # be parsed again with the right names. + del self.tables['cmap'] + else: + pass # no unicode cmap available, stick with the invented names + self.glyphOrder = glyphOrder + if cmapLoading: + # restore partially loaded cmap, so it can continue loading + # using the proper names. 
+ self.tables['cmap'] = cmapLoading + + def getGlyphNames(self): + """Get a list of glyph names, sorted alphabetically.""" + glyphNames = sorted(self.getGlyphOrder()[:]) + return glyphNames + + def getGlyphNames2(self): + """Get a list of glyph names, sorted alphabetically, + but not case sensitive. + """ + from fontTools.misc import textTools + return textTools.caselessSort(self.getGlyphOrder()) + + def getGlyphName(self, glyphID, requireReal=False): + try: + return self.getGlyphOrder()[glyphID] + except IndexError: + if requireReal or not self.allowVID: + # XXX The ??.W8.otf font that ships with OSX uses higher glyphIDs in + # the cmap table than there are glyphs. I don't think it's legal... + return "glyph%.5d" % glyphID + else: + # user intends virtual GID support + try: + glyphName = self.VIDDict[glyphID] + except KeyError: + glyphName ="glyph%.5d" % glyphID + self.last_vid = min(glyphID, self.last_vid ) + self.reverseVIDDict[glyphName] = glyphID + self.VIDDict[glyphID] = glyphName + return glyphName + + def getGlyphID(self, glyphName, requireReal=False): + if not hasattr(self, "_reverseGlyphOrderDict"): + self._buildReverseGlyphOrderDict() + glyphOrder = self.getGlyphOrder() + d = self._reverseGlyphOrderDict + if glyphName not in d: + if glyphName in glyphOrder: + self._buildReverseGlyphOrderDict() + return self.getGlyphID(glyphName) + else: + if requireReal: + raise KeyError(glyphName) + elif not self.allowVID: + # Handle glyphXXX only + if glyphName[:5] == "glyph": + try: + return int(glyphName[5:]) + except (NameError, ValueError): + raise KeyError(glyphName) + else: + # user intends virtual GID support + try: + glyphID = self.reverseVIDDict[glyphName] + except KeyError: + # if name is in glyphXXX format, use the specified name. 
+ if glyphName[:5] == "glyph": + try: + glyphID = int(glyphName[5:]) + except (NameError, ValueError): + glyphID = None + if glyphID is None: + glyphID = self.last_vid -1 + self.last_vid = glyphID + self.reverseVIDDict[glyphName] = glyphID + self.VIDDict[glyphID] = glyphName + return glyphID + + glyphID = d[glyphName] + if glyphName != glyphOrder[glyphID]: + self._buildReverseGlyphOrderDict() + return self.getGlyphID(glyphName) + return glyphID + + def getReverseGlyphMap(self, rebuild=False): + if rebuild or not hasattr(self, "_reverseGlyphOrderDict"): + self._buildReverseGlyphOrderDict() + return self._reverseGlyphOrderDict + + def _buildReverseGlyphOrderDict(self): + self._reverseGlyphOrderDict = d = {} + glyphOrder = self.getGlyphOrder() + for glyphID in range(len(glyphOrder)): + d[glyphOrder[glyphID]] = glyphID + + def _writeTable(self, tag, writer, done): + """Internal helper function for self.save(). Keeps track of + inter-table dependencies. + """ + if tag in done: + return + tableClass = getTableClass(tag) + for masterTable in tableClass.dependencies: + if masterTable not in done: + if masterTable in self: + self._writeTable(masterTable, writer, done) + else: + done.append(masterTable) + tabledata = self.getTableData(tag) + if self.verbose: + debugmsg("writing '%s' table to disk" % tag) + writer[tag] = tabledata + done.append(tag) + + def getTableData(self, tag): + """Returns raw table data, whether compiled or directly read from disk. + """ + tag = Tag(tag) + if self.isLoaded(tag): + if self.verbose: + debugmsg("compiling '%s' table" % tag) + return self.tables[tag].compile(self) + elif self.reader and tag in self.reader: + if self.verbose: + debugmsg("Reading '%s' table from disk" % tag) + return self.reader[tag] + else: + raise KeyError(tag) + + def getGlyphSet(self, preferCFF=True): + """Return a generic GlyphSet, which is a dict-like object + mapping glyph names to glyph objects. 
The returned glyph objects + have a .draw() method that supports the Pen protocol, and will + have an attribute named 'width'. + + If the font is CFF-based, the outlines will be taken from the 'CFF ' + table. Otherwise the outlines will be taken from the 'glyf' table. + If the font contains both a 'CFF ' and a 'glyf' table, you can use + the 'preferCFF' argument to specify which one should be taken. + """ + glyphs = None + if (preferCFF and "CFF " in self) or "glyf" not in self: + glyphs = _TTGlyphSet(self, list(self["CFF "].cff.values())[0].CharStrings, _TTGlyphCFF) + + if glyphs is None and "glyf" in self: + glyphs = _TTGlyphSet(self, self["glyf"], _TTGlyphGlyf) + + if glyphs is None: + raise TTLibError("Font contains no outlines") + + return glyphs + + +class _TTGlyphSet(object): + + """Generic dict-like GlyphSet class that pulls metrics from hmtx and + glyph shape from TrueType or CFF. + """ + + def __init__(self, ttFont, glyphs, glyphType): + self._glyphs = glyphs + self._hmtx = ttFont['hmtx'] + self._glyphType = glyphType + + def keys(self): + return list(self._glyphs.keys()) + + def has_key(self, glyphName): + return glyphName in self._glyphs + + __contains__ = has_key + + def __getitem__(self, glyphName): + return self._glyphType(self, self._glyphs[glyphName], self._hmtx[glyphName]) + + def get(self, glyphName, default=None): + try: + return self[glyphName] + except KeyError: + return default + +class _TTGlyph(object): + + """Wrapper for a TrueType glyph that supports the Pen protocol, meaning + that it has a .draw() method that takes a pen object as its only + argument. Additionally there is a 'width' attribute. + """ + + def __init__(self, glyphset, glyph, metrics): + self._glyphset = glyphset + self._glyph = glyph + self.width, self.lsb = metrics + + def draw(self, pen): + """Draw the glyph onto Pen. See fontTools.pens.basePen for details + how that works. 
+ """ + self._glyph.draw(pen) + +class _TTGlyphCFF(_TTGlyph): + pass + +class _TTGlyphGlyf(_TTGlyph): + + def draw(self, pen): + """Draw the glyph onto Pen. See fontTools.pens.basePen for details + how that works. + """ + glyfTable = self._glyphset._glyphs + glyph = self._glyph + offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0 + glyph.draw(pen, glyfTable, offset) + + +class GlyphOrder(object): + + """A pseudo table. The glyph order isn't in the font as a separate + table, but it's nice to present it as such in the TTX format. + """ + + def __init__(self, tag=None): + pass + + def toXML(self, writer, ttFont): + glyphOrder = ttFont.getGlyphOrder() + writer.comment("The 'id' attribute is only for humans; " + "it is ignored when parsed.") + writer.newline() + for i in range(len(glyphOrder)): + glyphName = glyphOrder[i] + writer.simpletag("GlyphID", id=i, name=glyphName) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "glyphOrder"): + self.glyphOrder = [] + ttFont.setGlyphOrder(self.glyphOrder) + if name == "GlyphID": + self.glyphOrder.append(attrs["name"]) + + +def getTableModule(tag): + """Fetch the packer/unpacker module for a table. + Return None when no module is found. + """ + from . import tables + pyTag = tagToIdentifier(tag) + try: + __import__("fontTools.ttLib.tables." + pyTag) + except ImportError as err: + # If pyTag is found in the ImportError message, + # means table is not implemented. If it's not + # there, then some other module is missing, don't + # suppress the error. + if str(err).find(pyTag) >= 0: + return None + else: + raise err + else: + return getattr(tables, pyTag) + + +def getTableClass(tag): + """Fetch the packer/unpacker class for a table. + Return None when no class is found. 
+ """ + module = getTableModule(tag) + if module is None: + from .tables.DefaultTable import DefaultTable + return DefaultTable + pyTag = tagToIdentifier(tag) + tableClass = getattr(module, "table_" + pyTag) + return tableClass + + +def getClassTag(klass): + """Fetch the table tag for a class object.""" + name = klass.__name__ + assert name[:6] == 'table_' + name = name[6:] # Chop 'table_' + return identifierToTag(name) + + +def newTable(tag): + """Return a new instance of a table.""" + tableClass = getTableClass(tag) + return tableClass(tag) + + +def _escapechar(c): + """Helper function for tagToIdentifier()""" + import re + if re.match("[a-z0-9]", c): + return "_" + c + elif re.match("[A-Z]", c): + return c + "_" + else: + return hex(byteord(c))[2:] + + +def tagToIdentifier(tag): + """Convert a table tag to a valid (but UGLY) python identifier, + as well as a filename that's guaranteed to be unique even on a + caseless file system. Each character is mapped to two characters. + Lowercase letters get an underscore before the letter, uppercase + letters get an underscore after the letter. Trailing spaces are + trimmed. Illegal characters are escaped as two hex bytes. If the + result starts with a number (as the result of a hex escape), an + extra underscore is prepended. 
Examples: + 'glyf' -> '_g_l_y_f' + 'cvt ' -> '_c_v_t' + 'OS/2' -> 'O_S_2f_2' + """ + import re + tag = Tag(tag) + if tag == "GlyphOrder": + return tag + assert len(tag) == 4, "tag should be 4 characters long" + while len(tag) > 1 and tag[-1] == ' ': + tag = tag[:-1] + ident = "" + for c in tag: + ident = ident + _escapechar(c) + if re.match("[0-9]", ident): + ident = "_" + ident + return ident + + +def identifierToTag(ident): + """the opposite of tagToIdentifier()""" + if ident == "GlyphOrder": + return ident + if len(ident) % 2 and ident[0] == "_": + ident = ident[1:] + assert not (len(ident) % 2) + tag = "" + for i in range(0, len(ident), 2): + if ident[i] == "_": + tag = tag + ident[i+1] + elif ident[i+1] == "_": + tag = tag + ident[i] + else: + # assume hex + tag = tag + chr(int(ident[i:i+2], 16)) + # append trailing spaces + tag = tag + (4 - len(tag)) * ' ' + return Tag(tag) + + +def tagToXML(tag): + """Similarly to tagToIdentifier(), this converts a TT tag + to a valid XML element name. Since XML element names are + case sensitive, this is a fairly simple/readable translation. 
+ """ + import re + tag = Tag(tag) + if tag == "OS/2": + return "OS_2" + elif tag == "GlyphOrder": + return tag + if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag): + return tag.strip() + else: + return tagToIdentifier(tag) + + +def xmlToTag(tag): + """The opposite of tagToXML()""" + if tag == "OS_2": + return Tag("OS/2") + if len(tag) == 8: + return identifierToTag(tag) + else: + return Tag(tag + " " * (4 - len(tag))) + + +def debugmsg(msg): + import time + print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time()))) + + +# Table order as recommended in the OpenType specification 1.4 +TTFTableOrder = ["head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX", + "hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf", + "kern", "name", "post", "gasp", "PCLT"] + +OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post", + "CFF "] + +def sortedTagList(tagList, tableOrder=None): + """Return a sorted copy of tagList, sorted according to the OpenType + specification, or according to a custom tableOrder. If given and not + None, tableOrder needs to be a list of tag names. + """ + tagList = sorted(tagList) + if tableOrder is None: + if "DSIG" in tagList: + # DSIG should be last (XXX spec reference?) + tagList.remove("DSIG") + tagList.append("DSIG") + if "CFF " in tagList: + tableOrder = OTFTableOrder + else: + tableOrder = TTFTableOrder + orderedTables = [] + for tag in tableOrder: + if tag in tagList: + orderedTables.append(tag) + tagList.remove(tag) + orderedTables.extend(tagList) + return orderedTables + + +def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False): + """Rewrite a font file, ordering the tables as recommended by the + OpenType specification 1.4. 
+ """ + from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter + reader = SFNTReader(inFile, checkChecksums=checkChecksums) + writer = SFNTWriter(outFile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) + tables = list(reader.keys()) + for tag in sortedTagList(tables, tableOrder): + writer[tag] = reader[tag] + writer.close() + + +def maxPowerOfTwo(x): + """Return the highest exponent of two, so that + (2 ** exponent) <= x. Return 0 if x is 0. + """ + exponent = 0 + while x: + x = x >> 1 + exponent = exponent + 1 + return max(exponent - 1, 0) + + +def getSearchRange(n, itemSize=16): + """Calculate searchRange, entrySelector, rangeShift. + """ + # itemSize defaults to 16, for backward compatibility + # with upstream fonttools. + exponent = maxPowerOfTwo(n) + searchRange = (2 ** exponent) * itemSize + entrySelector = exponent + rangeShift = max(0, n * itemSize - searchRange) + return searchRange, entrySelector, rangeShift diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/macUtils.py fonttools-3.0/Snippets/fontTools/ttLib/macUtils.py --- fonttools-2.4/Snippets/fontTools/ttLib/macUtils.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/macUtils.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,73 @@ +"""ttLib.macUtils.py -- Various Mac-specific stuff.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +import os +if sys.platform not in ("mac", "darwin"): + raise ImportError("This module is Mac-only!") +try: + from Carbon import Res +except ImportError: + import Res + + +def MyOpenResFile(path): + mode = 1 # read only + try: + resref = Res.FSOpenResFile(path, mode) + except Res.Error: + # try data fork + resref = Res.FSOpenResourceFile(path, unicode(), mode) + return resref + + +def getSFNTResIndices(path): + """Determine whether a file has a resource fork or not.""" + try: + resref = MyOpenResFile(path) + except Res.Error: + return [] + 
Res.UseResFile(resref) + numSFNTs = Res.Count1Resources('sfnt') + Res.CloseResFile(resref) + return list(range(1, numSFNTs + 1)) + + +def openTTFonts(path): + """Given a pathname, return a list of TTFont objects. In the case + of a flat TTF/OTF file, the list will contain just one font object; + but in the case of a Mac font suitcase it will contain as many + font objects as there are sfnt resources in the file. + """ + from fontTools import ttLib + fonts = [] + sfnts = getSFNTResIndices(path) + if not sfnts: + fonts.append(ttLib.TTFont(path)) + else: + for index in sfnts: + fonts.append(ttLib.TTFont(path, index)) + if not fonts: + raise ttLib.TTLibError("no fonts found in file '%s'" % path) + return fonts + + +class SFNTResourceReader(object): + + """Simple (Mac-only) read-only file wrapper for 'sfnt' resources.""" + + def __init__(self, path, res_name_or_index): + resref = MyOpenResFile(path) + Res.UseResFile(resref) + if isinstance(res_name_or_index, basestring): + res = Res.Get1NamedResource('sfnt', res_name_or_index) + else: + res = Res.Get1IndResource('sfnt', res_name_or_index) + self.file = BytesIO(res.data) + Res.CloseResFile(resref) + self.name = path + + def __getattr__(self, attr): + # cheap inheritance + return getattr(self.file, attr) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/sfnt.py fonttools-3.0/Snippets/fontTools/ttLib/sfnt.py --- fonttools-2.4/Snippets/fontTools/ttLib/sfnt.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/sfnt.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,520 @@ +"""ttLib/sfnt.py -- low-level module to deal with the sfnt file format. + +Defines two public classes: + SFNTReader + SFNTWriter + +(Normally you don't have to use these classes explicitly; they are +used automatically by ttLib.TTFont.) 
+ +The reading and writing of sfnt files is separated in two distinct +classes, since whenever to number of tables changes or whenever +a table's length chages you need to rewrite the whole file anyway. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.ttLib import getSearchRange +import struct +from collections import OrderedDict + + +class SFNTReader(object): + + def __new__(cls, *args, **kwargs): + """ Return an instance of the SFNTReader sub-class which is compatible + with the input file type. + """ + if args and cls is SFNTReader: + infile = args[0] + sfntVersion = Tag(infile.read(4)) + infile.seek(0) + if sfntVersion == "wOF2": + # return new WOFF2Reader object + from fontTools.ttLib.woff2 import WOFF2Reader + return object.__new__(WOFF2Reader) + # return default object + return object.__new__(cls) + + def __init__(self, file, checkChecksums=1, fontNumber=-1): + self.file = file + self.checkChecksums = checkChecksums + + self.flavor = None + self.flavorData = None + self.DirectoryEntry = SFNTDirectoryEntry + self.sfntVersion = self.file.read(4) + self.file.seek(0) + if self.sfntVersion == b"ttcf": + data = self.file.read(ttcHeaderSize) + if len(data) != ttcHeaderSize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a Font Collection (not enough data)") + sstruct.unpack(ttcHeaderFormat, data, self) + assert self.Version == 0x00010000 or self.Version == 0x00020000, "unrecognized TTC version 0x%08x" % self.Version + if not 0 <= fontNumber < self.numFonts: + from fontTools import ttLib + raise ttLib.TTLibError("specify a font number between 0 and %d (inclusive)" % (self.numFonts - 1)) + offsetTable = struct.unpack(">%dL" % self.numFonts, self.file.read(self.numFonts * 4)) + if self.Version == 0x00020000: + pass # ignoring version 2.0 signatures + self.file.seek(offsetTable[fontNumber]) + data = self.file.read(sfntDirectorySize) + if len(data) 
!= sfntDirectorySize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a Font Collection (not enough data)") + sstruct.unpack(sfntDirectoryFormat, data, self) + elif self.sfntVersion == b"wOFF": + self.flavor = "woff" + self.DirectoryEntry = WOFFDirectoryEntry + data = self.file.read(woffDirectorySize) + if len(data) != woffDirectorySize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a WOFF font (not enough data)") + sstruct.unpack(woffDirectoryFormat, data, self) + else: + data = self.file.read(sfntDirectorySize) + if len(data) != sfntDirectorySize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a TrueType or OpenType font (not enough data)") + sstruct.unpack(sfntDirectoryFormat, data, self) + self.sfntVersion = Tag(self.sfntVersion) + + if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"): + from fontTools import ttLib + raise ttLib.TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") + self.tables = OrderedDict() + for i in range(self.numTables): + entry = self.DirectoryEntry() + entry.fromFile(self.file) + tag = Tag(entry.tag) + self.tables[tag] = entry + + # Load flavor data if any + if self.flavor == "woff": + self.flavorData = WOFFFlavorData(self) + + def has_key(self, tag): + return tag in self.tables + + __contains__ = has_key + + def keys(self): + return self.tables.keys() + + def __getitem__(self, tag): + """Fetch the raw table data.""" + entry = self.tables[Tag(tag)] + data = entry.loadData (self.file) + if self.checkChecksums: + if tag == 'head': + # Beh: we have to special-case the 'head' table. + checksum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) + else: + checksum = calcChecksum(data) + if self.checkChecksums > 1: + # Be obnoxious, and barf when it's wrong + assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag + elif checksum != entry.checkSum: + # Be friendly, and just print a warning. 
+ print("bad checksum for '%s' table" % tag) + return data + + def __delitem__(self, tag): + del self.tables[Tag(tag)] + + def close(self): + self.file.close() + + +class SFNTWriter(object): + + def __new__(cls, *args, **kwargs): + """ Return an instance of the SFNTWriter sub-class which is compatible + with the specified 'flavor'. + """ + flavor = None + if kwargs and 'flavor' in kwargs: + flavor = kwargs['flavor'] + elif args and len(args) > 3: + flavor = args[3] + if cls is SFNTWriter: + if flavor == "woff2": + # return new WOFF2Writer object + from fontTools.ttLib.woff2 import WOFF2Writer + return object.__new__(WOFF2Writer) + # return default object + return object.__new__(cls) + + def __init__(self, file, numTables, sfntVersion="\000\001\000\000", + flavor=None, flavorData=None): + self.file = file + self.numTables = numTables + self.sfntVersion = Tag(sfntVersion) + self.flavor = flavor + self.flavorData = flavorData + + if self.flavor == "woff": + self.directoryFormat = woffDirectoryFormat + self.directorySize = woffDirectorySize + self.DirectoryEntry = WOFFDirectoryEntry + + self.signature = "wOFF" + + # to calculate WOFF checksum adjustment, we also need the original SFNT offsets + self.origNextTableOffset = sfntDirectorySize + numTables * sfntDirectoryEntrySize + else: + assert not self.flavor, "Unknown flavor '%s'" % self.flavor + self.directoryFormat = sfntDirectoryFormat + self.directorySize = sfntDirectorySize + self.DirectoryEntry = SFNTDirectoryEntry + + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(numTables, 16) + + self.nextTableOffset = self.directorySize + numTables * self.DirectoryEntry.formatSize + # clear out directory area + self.file.seek(self.nextTableOffset) + # make sure we're actually where we want to be. 
(old cStringIO bug) + self.file.write(b'\0' * (self.nextTableOffset - self.file.tell())) + self.tables = OrderedDict() + + def __setitem__(self, tag, data): + """Write raw table data to disk.""" + if tag in self.tables: + from fontTools import ttLib + raise ttLib.TTLibError("cannot rewrite '%s' table" % tag) + + entry = self.DirectoryEntry() + entry.tag = tag + entry.offset = self.nextTableOffset + if tag == 'head': + entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) + self.headTable = data + entry.uncompressed = True + else: + entry.checkSum = calcChecksum(data) + entry.saveData(self.file, data) + + if self.flavor == "woff": + entry.origOffset = self.origNextTableOffset + self.origNextTableOffset += (entry.origLength + 3) & ~3 + + self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3) + # Add NUL bytes to pad the table data to a 4-byte boundary. + # Don't depend on f.seek() as we need to add the padding even if no + # subsequent write follows (seek is lazy), ie. after the final table + # in the font. + self.file.write(b'\0' * (self.nextTableOffset - self.file.tell())) + assert self.nextTableOffset == self.file.tell() + + self.tables[tag] = entry + + def close(self): + """All tables must have been written to disk. Now write the + directory. 
+ """ + tables = sorted(self.tables.items()) + if len(tables) != self.numTables: + from fontTools import ttLib + raise ttLib.TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(tables))) + + if self.flavor == "woff": + self.signature = b"wOFF" + self.reserved = 0 + + self.totalSfntSize = 12 + self.totalSfntSize += 16 * len(tables) + for tag, entry in tables: + self.totalSfntSize += (entry.origLength + 3) & ~3 + + data = self.flavorData if self.flavorData else WOFFFlavorData() + if data.majorVersion is not None and data.minorVersion is not None: + self.majorVersion = data.majorVersion + self.minorVersion = data.minorVersion + else: + if hasattr(self, 'headTable'): + self.majorVersion, self.minorVersion = struct.unpack(">HH", self.headTable[4:8]) + else: + self.majorVersion = self.minorVersion = 0 + if data.metaData: + self.metaOrigLength = len(data.metaData) + self.file.seek(0,2) + self.metaOffset = self.file.tell() + import zlib + compressedMetaData = zlib.compress(data.metaData) + self.metaLength = len(compressedMetaData) + self.file.write(compressedMetaData) + else: + self.metaOffset = self.metaLength = self.metaOrigLength = 0 + if data.privData: + self.file.seek(0,2) + off = self.file.tell() + paddedOff = (off + 3) & ~3 + self.file.write('\0' * (paddedOff - off)) + self.privOffset = self.file.tell() + self.privLength = len(data.privData) + self.file.write(data.privData) + else: + self.privOffset = self.privLength = 0 + + self.file.seek(0,2) + self.length = self.file.tell() + + else: + assert not self.flavor, "Unknown flavor '%s'" % self.flavor + pass + + directory = sstruct.pack(self.directoryFormat, self) + + self.file.seek(self.directorySize) + seenHead = 0 + for tag, entry in tables: + if tag == "head": + seenHead = 1 + directory = directory + entry.toString() + if seenHead: + self.writeMasterChecksum(directory) + self.file.seek(0) + self.file.write(directory) + + def _calcMasterChecksum(self, directory): + # calculate 
checkSumAdjustment + tags = list(self.tables.keys()) + checksums = [] + for i in range(len(tags)): + checksums.append(self.tables[tags[i]].checkSum) + + if self.DirectoryEntry != SFNTDirectoryEntry: + # Create a SFNT directory for checksum calculation purposes + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16) + directory = sstruct.pack(sfntDirectoryFormat, self) + tables = sorted(self.tables.items()) + for tag, entry in tables: + sfntEntry = SFNTDirectoryEntry() + sfntEntry.tag = entry.tag + sfntEntry.checkSum = entry.checkSum + sfntEntry.offset = entry.origOffset + sfntEntry.length = entry.origLength + directory = directory + sfntEntry.toString() + + directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize + assert directory_end == len(directory) + + checksums.append(calcChecksum(directory)) + checksum = sum(checksums) & 0xffffffff + # BiboAfba! + checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff + return checksumadjustment + + def writeMasterChecksum(self, directory): + checksumadjustment = self._calcMasterChecksum(directory) + # write the checksum to the file + self.file.seek(self.tables['head'].offset + 8) + self.file.write(struct.pack(">L", checksumadjustment)) + + def reordersTables(self): + return False + + +# -- sfnt directory helpers and cruft + +ttcHeaderFormat = """ + > # big endian + TTCTag: 4s # "ttcf" + Version: L # 0x00010000 or 0x00020000 + numFonts: L # number of fonts + # OffsetTable[numFonts]: L # array with offsets from beginning of file + # ulDsigTag: L # version 2.0 only + # ulDsigLength: L # version 2.0 only + # ulDsigOffset: L # version 2.0 only +""" + +ttcHeaderSize = sstruct.calcsize(ttcHeaderFormat) + +sfntDirectoryFormat = """ + > # big endian + sfntVersion: 4s + numTables: H # number of tables + searchRange: H # (max2 <= numTables)*16 + entrySelector: H # log2(max2 <= numTables) + rangeShift: H # numTables*16-searchRange +""" + +sfntDirectorySize = 
sstruct.calcsize(sfntDirectoryFormat) + +sfntDirectoryEntryFormat = """ + > # big endian + tag: 4s + checkSum: L + offset: L + length: L +""" + +sfntDirectoryEntrySize = sstruct.calcsize(sfntDirectoryEntryFormat) + +woffDirectoryFormat = """ + > # big endian + signature: 4s # "wOFF" + sfntVersion: 4s + length: L # total woff file size + numTables: H # number of tables + reserved: H # set to 0 + totalSfntSize: L # uncompressed size + majorVersion: H # major version of WOFF file + minorVersion: H # minor version of WOFF file + metaOffset: L # offset to metadata block + metaLength: L # length of compressed metadata + metaOrigLength: L # length of uncompressed metadata + privOffset: L # offset to private data block + privLength: L # length of private data block +""" + +woffDirectorySize = sstruct.calcsize(woffDirectoryFormat) + +woffDirectoryEntryFormat = """ + > # big endian + tag: 4s + offset: L + length: L # compressed length + origLength: L # original length + checkSum: L # original checksum +""" + +woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat) + + +class DirectoryEntry(object): + + def __init__(self): + self.uncompressed = False # if True, always embed entry raw + + def fromFile(self, file): + sstruct.unpack(self.format, file.read(self.formatSize), self) + + def fromString(self, str): + sstruct.unpack(self.format, str, self) + + def toString(self): + return sstruct.pack(self.format, self) + + def __repr__(self): + if hasattr(self, "tag"): + return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self)) + else: + return "<%s at %x>" % (self.__class__.__name__, id(self)) + + def loadData(self, file): + file.seek(self.offset) + data = file.read(self.length) + assert len(data) == self.length + if hasattr(self.__class__, 'decodeData'): + data = self.decodeData(data) + return data + + def saveData(self, file, data): + if hasattr(self.__class__, 'encodeData'): + data = self.encodeData(data) + self.length = len(data) + 
file.seek(self.offset) + file.write(data) + + def decodeData(self, rawData): + return rawData + + def encodeData(self, data): + return data + +class SFNTDirectoryEntry(DirectoryEntry): + + format = sfntDirectoryEntryFormat + formatSize = sfntDirectoryEntrySize + +class WOFFDirectoryEntry(DirectoryEntry): + + format = woffDirectoryEntryFormat + formatSize = woffDirectoryEntrySize + zlibCompressionLevel = 6 + + def decodeData(self, rawData): + import zlib + if self.length == self.origLength: + data = rawData + else: + assert self.length < self.origLength + data = zlib.decompress(rawData) + assert len (data) == self.origLength + return data + + def encodeData(self, data): + import zlib + self.origLength = len(data) + if not self.uncompressed: + compressedData = zlib.compress(data, self.zlibCompressionLevel) + if self.uncompressed or len(compressedData) >= self.origLength: + # Encode uncompressed + rawData = data + self.length = self.origLength + else: + rawData = compressedData + self.length = len(rawData) + return rawData + +class WOFFFlavorData(): + + Flavor = 'woff' + + def __init__(self, reader=None): + self.majorVersion = None + self.minorVersion = None + self.metaData = None + self.privData = None + if reader: + self.majorVersion = reader.majorVersion + self.minorVersion = reader.minorVersion + if reader.metaLength: + reader.file.seek(reader.metaOffset) + rawData = reader.file.read(reader.metaLength) + assert len(rawData) == reader.metaLength + import zlib + data = zlib.decompress(rawData) + assert len(data) == reader.metaOrigLength + self.metaData = data + if reader.privLength: + reader.file.seek(reader.privOffset) + data = reader.file.read(reader.privLength) + assert len(data) == reader.privLength + self.privData = data + + +def calcChecksum(data): + """Calculate the checksum for an arbitrary block of data. + Optionally takes a 'start' argument, which allows you to + calculate a checksum in chunks by feeding it a previous + result. 
+ + If the data length is not a multiple of four, it assumes + it is to be padded with null byte. + + >>> print(calcChecksum(b"abcd")) + 1633837924 + >>> print(calcChecksum(b"abcdxyz")) + 3655064932 + """ + remainder = len(data) % 4 + if remainder: + data += b"\0" * (4 - remainder) + value = 0 + blockSize = 4096 + assert blockSize % 4 == 0 + for i in range(0, len(data), blockSize): + block = data[i:i+blockSize] + longs = struct.unpack(">%dL" % (len(block) // 4), block) + value = (value + sum(longs)) & 0xffffffff + return value + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/standardGlyphOrder.py fonttools-3.0/Snippets/fontTools/ttLib/standardGlyphOrder.py --- fonttools-2.4/Snippets/fontTools/ttLib/standardGlyphOrder.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/standardGlyphOrder.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,274 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +# +# 'post' table formats 1.0 and 2.0 rely on this list of "standard" +# glyphs. +# +# My list is correct according to the Apple documentation for the 'post' +# table: http://developer.apple.com/fonts/TTRefMan/RM06/Chap6post.html +# (However, it seems that TTFdump (from MS) and FontLab disagree, at +# least with respect to the last glyph, which they list as 'dslash' +# instead of 'dcroat'.) 
+# + +standardGlyphOrder = [ + ".notdef", # 0 + ".null", # 1 + "nonmarkingreturn", # 2 + "space", # 3 + "exclam", # 4 + "quotedbl", # 5 + "numbersign", # 6 + "dollar", # 7 + "percent", # 8 + "ampersand", # 9 + "quotesingle", # 10 + "parenleft", # 11 + "parenright", # 12 + "asterisk", # 13 + "plus", # 14 + "comma", # 15 + "hyphen", # 16 + "period", # 17 + "slash", # 18 + "zero", # 19 + "one", # 20 + "two", # 21 + "three", # 22 + "four", # 23 + "five", # 24 + "six", # 25 + "seven", # 26 + "eight", # 27 + "nine", # 28 + "colon", # 29 + "semicolon", # 30 + "less", # 31 + "equal", # 32 + "greater", # 33 + "question", # 34 + "at", # 35 + "A", # 36 + "B", # 37 + "C", # 38 + "D", # 39 + "E", # 40 + "F", # 41 + "G", # 42 + "H", # 43 + "I", # 44 + "J", # 45 + "K", # 46 + "L", # 47 + "M", # 48 + "N", # 49 + "O", # 50 + "P", # 51 + "Q", # 52 + "R", # 53 + "S", # 54 + "T", # 55 + "U", # 56 + "V", # 57 + "W", # 58 + "X", # 59 + "Y", # 60 + "Z", # 61 + "bracketleft", # 62 + "backslash", # 63 + "bracketright", # 64 + "asciicircum", # 65 + "underscore", # 66 + "grave", # 67 + "a", # 68 + "b", # 69 + "c", # 70 + "d", # 71 + "e", # 72 + "f", # 73 + "g", # 74 + "h", # 75 + "i", # 76 + "j", # 77 + "k", # 78 + "l", # 79 + "m", # 80 + "n", # 81 + "o", # 82 + "p", # 83 + "q", # 84 + "r", # 85 + "s", # 86 + "t", # 87 + "u", # 88 + "v", # 89 + "w", # 90 + "x", # 91 + "y", # 92 + "z", # 93 + "braceleft", # 94 + "bar", # 95 + "braceright", # 96 + "asciitilde", # 97 + "Adieresis", # 98 + "Aring", # 99 + "Ccedilla", # 100 + "Eacute", # 101 + "Ntilde", # 102 + "Odieresis", # 103 + "Udieresis", # 104 + "aacute", # 105 + "agrave", # 106 + "acircumflex", # 107 + "adieresis", # 108 + "atilde", # 109 + "aring", # 110 + "ccedilla", # 111 + "eacute", # 112 + "egrave", # 113 + "ecircumflex", # 114 + "edieresis", # 115 + "iacute", # 116 + "igrave", # 117 + "icircumflex", # 118 + "idieresis", # 119 + "ntilde", # 120 + "oacute", # 121 + "ograve", # 122 + "ocircumflex", # 123 + "odieresis", # 124 + 
"otilde", # 125 + "uacute", # 126 + "ugrave", # 127 + "ucircumflex", # 128 + "udieresis", # 129 + "dagger", # 130 + "degree", # 131 + "cent", # 132 + "sterling", # 133 + "section", # 134 + "bullet", # 135 + "paragraph", # 136 + "germandbls", # 137 + "registered", # 138 + "copyright", # 139 + "trademark", # 140 + "acute", # 141 + "dieresis", # 142 + "notequal", # 143 + "AE", # 144 + "Oslash", # 145 + "infinity", # 146 + "plusminus", # 147 + "lessequal", # 148 + "greaterequal", # 149 + "yen", # 150 + "mu", # 151 + "partialdiff", # 152 + "summation", # 153 + "product", # 154 + "pi", # 155 + "integral", # 156 + "ordfeminine", # 157 + "ordmasculine", # 158 + "Omega", # 159 + "ae", # 160 + "oslash", # 161 + "questiondown", # 162 + "exclamdown", # 163 + "logicalnot", # 164 + "radical", # 165 + "florin", # 166 + "approxequal", # 167 + "Delta", # 168 + "guillemotleft", # 169 + "guillemotright", # 170 + "ellipsis", # 171 + "nonbreakingspace", # 172 + "Agrave", # 173 + "Atilde", # 174 + "Otilde", # 175 + "OE", # 176 + "oe", # 177 + "endash", # 178 + "emdash", # 179 + "quotedblleft", # 180 + "quotedblright", # 181 + "quoteleft", # 182 + "quoteright", # 183 + "divide", # 184 + "lozenge", # 185 + "ydieresis", # 186 + "Ydieresis", # 187 + "fraction", # 188 + "currency", # 189 + "guilsinglleft", # 190 + "guilsinglright", # 191 + "fi", # 192 + "fl", # 193 + "daggerdbl", # 194 + "periodcentered", # 195 + "quotesinglbase", # 196 + "quotedblbase", # 197 + "perthousand", # 198 + "Acircumflex", # 199 + "Ecircumflex", # 200 + "Aacute", # 201 + "Edieresis", # 202 + "Egrave", # 203 + "Iacute", # 204 + "Icircumflex", # 205 + "Idieresis", # 206 + "Igrave", # 207 + "Oacute", # 208 + "Ocircumflex", # 209 + "apple", # 210 + "Ograve", # 211 + "Uacute", # 212 + "Ucircumflex", # 213 + "Ugrave", # 214 + "dotlessi", # 215 + "circumflex", # 216 + "tilde", # 217 + "macron", # 218 + "breve", # 219 + "dotaccent", # 220 + "ring", # 221 + "cedilla", # 222 + "hungarumlaut", # 223 + "ogonek", # 224 + 
"caron", # 225 + "Lslash", # 226 + "lslash", # 227 + "Scaron", # 228 + "scaron", # 229 + "Zcaron", # 230 + "zcaron", # 231 + "brokenbar", # 232 + "Eth", # 233 + "eth", # 234 + "Yacute", # 235 + "yacute", # 236 + "Thorn", # 237 + "thorn", # 238 + "minus", # 239 + "multiply", # 240 + "onesuperior", # 241 + "twosuperior", # 242 + "threesuperior", # 243 + "onehalf", # 244 + "onequarter", # 245 + "threequarters", # 246 + "franc", # 247 + "Gbreve", # 248 + "gbreve", # 249 + "Idotaccent", # 250 + "Scedilla", # 251 + "scedilla", # 252 + "Cacute", # 253 + "cacute", # 254 + "Ccaron", # 255 + "ccaron", # 256 + "dcroat" # 257 +] diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/asciiTable.py fonttools-3.0/Snippets/fontTools/ttLib/tables/asciiTable.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/asciiTable.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/asciiTable.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,22 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable + + +class asciiTable(DefaultTable.DefaultTable): + + def toXML(self, writer, ttFont): + data = tostr(self.data) + # removing null bytes. XXX needed?? 
+ data = data.split('\0') + data = strjoin(data) + writer.begintag("source") + writer.newline() + writer.write_noindent(data.replace("\r", "\n")) + writer.newline() + writer.endtag("source") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + lines = strjoin(content).replace("\r", "\n").split("\n") + self.data = tobytes("\r".join(lines[1:-1])) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_a_v_a_r.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_a_v_a_r.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_a_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_a_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,94 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import array +import struct +import warnings + + +# Apple's documentation of 'avar': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6avar.html + +AVAR_HEADER_FORMAT = """ + > # big endian + version: L + axisCount: L +""" + + +class table__a_v_a_r(DefaultTable.DefaultTable): + dependencies = ["fvar"] + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.segments = {} + + def compile(self, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + header = {"version": 0x00010000, "axisCount": len(axisTags)} + result = [sstruct.pack(AVAR_HEADER_FORMAT, header)] + for axis in axisTags: + mappings = sorted(self.segments[axis].items()) + result.append(struct.pack(">H", len(mappings))) + for key, value in mappings: + fixedKey = floatToFixed(key, 14) + fixedValue = floatToFixed(value, 14) + result.append(struct.pack(">hh", fixedKey, fixedValue)) + return bytesjoin(result) + + def decompile(self, data, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + header = {} + headerSize = sstruct.calcsize(AVAR_HEADER_FORMAT) + header = sstruct.unpack(AVAR_HEADER_FORMAT, data[0:headerSize]) + if header["version"] != 0x00010000: + raise TTLibError("unsupported 'avar' version %04x" % header["version"]) + pos = headerSize + for axis in axisTags: + segments = self.segments[axis] = {} + numPairs = struct.unpack(">H", data[pos:pos+2])[0] + pos = pos + 2 + for _ in range(numPairs): + fromValue, toValue = struct.unpack(">hh", data[pos:pos+4]) + segments[fixedToFloat(fromValue, 14)] = fixedToFloat(toValue, 14) + pos = pos + 4 + self.fixupSegments_(warn=warnings.warn) + + def toXML(self, writer, ttFont, progress=None): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + for axis in axisTags: + writer.begintag("segment", axis=axis) + writer.newline() + for key, value in sorted(self.segments[axis].items()): + writer.simpletag("mapping", **{"from": key, "to": value}) + 
writer.newline() + writer.endtag("segment") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "segment": + axis = attrs["axis"] + segment = self.segments[axis] = {} + for element in content: + if isinstance(element, tuple): + elementName, elementAttrs, _ = element + if elementName == "mapping": + fromValue = safeEval(elementAttrs["from"]) + toValue = safeEval(elementAttrs["to"]) + if fromValue in segment: + warnings.warn("duplicate entry for %s in axis '%s'" % + (fromValue, axis)) + segment[fromValue] = toValue + self.fixupSegments_(warn=warnings.warn) + + def fixupSegments_(self, warn): + for axis, mappings in self.segments.items(): + for k in [-1.0, 0.0, 1.0]: + if mappings.get(k) != k: + warn("avar axis '%s' should map %s to %s" % (axis, k, k)) + mappings[k] = k diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_a_v_a_r_test.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_a_v_a_r_test.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_a_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_a_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,91 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._a_v_a_r import table__a_v_a_r +from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis +import collections +import unittest + + +TEST_DATA = deHexStr( + "00 01 00 00 00 00 00 02 " + "00 04 C0 00 C0 00 00 00 00 00 13 33 33 33 40 00 40 00 " + "00 03 C0 00 C0 00 00 00 00 00 40 00 40 00") + + +class AxisVariationTableTest(unittest.TestCase): + def test_compile(self): + avar = table__a_v_a_r() + avar.segments["wdth"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} + avar.segments["wght"] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} + 
self.assertEqual(TEST_DATA, avar.compile(self.makeFont(["wdth", "wght"]))) + + def test_decompile(self): + avar = table__a_v_a_r() + avar.decompile(TEST_DATA, self.makeFont(["wdth", "wght"])) + self.assertEqual({ + "wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}, + "wght": {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} + }, avar.segments) + + def test_decompile_unsupportedVersion(self): + avar = table__a_v_a_r() + font = self.makeFont(["wdth", "wght"]) + self.assertRaises(TTLibError, avar.decompile, deHexStr("02 01 03 06 00 00 00 00"), font) + + def test_toXML(self): + avar = table__a_v_a_r() + avar.segments["opsz"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} + writer = XMLWriter(BytesIO()) + avar.toXML(writer, self.makeFont(["opsz"])) + self.assertEqual([ + '', + '', + '', + '', + '', + '' + ], self.xml_lines(writer)) + + def test_fromXML(self): + avar = table__a_v_a_r() + avar.fromXML("segment", {"axis":"wdth"}, [ + ("mapping", {"from": "-1.0", "to": "-1.0"}, []), + ("mapping", {"from": "0.0", "to": "0.0"}, []), + ("mapping", {"from": "0.7", "to": "0.2"}, []), + ("mapping", {"from": "1.0", "to": "1.0"}, []) + ], ttFont=None) + self.assertEqual({"wdth": {-1: -1, 0: 0, 0.7: 0.2, 1.0: 1.0}}, avar.segments) + + def test_fixupSegments(self): + avar = table__a_v_a_r() + avar.segments = {"wdth": {0.3: 0.8, 1.0: 0.7}} + warnings = [] + avar.fixupSegments_(lambda w: warnings.append(w)) + self.assertEqual({"wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}}, avar.segments) + self.assertEqual([ + "avar axis 'wdth' should map -1.0 to -1.0", + "avar axis 'wdth' should map 0.0 to 0.0", + "avar axis 'wdth' should map 1.0 to 1.0" + ], warnings) + + @staticmethod + def makeFont(axisTags): + """['opsz', 'wdth'] --> ttFont""" + fvar = table__f_v_a_r() + for tag in axisTags: + axis = Axis() + axis.axisTag = tag + fvar.axes.append(axis) + return {"fvar": fvar} + + @staticmethod + def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in 
content.splitlines()][1:] + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/B_A_S_E_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/B_A_S_E_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/B_A_S_E_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/B_A_S_E_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_B_A_S_E_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/BitmapGlyphMetrics.py fonttools-3.0/Snippets/fontTools/ttLib/tables/BitmapGlyphMetrics.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/BitmapGlyphMetrics.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/BitmapGlyphMetrics.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,58 @@ +# Since bitmap glyph metrics are shared between EBLC and EBDT +# this class gets its own python file. 
+from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval + + +bigGlyphMetricsFormat = """ + > # big endian + height: B + width: B + horiBearingX: b + horiBearingY: b + horiAdvance: B + vertBearingX: b + vertBearingY: b + vertAdvance: B +""" + +smallGlyphMetricsFormat = """ + > # big endian + height: B + width: B + BearingX: b + BearingY: b + Advance: B +""" + +class BitmapGlyphMetrics(object): + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__) + writer.newline() + for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]: + writer.simpletag(metricName, value=getattr(self, metricName)) + writer.newline() + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1]) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + # Make sure this is a metric that is needed by GlyphMetrics. + if name in metricNames: + vars(self)[name] = safeEval(attrs['value']) + else: + print("Warning: unknown name '%s' being ignored in %s." % name, self.__class__.__name__) + + +class BigGlyphMetrics(BitmapGlyphMetrics): + binaryFormat = bigGlyphMetricsFormat + +class SmallGlyphMetrics(BitmapGlyphMetrics): + binaryFormat = smallGlyphMetricsFormat diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/C_B_D_T_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/C_B_D_T_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/C_B_D_T_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/C_B_D_T_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,93 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Matt Fontaine + + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . import E_B_D_T_ +from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat +from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin +import struct + +class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_): + + # Change the data locator table being referenced. + locatorName = 'CBLC' + + # Modify the format class accessor for color bitmap use. + def getImageFormatClass(self, imageFormat): + try: + return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat) + except KeyError: + return cbdt_bitmap_classes[imageFormat] + +# Helper method for removing export features not supported by color bitmaps. +# Write data in the parent class will default to raw if an option is unsupported. +def _removeUnsupportedForColor(dataFunctions): + dataFunctions = dict(dataFunctions) + del dataFunctions['row'] + return dataFunctions + +class ColorBitmapGlyph(BitmapGlyph): + + fileExtension = '.png' + xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions) + +class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + (dataLen,) = struct.unpack(">L", data[:4]) + data = data[4:] + + # For the image data cut it to the size specified by dataLen. 
+ assert dataLen <= len(data), "Data overun in format 17" + self.imageData = data[:dataLen] + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) + dataList.append(struct.pack(">L", len(self.imageData))) + dataList.append(self.imageData) + return bytesjoin(dataList) + +class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + (dataLen,) = struct.unpack(">L", data[:4]) + data = data[4:] + + # For the image data cut it to the size specified by dataLen. + assert dataLen <= len(data), "Data overun in format 18" + self.imageData = data[:dataLen] + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + dataList.append(struct.pack(">L", len(self.imageData))) + dataList.append(self.imageData) + return bytesjoin(dataList) + +class cbdt_bitmap_format_19(ColorBitmapGlyph): + + def decompile(self): + (dataLen,) = struct.unpack(">L", self.data[:4]) + data = self.data[4:] + + assert dataLen <= len(data), "Data overun in format 19" + self.imageData = data[:dataLen] + + def compile(self, ttFont): + return struct.pack(">L", len(self.imageData)) + self.imageData + +# Dict for CBDT extended formats. +cbdt_bitmap_classes = { + 17: cbdt_bitmap_format_17, + 18: cbdt_bitmap_format_18, + 19: cbdt_bitmap_format_19, +} diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/C_B_L_C_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/C_B_L_C_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/C_B_L_C_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/C_B_L_C_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,11 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Matt Fontaine + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import E_B_L_C_ + +class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_): + + dependencies = ['CBDT'] diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/C_F_F_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/C_F_F_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/C_F_F_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/C_F_F_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,47 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import cffLib +from . import DefaultTable + + +class table_C_F_F_(DefaultTable.DefaultTable): + + def __init__(self, tag): + DefaultTable.DefaultTable.__init__(self, tag) + self.cff = cffLib.CFFFontSet() + self._gaveGlyphOrder = False + + def decompile(self, data, otFont): + self.cff.decompile(BytesIO(data), otFont) + assert len(self.cff) == 1, "can't deal with multi-font CFF tables." 
+ + def compile(self, otFont): + f = BytesIO() + self.cff.compile(f, otFont) + return f.getvalue() + + def haveGlyphNames(self): + if hasattr(self.cff[self.cff.fontNames[0]], "ROS"): + return False # CID-keyed font + else: + return True + + def getGlyphOrder(self): + if self._gaveGlyphOrder: + from fontTools import ttLib + raise ttLib.TTLibError("illegal use of getGlyphOrder()") + self._gaveGlyphOrder = True + return self.cff[self.cff.fontNames[0]].getGlyphOrder() + + def setGlyphOrder(self, glyphOrder): + pass + # XXX + #self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder) + + def toXML(self, writer, otFont, progress=None): + self.cff.toXML(writer, progress) + + def fromXML(self, name, attrs, content, otFont): + if not hasattr(self, "cff"): + self.cff = cffLib.CFFFontSet() + self.cff.fromXML(name, attrs, content) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_c_m_a_p.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_m_a_p.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_c_m_a_p.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_m_a_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1294 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval, readHex +from fontTools.misc.encodingTools import getEncoding +from fontTools.ttLib import getSearchRange +from fontTools.unicode import Unicode +from . 
import DefaultTable +import sys +import struct +import array +import operator + + +class table__c_m_a_p(DefaultTable.DefaultTable): + + def getcmap(self, platformID, platEncID): + for subtable in self.tables: + if (subtable.platformID == platformID and + subtable.platEncID == platEncID): + return subtable + return None # not found + + def decompile(self, data, ttFont): + tableVersion, numSubTables = struct.unpack(">HH", data[:4]) + self.tableVersion = int(tableVersion) + self.tables = tables = [] + seenOffsets = {} + for i in range(numSubTables): + platformID, platEncID, offset = struct.unpack( + ">HHl", data[4+i*8:4+(i+1)*8]) + platformID, platEncID = int(platformID), int(platEncID) + format, length = struct.unpack(">HH", data[offset:offset+4]) + if format in [8,10,12,13]: + format, reserved, length = struct.unpack(">HHL", data[offset:offset+8]) + elif format in [14]: + format, length = struct.unpack(">HL", data[offset:offset+6]) + + if not length: + print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset)) + continue + table = CmapSubtable.newSubtable(format) + table.platformID = platformID + table.platEncID = platEncID + # Note that by default we decompile only the subtable header info; + # any other data gets decompiled only when an attribute of the + # subtable is referenced. + table.decompileHeader(data[offset:offset+int(length)], ttFont) + if offset in seenOffsets: + table.data = None # Mark as decompiled + table.cmap = tables[seenOffsets[offset]].cmap + else: + seenOffsets[offset] = i + tables.append(table) + + def compile(self, ttFont): + self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__() + numSubTables = len(self.tables) + totalOffset = 4 + 8 * numSubTables + data = struct.pack(">HH", self.tableVersion, numSubTables) + tableData = b"" + seen = {} # Some tables are the same object reference. Don't compile them twice. 
+ done = {} # Some tables are different objects, but compile to the same data chunk + for table in self.tables: + try: + offset = seen[id(table.cmap)] + except KeyError: + chunk = table.compile(ttFont) + if chunk in done: + offset = done[chunk] + else: + offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData) + tableData = tableData + chunk + data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset) + return data + tableData + + def toXML(self, writer, ttFont): + writer.simpletag("tableVersion", version=self.tableVersion) + writer.newline() + for table in self.tables: + table.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "tableVersion": + self.tableVersion = safeEval(attrs["version"]) + return + if name[:12] != "cmap_format_": + return + if not hasattr(self, "tables"): + self.tables = [] + format = safeEval(name[12:]) + table = CmapSubtable.newSubtable(format) + table.platformID = safeEval(attrs["platformID"]) + table.platEncID = safeEval(attrs["platEncID"]) + table.fromXML(name, attrs, content, ttFont) + self.tables.append(table) + + +class CmapSubtable(object): + + @staticmethod + def getSubtableClass(format): + """Return the subtable class for a format.""" + return cmap_classes.get(format, cmap_format_unknown) + + @staticmethod + def newSubtable(format): + """Return a new instance of a subtable for format.""" + subtableClass = CmapSubtable.getSubtableClass(format) + return subtableClass(format) + + def __init__(self, format): + self.format = format + self.data = None + self.ttFont = None + + def __getattr__(self, attr): + # allow lazy decompilation of subtables. + if attr[:2] == '__': # don't handle requests for member functions like '__lt__' + raise AttributeError(attr) + if self.data is None: + raise AttributeError(attr) + self.decompile(None, None) # use saved data. + self.data = None # Once this table has been decompiled, make sure we don't + # just return the original data. 
Also avoids recursion when + # called with an attribute that the cmap subtable doesn't have. + return getattr(self, attr) + + def decompileHeader(self, data, ttFont): + format, length, language = struct.unpack(">HHH", data[:6]) + assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length) + self.format = int(format) + self.length = int(length) + self.language = int(language) + self.data = data[6:] + self.ttFont = ttFont + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("language", self.language), + ]) + writer.newline() + codes = sorted(self.cmap.items()) + self._writeCodes(codes, writer) + writer.endtag(self.__class__.__name__) + writer.newline() + + def getEncoding(self, default=None): + """Returns the Python encoding name for this cmap subtable based on its platformID, + platEncID, and language. If encoding for these values is not known, by default + None is returned. That can be overriden by passing a value to the default + argument. + + Note that if you want to choose a "preferred" cmap subtable, most of the time + self.isUnicode() is what you want as that one only returns true for the modern, + commonly used, Unicode-compatible triplets, not the legacy ones. 
+ """ + return getEncoding(self.platformID, self.platEncID, self.language, default) + + def isUnicode(self): + return (self.platformID == 0 or + (self.platformID == 3 and self.platEncID in [0, 1, 10])) + + def isSymbol(self): + return self.platformID == 3 and self.platEncID == 0 + + def _writeCodes(self, codes, writer): + isUnicode = self.isUnicode() + for code, name in codes: + writer.simpletag("map", code=hex(code), name=name) + if isUnicode: + writer.comment(Unicode[code]) + writer.newline() + + def __lt__(self, other): + if not isinstance(other, CmapSubtable): + return NotImplemented + + # implemented so that list.sort() sorts according to the spec. + selfTuple = ( + getattr(self, "platformID", None), + getattr(self, "platEncID", None), + getattr(self, "language", None), + self.__dict__) + otherTuple = ( + getattr(other, "platformID", None), + getattr(other, "platEncID", None), + getattr(other, "language", None), + other.__dict__) + return selfTuple < otherTuple + + +class cmap_format_0(CmapSubtable): + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. 
+ if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + data = self.data # decompileHeader assigns the data after the header to self.data + assert 262 == self.length, "Format 0 cmap subtable not 262 bytes" + glyphIdArray = array.array("B") + glyphIdArray.fromstring(self.data) + self.cmap = cmap = {} + lenArray = len(glyphIdArray) + charCodes = list(range(lenArray)) + names = map(self.ttFont.getGlyphName, glyphIdArray) + list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHH", 0, 262, self.language) + self.data + + charCodeList = sorted(self.cmap.items()) + charCodes = [entry[0] for entry in charCodeList] + valueList = [entry[1] for entry in charCodeList] + assert charCodes == list(range(256)) + valueList = map(ttFont.getGlyphID, valueList) + + glyphIdArray = array.array("B", valueList) + data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring() + assert len(data) == 262 + return data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +subHeaderFormat = ">HHhH" +class SubHeader(object): + def __init__(self): + self.firstCode = None + self.entryCount = None + self.idDelta = None + self.idRangeOffset = None + self.glyphIndexArray = [] + +class cmap_format_2(CmapSubtable): + + def setIDDelta(self, subHeader): + subHeader.idDelta = 0 + # find the minGI which is not zero. 
+ minGI = subHeader.glyphIndexArray[0] + for gid in subHeader.glyphIndexArray: + if (gid != 0) and (gid < minGI): + minGI = gid + # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1. + # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K. + # We would like to pick an idDelta such that the first glyphArray GID is 1, + # so that we are more likely to be able to combine glypharray GID subranges. + # This means that we have a problem when minGI is > 32K + # Since the final gi is reconstructed from the glyphArray GID by: + # (short)finalGID = (gid + idDelta) % 0x10000), + # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the + # negative number to an unsigned short. + + if (minGI > 1): + if minGI > 0x7FFF: + subHeader.idDelta = -(0x10000 - minGI) -1 + else: + subHeader.idDelta = minGI -1 + idDelta = subHeader.idDelta + for i in range(subHeader.entryCount): + gid = subHeader.glyphIndexArray[i] + if gid > 0: + subHeader.glyphIndexArray[i] = gid - idDelta + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + + data = self.data # decompileHeader assigns the data after the header to self.data + subHeaderKeys = [] + maxSubHeaderindex = 0 + # get the key array, and determine the number of subHeaders. 
+ allKeys = array.array("H") + allKeys.fromstring(data[:512]) + data = data[512:] + if sys.byteorder != "big": + allKeys.byteswap() + subHeaderKeys = [ key//8 for key in allKeys] + maxSubHeaderindex = max(subHeaderKeys) + + #Load subHeaders + subHeaderList = [] + pos = 0 + for i in range(maxSubHeaderindex + 1): + subHeader = SubHeader() + (subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \ + subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8]) + pos += 8 + giDataPos = pos + subHeader.idRangeOffset-2 + giList = array.array("H") + giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2]) + if sys.byteorder != "big": + giList.byteswap() + subHeader.glyphIndexArray = giList + subHeaderList.append(subHeader) + # How this gets processed. + # Charcodes may be one or two bytes. + # The first byte of a charcode is mapped through the subHeaderKeys, to select + # a subHeader. For any subheader but 0, the next byte is then mapped through the + # selected subheader. If subheader Index 0 is selected, then the byte itself is + # mapped through the subheader, and there is no second byte. + # Then assume that the subsequent byte is the first byte of the next charcode,and repeat. + # + # Each subheader references a range in the glyphIndexArray whose length is entryCount. + # The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray + # referenced by another subheader. + # The only subheader that will be referenced by more than one first-byte value is the subheader + # that maps the entire range of glyphID values to glyphIndex 0, e.g notdef: + # {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx} + # A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex. + # A subheader specifies a subrange within (0...256) by the + # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero + # (e.g. 
glyph not in font). + # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar). + # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by + # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the + # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex. + # Example for Logocut-Medium + # first byte of charcode = 129; selects subheader 1. + # subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252} + # second byte of charCode = 66 + # the index offset = 66-64 = 2. + # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is: + # [glyphIndexArray index], [subrange array index] = glyphIndex + # [256], [0]=1 from charcode [129, 64] + # [257], [1]=2 from charcode [129, 65] + # [258], [2]=3 from charcode [129, 66] + # [259], [3]=4 from charcode [129, 67] + # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero, + # add it to the glyphID to get the final glyphIndex + # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew! + + self.data = b"" + self.cmap = cmap = {} + notdefGI = 0 + for firstByte in range(256): + subHeadindex = subHeaderKeys[firstByte] + subHeader = subHeaderList[subHeadindex] + if subHeadindex == 0: + if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount): + continue # gi is notdef. + else: + charCode = firstByte + offsetIndex = firstByte - subHeader.firstCode + gi = subHeader.glyphIndexArray[offsetIndex] + if gi != 0: + gi = (gi + subHeader.idDelta) % 0x10000 + else: + continue # gi is notdef. 
+ cmap[charCode] = gi + else: + if subHeader.entryCount: + charCodeOffset = firstByte * 256 + subHeader.firstCode + for offsetIndex in range(subHeader.entryCount): + charCode = charCodeOffset + offsetIndex + gi = subHeader.glyphIndexArray[offsetIndex] + if gi != 0: + gi = (gi + subHeader.idDelta) % 0x10000 + else: + continue + cmap[charCode] = gi + # If not subHeader.entryCount, then all char codes with this first byte are + # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the + # same as mapping it to .notdef. + # cmap values are GID's. + glyphOrder = self.ttFont.getGlyphOrder() + gids = list(cmap.values()) + charCodes = list(cmap.keys()) + lenCmap = len(gids) + try: + names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) + except IndexError: + getGlyphName = self.ttFont.getGlyphName + names = list(map(getGlyphName, gids )) + list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHH", self.format, self.length, self.language) + self.data + kEmptyTwoCharCodeRange = -1 + notdefGI = 0 + + items = sorted(self.cmap.items()) + charCodes = [item[0] for item in items] + names = [item[1] for item in items] + nameMap = ttFont.getReverseGlyphMap() + lenCharCodes = len(charCodes) + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + except KeyError: + # allow virtual GIDs in format 2 tables + gids = [] + for name in names: + try: + gid = nameMap[name] + except KeyError: + try: + if (name[:3] == 'gid'): + gid = eval(name[3:]) + else: + gid = ttFont.getGlyphID(name) + except: + raise KeyError(name) + + gids.append(gid) + + # Process the (char code to gid) item list in char code order. + # By definition, all one byte char codes map to subheader 0. 
+ # For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0, + # which defines all char codes in its range to map to notdef) unless proven otherwise. + # Note that since the char code items are processed in char code order, all the char codes with the + # same first byte are in sequential order. + + subHeaderKeys = [ kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList. + subHeaderList = [] + + # We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up + # with a cmap where all the one byte char codes map to notdef, + # with the result that the subhead 0 would not get created just by processing the item list. + charCode = charCodes[0] + if charCode > 255: + subHeader = SubHeader() + subHeader.firstCode = 0 + subHeader.entryCount = 0 + subHeader.idDelta = 0 + subHeader.idRangeOffset = 0 + subHeaderList.append(subHeader) + + lastFirstByte = -1 + items = zip(charCodes, gids) + for charCode, gid in items: + if gid == 0: + continue + firstbyte = charCode >> 8 + secondByte = charCode & 0x00FF + + if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one. + if lastFirstByte > -1: + # fix GI's and iDelta of current subheader. + self.setIDDelta(subHeader) + + # If it was sunheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero + # for the indices matching the char codes. + if lastFirstByte == 0: + for index in range(subHeader.entryCount): + charCode = subHeader.firstCode + index + subHeaderKeys[charCode] = 0 + + assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange." 
+ # init new subheader + subHeader = SubHeader() + subHeader.firstCode = secondByte + subHeader.entryCount = 1 + subHeader.glyphIndexArray.append(gid) + subHeaderList.append(subHeader) + subHeaderKeys[firstbyte] = len(subHeaderList) -1 + lastFirstByte = firstbyte + else: + # need to fill in with notdefs all the code points between the last charCode and the current charCode. + codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount) + for i in range(codeDiff): + subHeader.glyphIndexArray.append(notdefGI) + subHeader.glyphIndexArray.append(gid) + subHeader.entryCount = subHeader.entryCount + codeDiff + 1 + + # fix GI's and iDelta of last subheader that we we added to the subheader array. + self.setIDDelta(subHeader) + + # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges. + subHeader = SubHeader() + subHeader.firstCode = 0 + subHeader.entryCount = 0 + subHeader.idDelta = 0 + subHeader.idRangeOffset = 2 + subHeaderList.append(subHeader) + emptySubheadIndex = len(subHeaderList) - 1 + for index in range(256): + if subHeaderKeys[index] == kEmptyTwoCharCodeRange: + subHeaderKeys[index] = emptySubheadIndex + # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the + # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray, + # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with + # charcode 0 and GID 0. + + idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset. + subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2. 
+ for index in range(subheadRangeLen): + subHeader = subHeaderList[index] + subHeader.idRangeOffset = 0 + for j in range(index): + prevSubhead = subHeaderList[j] + if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray + subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8 + subHeader.glyphIndexArray = [] + break + if subHeader.idRangeOffset == 0: # didn't find one. + subHeader.idRangeOffset = idRangeOffset + idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray. + else: + idRangeOffset = idRangeOffset - 8 # one less subheader + + # Now we can write out the data! + length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array. + for subhead in subHeaderList[:-1]: + length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subhead may share subArrays. + dataList = [struct.pack(">HHH", 2, length, self.language)] + for index in subHeaderKeys: + dataList.append(struct.pack(">H", index*8)) + for subhead in subHeaderList: + dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset)) + for subhead in subHeaderList[:-1]: + for gi in subhead.glyphIndexArray: + dataList.append(struct.pack(">H", gi)) + data = bytesjoin(dataList) + assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length) + return data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +cmap_format_4_format = ">7H" + +#uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF. 
+#uint16 reservedPad # This value should be zero +#uint16 startCode[segCount] # Starting character code for each segment +#uint16 idDelta[segCount] # Delta for all character codes in segment +#uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0 +#uint16 glyphIndexArray[variable] # Glyph index array + +def splitRange(startCode, endCode, cmap): + # Try to split a range of character codes into subranges with consecutive + # glyph IDs in such a way that the cmap4 subtable can be stored "most" + # efficiently. I can't prove I've got the optimal solution, but it seems + # to do well with the fonts I tested: none became bigger, many became smaller. + if startCode == endCode: + return [], [endCode] + + lastID = cmap[startCode] + lastCode = startCode + inOrder = None + orderedBegin = None + subRanges = [] + + # Gather subranges in which the glyph IDs are consecutive. + for code in range(startCode + 1, endCode + 1): + glyphID = cmap[code] + + if glyphID - 1 == lastID: + if inOrder is None or not inOrder: + inOrder = 1 + orderedBegin = lastCode + else: + if inOrder: + inOrder = 0 + subRanges.append((orderedBegin, lastCode)) + orderedBegin = None + + lastID = glyphID + lastCode = code + + if inOrder: + subRanges.append((orderedBegin, lastCode)) + assert lastCode == endCode + + # Now filter out those new subranges that would only make the data bigger. + # A new segment cost 8 bytes, not using a new segment costs 2 bytes per + # character. 
+ newRanges = [] + for b, e in subRanges: + if b == startCode and e == endCode: + break # the whole range, we're fine + if b == startCode or e == endCode: + threshold = 4 # split costs one more segment + else: + threshold = 8 # split costs two more segments + if (e - b + 1) > threshold: + newRanges.append((b, e)) + subRanges = newRanges + + if not subRanges: + return [], [endCode] + + if subRanges[0][0] != startCode: + subRanges.insert(0, (startCode, subRanges[0][0] - 1)) + if subRanges[-1][1] != endCode: + subRanges.append((subRanges[-1][1] + 1, endCode)) + + # Fill the "holes" in the segments list -- those are the segments in which + # the glyph IDs are _not_ consecutive. + i = 1 + while i < len(subRanges): + if subRanges[i-1][1] + 1 != subRanges[i][0]: + subRanges.insert(i, (subRanges[i-1][1] + 1, subRanges[i][0] - 1)) + i = i + 1 + i = i + 1 + + # Transform the ranges into startCode/endCode lists. + start = [] + end = [] + for b, e in subRanges: + start.append(b) + end.append(e) + start.pop(0) + + assert len(start) + 1 == len(end) + return start, end + + +class cmap_format_4(CmapSubtable): + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. 
+ if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + + data = self.data # decompileHeader assigns the data after the header to self.data + (segCountX2, searchRange, entrySelector, rangeShift) = \ + struct.unpack(">4H", data[:8]) + data = data[8:] + segCount = segCountX2 // 2 + + allCodes = array.array("H") + allCodes.fromstring(data) + self.data = data = None + + if sys.byteorder != "big": + allCodes.byteswap() + + # divide the data + endCode = allCodes[:segCount] + allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field + startCode = allCodes[:segCount] + allCodes = allCodes[segCount:] + idDelta = allCodes[:segCount] + allCodes = allCodes[segCount:] + idRangeOffset = allCodes[:segCount] + glyphIndexArray = allCodes[segCount:] + lenGIArray = len(glyphIndexArray) + + # build 2-byte character mapping + charCodes = [] + gids = [] + for i in range(len(startCode) - 1): # don't do 0xffff! + start = startCode[i] + delta = idDelta[i] + rangeOffset = idRangeOffset[i] + # *someone* needs to get killed. + partial = rangeOffset // 2 - start + i - len(idRangeOffset) + + rangeCharCodes = list(range(startCode[i], endCode[i] + 1)) + charCodes.extend(rangeCharCodes) + if rangeOffset == 0: + gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes]) + else: + for charCode in rangeCharCodes: + index = charCode + partial + assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" 
% (i, index, lenGIArray) + if glyphIndexArray[index] != 0: # if not missing glyph + glyphID = glyphIndexArray[index] + delta + else: + glyphID = 0 # missing glyph + gids.append(glyphID & 0xFFFF) + + self.cmap = cmap = {} + lenCmap = len(gids) + glyphOrder = self.ttFont.getGlyphOrder() + try: + names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) + except IndexError: + getGlyphName = self.ttFont.getGlyphName + names = list(map(getGlyphName, gids )) + list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHH", self.format, self.length, self.language) + self.data + + charCodes = list(self.cmap.keys()) + lenCharCodes = len(charCodes) + if lenCharCodes == 0: + startCode = [0xffff] + endCode = [0xffff] + else: + charCodes.sort() + names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes)) + nameMap = ttFont.getReverseGlyphMap() + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + except KeyError: + # allow virtual GIDs in format 4 tables + gids = [] + for name in names: + try: + gid = nameMap[name] + except KeyError: + try: + if (name[:3] == 'gid'): + gid = eval(name[3:]) + else: + gid = ttFont.getGlyphID(name) + except: + raise KeyError(name) + + gids.append(gid) + cmap = {} # code:glyphID mapping + list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids)) + + # Build startCode and endCode lists. + # Split the char codes in ranges of consecutive char codes, then split + # each range in more ranges of consecutive/not consecutive glyph IDs. + # See splitRange(). 
+ lastCode = charCodes[0] + endCode = [] + startCode = [lastCode] + for charCode in charCodes[1:]: # skip the first code, it's the first start code + if charCode == lastCode + 1: + lastCode = charCode + continue + start, end = splitRange(startCode[-1], lastCode, cmap) + startCode.extend(start) + endCode.extend(end) + startCode.append(charCode) + lastCode = charCode + start, end = splitRange(startCode[-1], lastCode, cmap) + startCode.extend(start) + endCode.extend(end) + startCode.append(0xffff) + endCode.append(0xffff) + + # build up rest of cruft + idDelta = [] + idRangeOffset = [] + glyphIndexArray = [] + for i in range(len(endCode)-1): # skip the closing codes (0xffff) + indices = [] + for charCode in range(startCode[i], endCode[i] + 1): + indices.append(cmap[charCode]) + if (indices == list(range(indices[0], indices[0] + len(indices)))): + idDelta.append((indices[0] - startCode[i]) % 0x10000) + idRangeOffset.append(0) + else: + # someone *definitely* needs to get killed. + idDelta.append(0) + idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i)) + glyphIndexArray.extend(indices) + idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef + idRangeOffset.append(0) + + # Insane. 
+ segCount = len(endCode) + segCountX2 = segCount * 2 + searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2) + + charCodeArray = array.array("H", endCode + [0] + startCode) + idDeltaArray = array.array("H", idDelta) + restArray = array.array("H", idRangeOffset + glyphIndexArray) + if sys.byteorder != "big": + charCodeArray.byteswap() + idDeltaArray.byteswap() + restArray.byteswap() + data = charCodeArray.tostring() + idDeltaArray.tostring() + restArray.tostring() + + length = struct.calcsize(cmap_format_4_format) + len(data) + header = struct.pack(cmap_format_4_format, self.format, length, self.language, + segCountX2, searchRange, entrySelector, rangeShift) + return header + data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + nameMap, attrsMap, dummyContent = element + if nameMap != "map": + assert 0, "Unrecognized keyword in cmap subtable" + cmap[safeEval(attrsMap["code"])] = attrsMap["name"] + + +class cmap_format_6(CmapSubtable): + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + + data = self.data # decompileHeader assigns the data after the header to self.data + firstCode, entryCount = struct.unpack(">HH", data[:4]) + firstCode = int(firstCode) + data = data[4:] + #assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!! 
+ glyphIndexArray = array.array("H") + glyphIndexArray.fromstring(data[:2 * int(entryCount)]) + if sys.byteorder != "big": + glyphIndexArray.byteswap() + self.data = data = None + + self.cmap = cmap = {} + + lenArray = len(glyphIndexArray) + charCodes = list(range(firstCode, firstCode + lenArray)) + glyphOrder = self.ttFont.getGlyphOrder() + try: + names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray )) + except IndexError: + getGlyphName = self.ttFont.getGlyphName + names = list(map(getGlyphName, glyphIndexArray )) + list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHH", self.format, self.length, self.language) + self.data + cmap = self.cmap + codes = sorted(cmap.keys()) + if codes: # yes, there are empty cmap tables. + codes = list(range(codes[0], codes[-1] + 1)) + firstCode = codes[0] + valueList = [cmap.get(code, ".notdef") for code in codes] + valueList = map(ttFont.getGlyphID, valueList) + glyphIndexArray = array.array("H", valueList) + if sys.byteorder != "big": + glyphIndexArray.byteswap() + data = glyphIndexArray.tostring() + else: + data = b"" + firstCode = 0 + header = struct.pack(">HHHHH", + 6, len(data) + 10, self.language, firstCode, len(codes)) + return header + data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +class cmap_format_12_or_13(CmapSubtable): + + def __init__(self, format): + self.format = format + self.reserved = 0 + self.data = None + self.ttFont = None + + def decompileHeader(self, data, ttFont): + format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16]) + assert len(data) == (16 + nGroups*12) == 
(length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length) + self.format = format + self.reserved = reserved + self.length = length + self.language = language + self.nGroups = nGroups + self.data = data[16:] + self.ttFont = ttFont + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + + data = self.data # decompileHeader assigns the data after the header to self.data + charCodes = [] + gids = [] + pos = 0 + for i in range(self.nGroups): + startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] ) + pos += 12 + lenGroup = 1 + endCharCode - startCharCode + charCodes.extend(list(range(startCharCode, endCharCode +1))) + gids.extend(self._computeGIDs(glyphID, lenGroup)) + self.data = data = None + self.cmap = cmap = {} + lenCmap = len(gids) + glyphOrder = self.ttFont.getGlyphOrder() + try: + names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) + except IndexError: + getGlyphName = self.ttFont.getGlyphName + names = list(map(getGlyphName, gids )) + list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data + charCodes = list(self.cmap.keys()) + lenCharCodes = len(charCodes) + names = list(self.cmap.values()) + nameMap = ttFont.getReverseGlyphMap() + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + 
except KeyError: + # allow virtual GIDs in format 12 tables + gids = [] + for name in names: + try: + gid = nameMap[name] + except KeyError: + try: + if (name[:3] == 'gid'): + gid = eval(name[3:]) + else: + gid = ttFont.getGlyphID(name) + except: + raise KeyError(name) + + gids.append(gid) + + cmap = {} # code:glyphID mapping + list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids)) + + charCodes.sort() + index = 0 + startCharCode = charCodes[0] + startGlyphID = cmap[startCharCode] + lastGlyphID = startGlyphID - self._format_step + lastCharCode = startCharCode - 1 + nGroups = 0 + dataList = [] + maxIndex = len(charCodes) + for index in range(maxIndex): + charCode = charCodes[index] + glyphID = cmap[charCode] + if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode): + dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)) + startCharCode = charCode + startGlyphID = glyphID + nGroups = nGroups + 1 + lastGlyphID = glyphID + lastCharCode = charCode + dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)) + nGroups = nGroups + 1 + data = bytesjoin(dataList) + lengthSubtable = len(data) +16 + assert len(data) == (nGroups*12) == (lengthSubtable-16) + return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("format", self.format), + ("reserved", self.reserved), + ("length", self.length), + ("language", self.language), + ("nGroups", self.nGroups), + ]) + writer.newline() + codes = sorted(self.cmap.items()) + self._writeCodes(codes, writer) + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.format = safeEval(attrs["format"]) + self.reserved = safeEval(attrs["reserved"]) + self.length = safeEval(attrs["length"]) + self.language = 
safeEval(attrs["language"]) + self.nGroups = safeEval(attrs["nGroups"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +class cmap_format_12(cmap_format_12_or_13): + + _format_step = 1 + + def __init__(self, format=12): + cmap_format_12_or_13.__init__(self, format) + + def _computeGIDs(self, startingGlyph, numberOfGlyphs): + return list(range(startingGlyph, startingGlyph + numberOfGlyphs)) + + def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): + return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode) + + +class cmap_format_13(cmap_format_12_or_13): + + _format_step = 0 + + def __init__(self, format=13): + cmap_format_12_or_13.__init__(self, format) + + def _computeGIDs(self, startingGlyph, numberOfGlyphs): + return [startingGlyph] * numberOfGlyphs + + def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): + return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode) + + +def cvtToUVS(threeByteString): + data = b"\0" + threeByteString + val, = struct.unpack(">L", data) + return val + +def cvtFromUVS(val): + assert 0 <= val < 0x1000000 + fourByteString = struct.pack(">L", val) + return fourByteString[1:] + + +class cmap_format_14(CmapSubtable): + + def decompileHeader(self, data, ttFont): + format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10]) + self.data = data[10:] + self.length = length + self.numVarSelectorRecords = numVarSelectorRecords + self.ttFont = ttFont + self.language = 0xFF # has no language. 
+ + def decompile(self, data, ttFont): + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + data = self.data + + self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail. + uvsDict = {} + recOffset = 0 + for n in range(self.numVarSelectorRecords): + uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11]) + recOffset += 11 + varUVS = cvtToUVS(uvs) + if defOVSOffset: + startOffset = defOVSOffset - 10 + numValues, = struct.unpack(">L", data[startOffset:startOffset+4]) + startOffset +=4 + for r in range(numValues): + uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4]) + startOffset += 4 + firstBaseUV = cvtToUVS(uv) + cnt = addtlCnt+1 + baseUVList = list(range(firstBaseUV, firstBaseUV+cnt)) + glyphList = [None]*cnt + localUVList = zip(baseUVList, glyphList) + try: + uvsDict[varUVS].extend(localUVList) + except KeyError: + uvsDict[varUVS] = list(localUVList) + + if nonDefUVSOffset: + startOffset = nonDefUVSOffset - 10 + numRecs, = struct.unpack(">L", data[startOffset:startOffset+4]) + startOffset +=4 + localUVList = [] + for r in range(numRecs): + uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5]) + startOffset += 5 + uv = cvtToUVS(uv) + glyphName = self.ttFont.getGlyphName(gid) + localUVList.append( [uv, glyphName] ) + try: + uvsDict[varUVS].extend(localUVList) + except KeyError: + uvsDict[varUVS] = localUVList + + self.uvsDict = uvsDict + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("format", self.format), + ("length", self.length), + ("numVarSelectorRecords", self.numVarSelectorRecords), + ]) + writer.newline() + uvsDict = self.uvsDict + uvsList = sorted(uvsDict.keys()) + for uvs in uvsList: + uvList = uvsDict[uvs] + uvList.sort(key=lambda item: 
(item[1] is not None, item[0], item[1])) + for uv, gname in uvList: + if gname is None: + gname = "None" + # I use the arg rather than th keyword syntax in order to preserve the attribute order. + writer.simpletag("map", [ ("uvs",hex(uvs)), ("uv",hex(uv)), ("name", gname)] ) + writer.newline() + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.format = safeEval(attrs["format"]) + self.length = safeEval(attrs["length"]) + self.numVarSelectorRecords = safeEval(attrs["numVarSelectorRecords"]) + self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail + if not hasattr(self, "cmap"): + self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail. + if not hasattr(self, "uvsDict"): + self.uvsDict = {} + uvsDict = self.uvsDict + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + uvs = safeEval(attrs["uvs"]) + uv = safeEval(attrs["uv"]) + gname = attrs["name"] + if gname == "None": + gname = None + try: + uvsDict[uvs].append( [uv, gname]) + except KeyError: + uvsDict[uvs] = [ [uv, gname] ] + + def compile(self, ttFont): + if self.data: + return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data + + uvsDict = self.uvsDict + uvsList = sorted(uvsDict.keys()) + self.numVarSelectorRecords = len(uvsList) + offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block. 
+ data = [] + varSelectorRecords =[] + for uvs in uvsList: + entryList = uvsDict[uvs] + + defList = [entry for entry in entryList if entry[1] is None] + if defList: + defList = [entry[0] for entry in defList] + defOVSOffset = offset + defList.sort() + + lastUV = defList[0] + cnt = -1 + defRecs = [] + for defEntry in defList: + cnt +=1 + if (lastUV+cnt) != defEntry: + rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1) + lastUV = defEntry + defRecs.append(rec) + cnt = 0 + + rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt) + defRecs.append(rec) + + numDefRecs = len(defRecs) + data.append(struct.pack(">L", numDefRecs)) + data.extend(defRecs) + offset += 4 + numDefRecs*4 + else: + defOVSOffset = 0 + + ndefList = [entry for entry in entryList if entry[1] is not None] + if ndefList: + nonDefUVSOffset = offset + ndefList.sort() + numNonDefRecs = len(ndefList) + data.append(struct.pack(">L", numNonDefRecs)) + offset += 4 + numNonDefRecs*5 + + for uv, gname in ndefList: + gid = ttFont.getGlyphID(gname) + ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid) + data.append(ndrec) + else: + nonDefUVSOffset = 0 + + vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset) + varSelectorRecords.append(vrec) + + data = bytesjoin(varSelectorRecords) + bytesjoin(data) + self.length = 10 + len(data) + headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data = headerdata + data + + return self.data + + +class cmap_format_unknown(CmapSubtable): + + def toXML(self, writer, ttFont): + cmapName = self.__class__.__name__[:12] + str(self.format) + writer.begintag(cmapName, [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ]) + writer.newline() + writer.dumphex(self.data) + writer.endtag(cmapName) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.data = readHex(content) + self.cmap = {} + + def decompileHeader(self, data, ttFont): + self.language = 0 # dummy value + self.data = data + 
+ def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + + def compile(self, ttFont): + if self.data: + return self.data + else: + return None + +cmap_classes = { + 0: cmap_format_0, + 2: cmap_format_2, + 4: cmap_format_4, + 6: cmap_format_6, + 12: cmap_format_12, + 13: cmap_format_13, + 14: cmap_format_14, +} diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_c_m_a_p_test.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_m_a_p_test.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_c_m_a_p_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_m_a_p_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,53 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools import ttLib +import unittest +from ._c_m_a_p import CmapSubtable + +class CmapSubtableTest(unittest.TestCase): + + def makeSubtable(self, platformID, platEncID, langID): + subtable = CmapSubtable(None) + subtable.platformID, subtable.platEncID, subtable.language = (platformID, platEncID, langID) + return subtable + + def test_toUnicode_utf16be(self): + subtable = self.makeSubtable(0, 2, 7) + self.assertEqual("utf_16_be", subtable.getEncoding()) + self.assertEqual(True, subtable.isUnicode()) + + def test_toUnicode_macroman(self): + subtable = self.makeSubtable(1, 0, 7) # MacRoman + self.assertEqual("mac_roman", subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_toUnicode_macromanian(self): + subtable = self.makeSubtable(1, 0, 37) # Mac Romanian + self.assertNotEqual(None, 
subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_extended_mac_encodings(self): + subtable = self.makeSubtable(1, 1, 0) # Mac Japanese + self.assertNotEqual(None, subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_extended_unknown(self): + subtable = self.makeSubtable(10, 11, 12) + self.assertEqual(subtable.getEncoding(), None) + self.assertEqual(subtable.getEncoding("ascii"), "ascii") + self.assertEqual(subtable.getEncoding(default="xyz"), "xyz") + + def test_decompile_4(self): + subtable = CmapSubtable.newSubtable(4) + font = ttLib.TTFont() + font.setGlyphOrder([]) + subtable.decompile(b'\0' * 3 + b'\x10' + b'\0' * 12, font) + + def test_decompile_12(self): + subtable = CmapSubtable.newSubtable(12) + font = ttLib.TTFont() + font.setGlyphOrder([]) + subtable.decompile(b'\0' * 7 + b'\x10' + b'\0' * 8, font) + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/C_O_L_R_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/C_O_L_R_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/C_O_L_R_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/C_O_L_R_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,159 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . import DefaultTable +import operator +import struct + + +class table_C_O_L_R_(DefaultTable.DefaultTable): + + """ This table is structured so that you can treat it like a dictionary keyed by glyph name. + ttFont['COLR'][] will return the color layers for any glyph + ttFont['COLR'][] = will set the color layers for any glyph. 
+ """ + + def decompile(self, data, ttFont): + self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID + self.version, numBaseGlyphRecords, offsetBaseGlyphRecord, offsetLayerRecord, numLayerRecords = struct.unpack(">HHLLH", data[:14]) + assert (self.version == 0), "Version of COLR table is higher than I know how to handle" + glyphOrder = ttFont.getGlyphOrder() + gids = [] + layerLists = [] + glyphPos = offsetBaseGlyphRecord + for i in range(numBaseGlyphRecords): + gid, firstLayerIndex, numLayers = struct.unpack(">HHH", data[glyphPos:glyphPos+6]) + glyphPos += 6 + gids.append(gid) + assert (firstLayerIndex + numLayers <= numLayerRecords) + layerPos = offsetLayerRecord + firstLayerIndex * 4 + layers = [] + for j in range(numLayers): + layerGid, colorID = struct.unpack(">HH", data[layerPos:layerPos+4]) + try: + layerName = glyphOrder[layerGid] + except IndexError: + layerName = self.getGlyphName(layerGid) + layerPos += 4 + layers.append(LayerRecord(layerName, colorID)) + layerLists.append(layers) + + self.ColorLayers = colorLayerLists = {} + try: + names = list(map(operator.getitem, [glyphOrder]*numBaseGlyphRecords, gids)) + except IndexError: + getGlyphName = self.getGlyphName + names = list(map(getGlyphName, gids )) + + list(map(operator.setitem, [colorLayerLists]*numBaseGlyphRecords, names, layerLists)) + + def compile(self, ttFont): + ordered = [] + ttFont.getReverseGlyphMap(rebuild=True) + glyphNames = self.ColorLayers.keys() + for glyphName in glyphNames: + try: + gid = ttFont.getGlyphID(glyphName) + except: + assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) + ordered.append([gid, glyphName, self.ColorLayers[glyphName]]) + ordered.sort() + + glyphMap = [] + layerMap = [] + for (gid, glyphName, layers) in ordered: + glyphMap.append(struct.pack(">HHH", gid, len(layerMap), len(layers))) + for layer in layers: + layerMap.append(struct.pack(">HH", ttFont.getGlyphID(layer.name), 
layer.colorID)) + + dataList = [struct.pack(">HHLLH", self.version, len(glyphMap), 14, 14+6*len(glyphMap), len(layerMap))] + dataList.extend(glyphMap) + dataList.extend(layerMap) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + ordered = [] + glyphNames = self.ColorLayers.keys() + for glyphName in glyphNames: + try: + gid = ttFont.getGlyphID(glyphName) + except: + assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) + ordered.append([gid, glyphName, self.ColorLayers[glyphName]]) + ordered.sort() + for entry in ordered: + writer.begintag("ColorGlyph", name=entry[1]) + writer.newline() + for layer in entry[2]: + layer.toXML(writer, ttFont) + writer.endtag("ColorGlyph") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "ColorLayers"): + self.ColorLayers = {} + self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID + if name == "ColorGlyph": + glyphName = attrs["name"] + for element in content: + if isinstance(element, basestring): + continue + layers = [] + for element in content: + if isinstance(element, basestring): + continue + layer = LayerRecord() + layer.fromXML(element[0], element[1], element[2], ttFont) + layers.append (layer) + operator.setitem(self, glyphName, layers) + elif "value" in attrs: + setattr(self, name, safeEval(attrs["value"])) + + def __getitem__(self, glyphSelector): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = self.getGlyphName(glyphSelector) + + if glyphSelector not in self.ColorLayers: + return None + + return self.ColorLayers[glyphSelector] + + def __setitem__(self, glyphSelector, value): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = self.getGlyphName(glyphSelector) + + if value: + self.ColorLayers[glyphSelector] = value + 
elif glyphSelector in self.ColorLayers: + del self.ColorLayers[glyphSelector] + + def __delitem__(self, glyphSelector): + del self.ColorLayers[glyphSelector] + +class LayerRecord(object): + + def __init__(self, name=None, colorID=None): + self.name = name + self.colorID = colorID + + def toXML(self, writer, ttFont): + writer.simpletag("layer", name=self.name, colorID=self.colorID) + writer.newline() + + def fromXML(self, eltname, attrs, content, ttFont): + for (name, value) in attrs.items(): + if name == "name": + if isinstance(value, int): + value = ttFont.getGlyphName(value) + setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/C_P_A_L_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/C_P_A_L_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/C_P_A_L_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/C_P_A_L_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,100 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import struct + + +class table_C_P_A_L_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12]) + assert (self.version == 0), "Version of COLR table is higher than I know how to handle" + self.palettes = [] + pos = 12 + for i in range(numPalettes): + startIndex = struct.unpack(">H", data[pos:pos+2])[0] + assert (startIndex + self.numPaletteEntries <= numColorRecords) + pos += 2 + palette = [] + ppos = goffsetFirstColorRecord + startIndex * 4 + for j in range(self.numPaletteEntries): + palette.append( Color(*struct.unpack(">BBBB", data[ppos:ppos+4])) ) + ppos += 4 + self.palettes.append(palette) + + def compile(self, ttFont): + dataList = [struct.pack(">HHHHL", self.version, self.numPaletteEntries, len(self.palettes), self.numPaletteEntries * len(self.palettes), 12+2*len(self.palettes))] + for i in range(len(self.palettes)): + dataList.append(struct.pack(">H", i*self.numPaletteEntries)) + for palette in self.palettes: + assert(len(palette) == self.numPaletteEntries) + for color in palette: + dataList.append(struct.pack(">BBBB", color.blue,color.green,color.red,color.alpha)) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("numPaletteEntries", value=self.numPaletteEntries) + writer.newline() + for index, palette in enumerate(self.palettes): + writer.begintag("palette", index=index) + writer.newline() + assert(len(palette) == self.numPaletteEntries) + for cindex, color in enumerate(palette): + color.toXML(writer, ttFont, cindex) + writer.endtag("palette") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "palettes"): + self.palettes = [] + if name == "palette": + palette = [] + for element in content: + if isinstance(element, basestring): + 
continue + palette = [] + for element in content: + if isinstance(element, basestring): + continue + color = Color() + color.fromXML(element[0], element[1], element[2], ttFont) + palette.append (color) + self.palettes.append(palette) + elif "value" in attrs: + value = safeEval(attrs["value"]) + setattr(self, name, value) + +class Color(object): + + def __init__(self, blue=None, green=None, red=None, alpha=None): + self.blue = blue + self.green = green + self.red = red + self.alpha = alpha + + def hex(self): + return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha) + + def __repr__(self): + return self.hex() + + def toXML(self, writer, ttFont, index=None): + writer.simpletag("color", value=self.hex(), index=index) + writer.newline() + + def fromXML(self, eltname, attrs, content, ttFont): + value = attrs["value"] + if value[0] == '#': + value = value[1:] + self.red = int(value[0:2], 16) + self.green = int(value[2:4], 16) + self.blue = int(value[4:6], 16) + self.alpha = int(value[6:8], 16) if len (value) >= 8 else 0xFF diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_c_v_t.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_v_t.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_c_v_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_v_t.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,49 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import sys +import array + +class table__c_v_t(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + values = array.array("h") + values.fromstring(data) + if sys.byteorder != "big": + values.byteswap() + self.values = values + + def compile(self, ttFont): + values = self.values[:] + if sys.byteorder != "big": + values.byteswap() + return values.tostring() + + def toXML(self, writer, ttFont): + for i in range(len(self.values)): + value = self.values[i] + writer.simpletag("cv", value=value, index=i) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "values"): + self.values = array.array("h") + if name == "cv": + index = safeEval(attrs["index"]) + value = safeEval(attrs["value"]) + for i in range(1 + index - len(self.values)): + self.values.append(0) + self.values[index] = value + + def __len__(self): + return len(self.values) + + def __getitem__(self, index): + return self.values[index] + + def __setitem__(self, index, value): + self.values[index] = value + + def __delitem__(self, index): + del self.values[index] diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/DefaultTable.py fonttools-3.0/Snippets/fontTools/ttLib/tables/DefaultTable.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/DefaultTable.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/DefaultTable.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,47 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import getClassTag + +class DefaultTable(object): + + dependencies = [] + + def __init__(self, tag=None): + if tag is None: + tag = getClassTag(self.__class__) + self.tableTag = Tag(tag) + + def decompile(self, data, ttFont): + self.data = data + + def compile(self, ttFont): + return self.data + + def toXML(self, writer, ttFont, progress=None): + if hasattr(self, "ERROR"): + writer.comment("An error occurred 
during the decompilation of this table") + writer.newline() + writer.comment(self.ERROR) + writer.newline() + writer.begintag("hexdata") + writer.newline() + writer.dumphex(self.compile(ttFont)) + writer.endtag("hexdata") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + from fontTools.misc.textTools import readHex + from fontTools import ttLib + if name != "hexdata": + raise ttLib.TTLibError("can't handle '%s' element" % name) + self.decompile(readHex(content), ttFont) + + def __repr__(self): + return "<'%s' table at %x>" % (self.tableTag, id(self)) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/D_S_I_G_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/D_S_I_G_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/D_S_I_G_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/D_S_I_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,131 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from fontTools.misc import sstruct +from . 
import DefaultTable +import base64 + +DSIG_HeaderFormat = """ + > # big endian + ulVersion: L + usNumSigs: H + usFlag: H +""" +# followed by an array of usNumSigs DSIG_Signature records +DSIG_SignatureFormat = """ + > # big endian + ulFormat: L + ulLength: L # length includes DSIG_SignatureBlock header + ulOffset: L +""" +# followed by an array of usNumSigs DSIG_SignatureBlock records, +# each followed immediately by the pkcs7 bytes +DSIG_SignatureBlockFormat = """ + > # big endian + usReserved1: H + usReserved2: H + cbSignature: l # length of following raw pkcs7 data +""" + +# +# NOTE +# the DSIG table format allows for SignatureBlocks residing +# anywhere in the table and possibly in a different order as +# listed in the array after the first table header +# +# this implementation does not keep track of any gaps and/or data +# before or after the actual signature blocks while decompiling, +# and puts them in the same physical order as listed in the header +# on compilation with no padding whatsoever. 
+# + +class table_D_S_I_G_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self) + assert self.ulVersion == 1, "DSIG ulVersion must be 1" + assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0" + self.signatureRecords = sigrecs = [] + for n in range(self.usNumSigs): + sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord()) + assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n + sigrecs.append(sigrec) + for sigrec in sigrecs: + dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec) + assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserverd1 must be 0" % n + assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserverd2 must be 0" % n + sigrec.pkcs7 = newData[:sigrec.cbSignature] + + def compile(self, ttFont): + packed = sstruct.pack(DSIG_HeaderFormat, self) + headers = [packed] + offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat) + data = [] + for sigrec in self.signatureRecords: + # first pack signature block + sigrec.cbSignature = len(sigrec.pkcs7) + packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7 + data.append(packed) + # update redundant length field + sigrec.ulLength = len(packed) + # update running table offset + sigrec.ulOffset = offset + headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec)) + offset += sigrec.ulLength + if offset % 2: + # Pad to even bytes + data.append(b'\0') + return bytesjoin(headers+data) + + def toXML(self, xmlWriter, ttFont): + xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!") + xmlWriter.newline() + xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag) + for sigrec in self.signatureRecords: + xmlWriter.newline() + sigrec.toXML(xmlWriter, ttFont) + xmlWriter.newline() + + def 
fromXML(self, name, attrs, content, ttFont): + if name == "tableHeader": + self.signatureRecords = [] + self.ulVersion = safeEval(attrs["version"]) + self.usNumSigs = safeEval(attrs["numSigs"]) + self.usFlag = safeEval(attrs["flag"]) + return + if name == "SignatureRecord": + sigrec = SignatureRecord() + sigrec.fromXML(name, attrs, content, ttFont) + self.signatureRecords.append(sigrec) + +pem_spam = lambda l, spam = { + "-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True +}: not spam.get(l.strip()) + +def b64encode(b): + s = base64.b64encode(b) + # Line-break at 76 chars. + items = [] + while s: + items.append(tostr(s[:76])) + items.append('\n') + s = s[76:] + return strjoin(items) + +class SignatureRecord(object): + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self.__dict__) + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, format=self.ulFormat) + writer.newline() + writer.write_noindent("-----BEGIN PKCS7-----\n") + writer.write_noindent(b64encode(self.pkcs7)) + writer.write_noindent("-----END PKCS7-----\n") + writer.endtag(self.__class__.__name__) + + def fromXML(self, name, attrs, content, ttFont): + self.ulFormat = safeEval(attrs["format"]) + self.usReserved1 = safeEval(attrs.get("reserved1", "0")) + self.usReserved2 = safeEval(attrs.get("reserved2", "0")) + self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content)))) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/E_B_D_T_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/E_B_D_T_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/E_B_D_T_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/E_B_D_T_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,759 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, readHex, hexStr, deHexStr +from 
.BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat +from . import DefaultTable +import itertools +import os +import struct + +ebdtTableVersionFormat = """ + > # big endian + version: 16.16F +""" + +ebdtComponentFormat = """ + > # big endian + glyphCode: H + xOffset: b + yOffset: b +""" + +class table_E_B_D_T_(DefaultTable.DefaultTable): + + # Keep a reference to the name of the data locator table. + locatorName = 'EBLC' + + # This method can be overridden in subclasses to support new formats + # without changing the other implementation. Also can be used as a + # convenience method for coverting a font file to an alternative format. + def getImageFormatClass(self, imageFormat): + return ebdt_bitmap_classes[imageFormat] + + def decompile(self, data, ttFont): + # Get the version but don't advance the slice. + # Most of the lookup for this table is done relative + # to the begining so slice by the offsets provided + # in the EBLC table. + sstruct.unpack2(ebdtTableVersionFormat, data, self) + + # Keep a dict of glyphs that have been seen so they aren't remade. + # This dict maps intervals of data to the BitmapGlyph. + glyphDict = {} + + # Pull out the EBLC table and loop through glyphs. + # A strike is a concept that spans both tables. + # The actual bitmap data is stored in the EBDT. + locator = ttFont[self.__class__.locatorName] + self.strikeData = [] + for curStrike in locator.strikes: + bitmapGlyphDict = {} + self.strikeData.append(bitmapGlyphDict) + for indexSubTable in curStrike.indexSubTables: + dataIter = zip(indexSubTable.names, indexSubTable.locations) + for curName, curLoc in dataIter: + # Don't create duplicate data entries for the same glyphs. + # Instead just use the structures that already exist if they exist. 
+ if curLoc in glyphDict: + curGlyph = glyphDict[curLoc] + else: + curGlyphData = data[slice(*curLoc)] + imageFormatClass = self.getImageFormatClass(indexSubTable.imageFormat) + curGlyph = imageFormatClass(curGlyphData, ttFont) + glyphDict[curLoc] = curGlyph + bitmapGlyphDict[curName] = curGlyph + + def compile(self, ttFont): + + dataList = [] + dataList.append(sstruct.pack(ebdtTableVersionFormat, self)) + dataSize = len(dataList[0]) + + # Keep a dict of glyphs that have been seen so they aren't remade. + # This dict maps the id of the BitmapGlyph to the interval + # in the data. + glyphDict = {} + + # Go through the bitmap glyph data. Just in case the data for a glyph + # changed the size metrics should be recalculated. There are a variety + # of formats and they get stored in the EBLC table. That is why + # recalculation is defered to the EblcIndexSubTable class and just + # pass what is known about bitmap glyphs from this particular table. + locator = ttFont[self.__class__.locatorName] + for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): + for curIndexSubTable in curStrike.indexSubTables: + dataLocations = [] + for curName in curIndexSubTable.names: + # Handle the data placement based on seeing the glyph or not. + # Just save a reference to the location if the glyph has already + # been saved in compile. This code assumes that glyphs will only + # be referenced multiple times from indexFormat5. By luck the + # code may still work when referencing poorly ordered fonts with + # duplicate references. If there is a font that is unlucky the + # respective compile methods for the indexSubTables will fail + # their assertions. All fonts seem to follow this assumption. + # More complicated packing may be needed if a counter-font exists. 
+ glyph = curGlyphDict[curName] + objectId = id(glyph) + if objectId not in glyphDict: + data = glyph.compile(ttFont) + data = curIndexSubTable.padBitmapData(data) + startByte = dataSize + dataSize += len(data) + endByte = dataSize + dataList.append(data) + dataLoc = (startByte, endByte) + glyphDict[objectId] = dataLoc + else: + dataLoc = glyphDict[objectId] + dataLocations.append(dataLoc) + # Just use the new data locations in the indexSubTable. + # The respective compile implementations will take care + # of any of the problems in the convertion that may arise. + curIndexSubTable.locations = dataLocations + + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + # When exporting to XML if one of the data export formats + # requires metrics then those metrics may be in the locator. + # In this case populate the bitmaps with "export metrics". + if ttFont.bitmapGlyphDataFormat in ('row', 'bitwise'): + locator = ttFont[self.__class__.locatorName] + for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): + for curIndexSubTable in curStrike.indexSubTables: + for curName in curIndexSubTable.names: + glyph = curGlyphDict[curName] + # I'm not sure which metrics have priority here. + # For now if both metrics exist go with glyph metrics. 
+ if hasattr(glyph, 'metrics'): + glyph.exportMetrics = glyph.metrics + else: + glyph.exportMetrics = curIndexSubTable.metrics + glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth + + writer.simpletag("header", [('version', self.version)]) + writer.newline() + locator = ttFont[self.__class__.locatorName] + for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData): + writer.begintag('strikedata', [('index', strikeIndex)]) + writer.newline() + for curName, curBitmap in bitmapGlyphDict.items(): + curBitmap.toXML(strikeIndex, curName, writer, ttFont) + writer.endtag('strikedata') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'header': + self.version = safeEval(attrs['version']) + elif name == 'strikedata': + if not hasattr(self, 'strikeData'): + self.strikeData = [] + strikeIndex = safeEval(attrs['index']) + + bitmapGlyphDict = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]): + imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix):]) + glyphName = attrs['name'] + imageFormatClass = self.getImageFormatClass(imageFormat) + curGlyph = imageFormatClass(None, None) + curGlyph.fromXML(name, attrs, content, ttFont) + assert glyphName not in bitmapGlyphDict, "Duplicate glyphs with the same name '%s' in the same strike." % glyphName + bitmapGlyphDict[glyphName] = curGlyph + else: + print("Warning: %s being ignored by %s", name, self.__class__.__name__) + + # Grow the strike data array to the appropriate size. The XML + # format allows the strike index value to be out of order. + if strikeIndex >= len(self.strikeData): + self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData)) + assert self.strikeData[strikeIndex] is None, "Duplicate strike EBDT indices." 
+ self.strikeData[strikeIndex] = bitmapGlyphDict + +class EbdtComponent(object): + + def toXML(self, writer, ttFont): + writer.begintag('ebdtComponent', [('name', self.name)]) + writer.newline() + for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]: + writer.simpletag(componentName, value=getattr(self, componentName)) + writer.newline() + writer.endtag('ebdtComponent') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.name = attrs['name'] + componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:]) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name in componentNames: + vars(self)[name] = safeEval(attrs['value']) + else: + print("Warning: unknown name '%s' being ignored by EbdtComponent." % name) + +# Helper functions for dealing with binary. + +def _data2binary(data, numBits): + binaryList = [] + for curByte in data: + value = byteord(curByte) + numBitsCut = min(8, numBits) + for i in range(numBitsCut): + if value & 0x1: + binaryList.append('1') + else: + binaryList.append('0') + value = value >> 1 + numBits -= numBitsCut + return strjoin(binaryList) + +def _binary2data(binary): + byteList = [] + for bitLoc in range(0, len(binary), 8): + byteString = binary[bitLoc:bitLoc+8] + curByte = 0 + for curBit in reversed(byteString): + curByte = curByte << 1 + if curBit == '1': + curByte |= 1 + byteList.append(bytechr(curByte)) + return bytesjoin(byteList) + +def _memoize(f): + class memodict(dict): + def __missing__(self, key): + ret = f(key) + if len(key) == 1: + self[key] = ret + return ret + return memodict().__getitem__ + +# 00100111 -> 11100100 per byte, not to be confused with little/big endian. +# Bitmap data per byte is in the order that binary is written on the page +# with the least significant bit as far right as possible. This is the +# opposite of what makes sense algorithmically and hence this function. 
+@_memoize +def _reverseBytes(data): + if len(data) != 1: + return bytesjoin(map(_reverseBytes, data)) + byte = byteord(data) + result = 0 + for i in range(8): + result = result << 1 + result |= byte & 1 + byte = byte >> 1 + return bytechr(result) + +# This section of code is for reading and writing image data to/from XML. + +def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + writer.begintag('rawimagedata') + writer.newline() + writer.dumphex(bitmapObject.imageData) + writer.endtag('rawimagedata') + writer.newline() + +def _readRawImageData(bitmapObject, name, attrs, content, ttFont): + bitmapObject.imageData = readHex(content) + +def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + metrics = bitmapObject.exportMetrics + del bitmapObject.exportMetrics + bitDepth = bitmapObject.exportBitDepth + del bitmapObject.exportBitDepth + + writer.begintag('rowimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height) + writer.newline() + for curRow in range(metrics.height): + rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics) + writer.simpletag('row', value=hexStr(rowData)) + writer.newline() + writer.endtag('rowimagedata') + writer.newline() + +def _readRowImageData(bitmapObject, name, attrs, content, ttFont): + bitDepth = safeEval(attrs['bitDepth']) + metrics = SmallGlyphMetrics() + metrics.width = safeEval(attrs['width']) + metrics.height = safeEval(attrs['height']) + + dataRows = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + # Chop off 'imagedata' from the tag to get just the option. 
+ if name == 'row': + dataRows.append(deHexStr(attr['value'])) + bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics) + +def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + metrics = bitmapObject.exportMetrics + del bitmapObject.exportMetrics + bitDepth = bitmapObject.exportBitDepth + del bitmapObject.exportBitDepth + + # A dict for mapping binary to more readable/artistic ASCII characters. + binaryConv = {'0':'.', '1':'@'} + + writer.begintag('bitwiseimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height) + writer.newline() + for curRow in range(metrics.height): + rowData = bitmapObject.getRow(curRow, bitDepth=1, metrics=metrics, reverseBytes=True) + rowData = _data2binary(rowData, metrics.width) + # Make the output a readable ASCII art form. + rowData = strjoin(map(binaryConv.get, rowData)) + writer.simpletag('row', value=rowData) + writer.newline() + writer.endtag('bitwiseimagedata') + writer.newline() + +def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont): + bitDepth = safeEval(attrs['bitDepth']) + metrics = SmallGlyphMetrics() + metrics.width = safeEval(attrs['width']) + metrics.height = safeEval(attrs['height']) + + # A dict for mapping from ASCII to binary. All characters are considered + # a '1' except space, period and '0' which maps to '0'. 
+ binaryConv = {' ':'0', '.':'0', '0':'0'} + + dataRows = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + if name == 'row': + mapParams = zip(attr['value'], itertools.repeat('1')) + rowData = strjoin(itertools.starmap(binaryConv.get, mapParams)) + dataRows.append(_binary2data(rowData)) + + bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True) + +def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + try: + folder = os.path.dirname(writer.file.name) + except AttributeError: + # fall back to current directory if output file's directory isn't found + folder = '.' + folder = os.path.join(folder, 'bitmaps') + filename = glyphName + bitmapObject.fileExtension + if not os.path.isdir(folder): + os.makedirs(folder) + folder = os.path.join(folder, 'strike%d' % strikeIndex) + if not os.path.isdir(folder): + os.makedirs(folder) + + fullPath = os.path.join(folder, filename) + writer.simpletag('extfileimagedata', value=fullPath) + writer.newline() + + with open(fullPath, "wb") as file: + file.write(bitmapObject.imageData) + +def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont): + fullPath = attrs['value'] + with open(fullPath, "rb") as file: + bitmapObject.imageData = file.read() + +# End of XML writing code. + +# Important information about the naming scheme. Used for identifying formats +# in XML. +_bitmapGlyphSubclassPrefix = 'ebdt_bitmap_format_' + +class BitmapGlyph(object): + + # For the external file format. This can be changed in subclasses. This way + # when the extfile option is turned on files have the form: glyphName.ext + # The default is just a flat binary file with no meaning. + fileExtension = '.bin' + + # Keep track of reading and writing of various forms. 
+ xmlDataFunctions = { + 'raw': (_writeRawImageData, _readRawImageData), + 'row': (_writeRowImageData, _readRowImageData), + 'bitwise': (_writeBitwiseImageData, _readBitwiseImageData), + 'extfile': (_writeExtFileImageData, _readExtFileImageData), + } + + def __init__(self, data, ttFont): + self.data = data + self.ttFont = ttFont + # TODO Currently non-lazy decompilation is untested here... + #if not ttFont.lazy: + # self.decompile() + # del self.data + + def __getattr__(self, attr): + # Allow lazy decompile. + if attr[:2] == '__': + raise AttributeError(attr) + if not hasattr(self, "data"): + raise AttributeError(attr) + self.decompile() + del self.data + return getattr(self, attr) + + # Not a fan of this but it is needed for safer safety checking. + def getFormat(self): + return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix):]) + + def toXML(self, strikeIndex, glyphName, writer, ttFont): + writer.begintag(self.__class__.__name__, [('name', glyphName)]) + writer.newline() + + self.writeMetrics(writer, ttFont) + # Use the internal write method to write using the correct output format. + self.writeData(strikeIndex, glyphName, writer, ttFont) + + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.readMetrics(name, attrs, content, ttFont) + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + if not name.endswith('imagedata'): + continue + # Chop off 'imagedata' from the tag to get just the option. + option = name[:-len('imagedata')] + assert option in self.__class__.xmlDataFunctions + self.readData(name, attr, content, ttFont) + + # Some of the glyphs have the metrics. This allows for metrics to be + # added if the glyph format has them. Default behavior is to do nothing. + def writeMetrics(self, writer, ttFont): + pass + + # The opposite of write metrics. 
+ def readMetrics(self, name, attrs, content, ttFont): + pass + + def writeData(self, strikeIndex, glyphName, writer, ttFont): + try: + writeFunc, readFunc = self.__class__.xmlDataFunctions[ttFont.bitmapGlyphDataFormat] + except KeyError: + writeFunc = _writeRawImageData + writeFunc(strikeIndex, glyphName, self, writer, ttFont) + + def readData(self, name, attrs, content, ttFont): + # Chop off 'imagedata' from the tag to get just the option. + option = name[:-len('imagedata')] + writeFunc, readFunc = self.__class__.xmlDataFunctions[option] + readFunc(self, name, attrs, content, ttFont) + + +# A closure for creating a mixin for the two types of metrics handling. +# Most of the code is very similar so its easier to deal with here. +# Everything works just by passing the class that the mixin is for. +def _createBitmapPlusMetricsMixin(metricsClass): + # Both metrics names are listed here to make meaningful error messages. + metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__] + curMetricsName = metricsClass.__name__ + # Find which metrics this is for and determine the opposite name. + metricsId = metricStrings.index(curMetricsName) + oppositeMetricsName = metricStrings[1-metricsId] + + class BitmapPlusMetricsMixin(object): + + def writeMetrics(self, writer, ttFont): + self.metrics.toXML(writer, ttFont) + + def readMetrics(self, name, attrs, content, ttFont): + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == curMetricsName: + self.metrics = metricsClass() + self.metrics.fromXML(name, attrs, content, ttFont) + elif name == oppositeMetricsName: + print("Warning: %s being ignored in format %d." % oppositeMetricsName, self.getFormat()) + + return BitmapPlusMetricsMixin + +# Since there are only two types of mixin's just create them here. 
+BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics) +BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics) + +# Data that is bit aligned can be tricky to deal with. These classes implement +# helper functionality for dealing with the data and getting a particular row +# of bitwise data. Also helps implement fancy data export/import in XML. +class BitAlignedBitmapMixin(object): + + def _getBitRange(self, row, bitDepth, metrics): + rowBits = (bitDepth * metrics.width) + bitOffset = row * rowBits + return (bitOffset, bitOffset+rowBits) + + def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False): + if metrics is None: + metrics = self.metrics + assert 0 <= row and row < metrics.height, "Illegal row access in bitmap" + + # Loop through each byte. This can cover two bytes in the original data or + # a single byte if things happen to be aligned. The very last entry might + # not be aligned so take care to trim the binary data to size and pad with + # zeros in the row data. Bit aligned data is somewhat tricky. + # + # Example of data cut. Data cut represented in x's. + # '|' represents byte boundary. + # data = ...0XX|XXXXXX00|000... => XXXXXXXX + # or + # data = ...0XX|XXXX0000|000... => XXXXXX00 + # or + # data = ...000|XXXXXXXX|000... => XXXXXXXX + # or + # data = ...000|00XXXX00|000... 
=> XXXX0000 + # + dataList = [] + bitRange = self._getBitRange(row, bitDepth, metrics) + stepRange = bitRange + (8,) + for curBit in range(*stepRange): + endBit = min(curBit+8, bitRange[1]) + numBits = endBit - curBit + cutPoint = curBit % 8 + firstByteLoc = curBit // 8 + secondByteLoc = endBit // 8 + if firstByteLoc < secondByteLoc: + numBitsCut = 8 - cutPoint + else: + numBitsCut = endBit - curBit + curByte = _reverseBytes(self.imageData[firstByteLoc]) + firstHalf = byteord(curByte) >> cutPoint + firstHalf = ((1<> numBitsCut) & ((1<<8-numBitsCut)-1) + ordDataList[secondByteLoc] |= secondByte + + # Save the image data with the bits going the correct way. + self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList))) + +class ByteAlignedBitmapMixin(object): + + def _getByteRange(self, row, bitDepth, metrics): + rowBytes = (bitDepth * metrics.width + 7) // 8 + byteOffset = row * rowBytes + return (byteOffset, byteOffset+rowBytes) + + def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False): + if metrics is None: + metrics = self.metrics + assert 0 <= row and row < metrics.height, "Illegal row access in bitmap" + byteRange = self._getByteRange(row, bitDepth, metrics) + data = self.imageData[slice(*byteRange)] + if reverseBytes: + data = _reverseBytes(data) + return data + + def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False): + if metrics is None: + metrics = self.metrics + if reverseBytes: + dataRows = map(_reverseBytes, dataRows) + self.imageData = bytesjoin(dataRows) + +class ebdt_bitmap_format_1(ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(smallGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ebdt_bitmap_format_2(BitAlignedBitmapMixin, 
BitmapPlusSmallMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(smallGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph): + + def decompile(self): + self.imageData = self.data + + def compile(self, ttFont): + return self.imageData + +class ebdt_bitmap_format_6(ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(bigGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ebdt_bitmap_format_7(BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(bigGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ComponentBitmapGlyph(BitmapGlyph): + + def toXML(self, strikeIndex, glyphName, writer, ttFont): + writer.begintag(self.__class__.__name__, [('name', glyphName)]) + writer.newline() + + self.writeMetrics(writer, ttFont) + + writer.begintag('components') + writer.newline() + for curComponent in self.componentArray: + curComponent.toXML(writer, ttFont) + writer.endtag('components') + writer.newline() + + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.readMetrics(name, attrs, content, ttFont) + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + if name == 'components': + 
self.componentArray = [] + for compElement in content: + if not isinstance(compElement, tuple): + continue + name, attrs, content = compElement + if name == 'ebdtComponent': + curComponent = EbdtComponent() + curComponent.fromXML(name, attrs, content, ttFont) + self.componentArray.append(curComponent) + else: + print("Warning: '%s' being ignored in component array." % name) + + +class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + data = data[1:] + + (numComponents,) = struct.unpack(">H", data[:2]) + data = data[2:] + self.componentArray = [] + for i in range(numComponents): + curComponent = EbdtComponent() + dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent) + curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode) + self.componentArray.append(curComponent) + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) + dataList.append(b'\0') + dataList.append(struct.pack(">H", len(self.componentArray))) + for curComponent in self.componentArray: + curComponent.glyphCode = ttFont.getGlyphID(curComponent.name) + dataList.append(sstruct.pack(ebdtComponentFormat, curComponent)) + return bytesjoin(dataList) + + +class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + (numComponents,) = struct.unpack(">H", data[:2]) + data = data[2:] + self.componentArray = [] + for i in range(numComponents): + curComponent = EbdtComponent() + dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent) + curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode) + self.componentArray.append(curComponent) + + def compile(self, ttFont): + dataList = 
[] + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + dataList.append(struct.pack(">H", len(self.componentArray))) + for curComponent in self.componentArray: + curComponent.glyphCode = ttFont.getGlyphID(curComponent.name) + dataList.append(sstruct.pack(ebdtComponentFormat, curComponent)) + return bytesjoin(dataList) + + +# Dictionary of bitmap formats to the class representing that format +# currently only the ones listed in this map are the ones supported. +ebdt_bitmap_classes = { + 1: ebdt_bitmap_format_1, + 2: ebdt_bitmap_format_2, + 5: ebdt_bitmap_format_5, + 6: ebdt_bitmap_format_6, + 7: ebdt_bitmap_format_7, + 8: ebdt_bitmap_format_8, + 9: ebdt_bitmap_format_9, + } diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/E_B_L_C_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/E_B_L_C_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/E_B_L_C_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/E_B_L_C_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,617 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . import DefaultTable +from fontTools.misc.textTools import safeEval +from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat +import struct +import itertools +from collections import deque + +eblcHeaderFormat = """ + > # big endian + version: 16.16F + numSizes: I +""" +# The table format string is split to handle sbitLineMetrics simply. +bitmapSizeTableFormatPart1 = """ + > # big endian + indexSubTableArrayOffset: I + indexTablesSize: I + numberOfIndexSubTables: I + colorRef: I +""" +# The compound type for hori and vert. 
+sbitLineMetricsFormat = """ + > # big endian + ascender: b + descender: b + widthMax: B + caretSlopeNumerator: b + caretSlopeDenominator: b + caretOffset: b + minOriginSB: b + minAdvanceSB: b + maxBeforeBL: b + minAfterBL: b + pad1: b + pad2: b +""" +# hori and vert go between the two parts. +bitmapSizeTableFormatPart2 = """ + > # big endian + startGlyphIndex: H + endGlyphIndex: H + ppemX: B + ppemY: B + bitDepth: B + flags: b +""" + +indexSubTableArrayFormat = ">HHL" +indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat) + +indexSubHeaderFormat = ">HHL" +indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat) + +codeOffsetPairFormat = ">HH" +codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat) + +class table_E_B_L_C_(DefaultTable.DefaultTable): + + dependencies = ['EBDT'] + + # This method can be overridden in subclasses to support new formats + # without changing the other implementation. Also can be used as a + # convenience method for coverting a font file to an alternative format. + def getIndexFormatClass(self, indexFormat): + return eblc_sub_table_classes[indexFormat] + + def decompile(self, data, ttFont): + + # Save the original data because offsets are from the start of the table. 
+ origData = data + + dummy, data = sstruct.unpack2(eblcHeaderFormat, data, self) + + self.strikes = [] + for curStrikeIndex in range(self.numSizes): + curStrike = Strike() + self.strikes.append(curStrike) + curTable = curStrike.bitmapSizeTable + dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart1, data, curTable) + for metric in ('hori', 'vert'): + metricObj = SbitLineMetrics() + vars(curTable)[metric] = metricObj + dummy, data = sstruct.unpack2(sbitLineMetricsFormat, data, metricObj) + dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart2, data, curTable) + + for curStrike in self.strikes: + curTable = curStrike.bitmapSizeTable + for subtableIndex in range(curTable.numberOfIndexSubTables): + lowerBound = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize + upperBound = lowerBound + indexSubTableArraySize + data = origData[lowerBound:upperBound] + + tup = struct.unpack(indexSubTableArrayFormat, data) + (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup + offsetToIndexSubTable = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable + data = origData[offsetToIndexSubTable:] + + tup = struct.unpack(indexSubHeaderFormat, data[:indexSubHeaderSize]) + (indexFormat, imageFormat, imageDataOffset) = tup + + indexFormatClass = self.getIndexFormatClass(indexFormat) + indexSubTable = indexFormatClass(data[indexSubHeaderSize:], ttFont) + indexSubTable.firstGlyphIndex = firstGlyphIndex + indexSubTable.lastGlyphIndex = lastGlyphIndex + indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable + indexSubTable.indexFormat = indexFormat + indexSubTable.imageFormat = imageFormat + indexSubTable.imageDataOffset = imageDataOffset + curStrike.indexSubTables.append(indexSubTable) + + def compile(self, ttFont): + + dataList = [] + self.numSizes = len(self.strikes) + dataList.append(sstruct.pack(eblcHeaderFormat, self)) + + # Data size of the header + bitmapSizeTable needs to be calculated + # in 
order to form offsets. This value will hold the size of the data + # in dataList after all the data is consolidated in dataList. + dataSize = len(dataList[0]) + + # The table will be structured in the following order: + # (0) header + # (1) Each bitmapSizeTable [1 ... self.numSizes] + # (2) Alternate between indexSubTableArray and indexSubTable + # for each bitmapSizeTable present. + # + # The issue is maintaining the proper offsets when table information + # gets moved around. All offsets and size information must be recalculated + # when building the table to allow editing within ttLib and also allow easy + # import/export to and from XML. All of this offset information is lost + # when exporting to XML so everything must be calculated fresh so importing + # from XML will work cleanly. Only byte offset and size information is + # calculated fresh. Count information like numberOfIndexSubTables is + # checked through assertions. If the information in this table was not + # touched or was changed properly then these types of values should match. + # + # The table will be rebuilt the following way: + # (0) Precompute the size of all the bitmapSizeTables. This is needed to + # compute the offsets properly. + # (1) For each bitmapSizeTable compute the indexSubTable and + # indexSubTableArray pair. The indexSubTable must be computed first + # so that the offset information in indexSubTableArray can be + # calculated. Update the data size after each pairing. + # (2) Build each bitmapSizeTable. + # (3) Consolidate all the data into the main dataList in the correct order. 
+ + for curStrike in self.strikes: + dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1) + dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat) + dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2) + + indexSubTablePairDataList = [] + for curStrike in self.strikes: + curTable = curStrike.bitmapSizeTable + curTable.numberOfIndexSubTables = len(curStrike.indexSubTables) + curTable.indexSubTableArrayOffset = dataSize + + # Precompute the size of the indexSubTableArray. This information + # is important for correctly calculating the new value for + # additionalOffsetToIndexSubtable. + sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize + lowerBound = dataSize + dataSize += sizeOfSubTableArray + upperBound = dataSize + + indexSubTableDataList = [] + for indexSubTable in curStrike.indexSubTables: + indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset + glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names)) + indexSubTable.firstGlyphIndex = min(glyphIds) + indexSubTable.lastGlyphIndex = max(glyphIds) + data = indexSubTable.compile(ttFont) + indexSubTableDataList.append(data) + dataSize += len(data) + curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables) + curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables) + + for i in curStrike.indexSubTables: + data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable) + indexSubTablePairDataList.append(data) + indexSubTablePairDataList.extend(indexSubTableDataList) + curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset + + for curStrike in self.strikes: + curTable = curStrike.bitmapSizeTable + data = sstruct.pack(bitmapSizeTableFormatPart1, curTable) + dataList.append(data) + for metric in ('hori', 'vert'): + metricObj = vars(curTable)[metric] + data = sstruct.pack(sbitLineMetricsFormat, 
metricObj) + dataList.append(data) + data = sstruct.pack(bitmapSizeTableFormatPart2, curTable) + dataList.append(data) + dataList.extend(indexSubTablePairDataList) + + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + writer.simpletag('header', [('version', self.version)]) + writer.newline() + for curIndex, curStrike in enumerate(self.strikes): + curStrike.toXML(curIndex, writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == 'header': + self.version = safeEval(attrs['version']) + elif name == 'strike': + if not hasattr(self, 'strikes'): + self.strikes = [] + strikeIndex = safeEval(attrs['index']) + curStrike = Strike() + curStrike.fromXML(name, attrs, content, ttFont, self) + + # Grow the strike array to the appropriate size. The XML format + # allows for the strike index value to be out of order. + if strikeIndex >= len(self.strikes): + self.strikes += [None] * (strikeIndex + 1 - len(self.strikes)) + assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices." + self.strikes[strikeIndex] = curStrike + +class Strike(object): + + def __init__(self): + self.bitmapSizeTable = BitmapSizeTable() + self.indexSubTables = [] + + def toXML(self, strikeIndex, writer, ttFont): + writer.begintag('strike', [('index', strikeIndex)]) + writer.newline() + self.bitmapSizeTable.toXML(writer, ttFont) + writer.comment('GlyphIds are written but not read. 
The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.') + writer.newline() + for indexSubTable in self.indexSubTables: + indexSubTable.toXML(writer, ttFont) + writer.endtag('strike') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont, locator): + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'bitmapSizeTable': + self.bitmapSizeTable.fromXML(name, attrs, content, ttFont) + elif name.startswith(_indexSubTableSubclassPrefix): + indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):]) + indexFormatClass = locator.getIndexFormatClass(indexFormat) + indexSubTable = indexFormatClass(None, None) + indexSubTable.indexFormat = indexFormat + indexSubTable.fromXML(name, attrs, content, ttFont) + self.indexSubTables.append(indexSubTable) + + +class BitmapSizeTable(object): + + # Returns all the simple metric names that bitmap size table + # cares about in terms of XML creation. + def _getXMLMetricNames(self): + dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1] + dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1] + # Skip the first 3 data names because they are byte offsets and counts. + return dataNames[3:] + + def toXML(self, writer, ttFont): + writer.begintag('bitmapSizeTable') + writer.newline() + for metric in ('hori', 'vert'): + getattr(self, metric).toXML(metric, writer, ttFont) + for metricName in self._getXMLMetricNames(): + writer.simpletag(metricName, value=getattr(self, metricName)) + writer.newline() + writer.endtag('bitmapSizeTable') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + # Create a lookup for all the simple names that make sense to + # bitmap size table. Only read the information from these names. 
+ dataNames = set(self._getXMLMetricNames()) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'sbitLineMetrics': + direction = attrs['direction'] + assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid." + metricObj = SbitLineMetrics() + metricObj.fromXML(name, attrs, content, ttFont) + vars(self)[direction] = metricObj + elif name in dataNames: + vars(self)[name] = safeEval(attrs['value']) + else: + print("Warning: unknown name '%s' being ignored in BitmapSizeTable." % name) + + +class SbitLineMetrics(object): + + def toXML(self, name, writer, ttFont): + writer.begintag('sbitLineMetrics', [('direction', name)]) + writer.newline() + for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]: + writer.simpletag(metricName, value=getattr(self, metricName)) + writer.newline() + writer.endtag('sbitLineMetrics') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1]) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name in metricNames: + vars(self)[name] = safeEval(attrs['value']) + +# Important information about the naming scheme. Used for identifying subtables. +_indexSubTableSubclassPrefix = 'eblc_index_sub_table_' + +class EblcIndexSubTable(object): + + def __init__(self, data, ttFont): + self.data = data + self.ttFont = ttFont + # TODO Currently non-lazy decompiling doesn't work for this class... + #if not ttFont.lazy: + # self.decompile() + # del self.data, self.ttFont + + def __getattr__(self, attr): + # Allow lazy decompile. + if attr[:2] == '__': + raise AttributeError(attr) + if not hasattr(self, "data"): + raise AttributeError(attr) + self.decompile() + del self.data, self.ttFont + return getattr(self, attr) + + # This method just takes care of the indexSubHeader. 
Implementing subclasses + # should call it to compile the indexSubHeader and then continue compiling + # the remainder of their unique format. + def compile(self, ttFont): + return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset) + + # Creates the XML for bitmap glyphs. Each index sub table basically makes + # the same XML except for specific metric information that is written + # out via a method call that a subclass implements optionally. + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, [ + ('imageFormat', self.imageFormat), + ('firstGlyphIndex', self.firstGlyphIndex), + ('lastGlyphIndex', self.lastGlyphIndex), + ]) + writer.newline() + self.writeMetrics(writer, ttFont) + # Write out the names as thats all thats needed to rebuild etc. + # For font debugging of consecutive formats the ids are also written. + # The ids are not read when moving from the XML format. + glyphIds = map(ttFont.getGlyphID, self.names) + for glyphName, glyphId in zip(self.names, glyphIds): + writer.simpletag('glyphLoc', name=glyphName, id=glyphId) + writer.newline() + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + # Read all the attributes. Even though the glyph indices are + # recalculated, they are still read in case there needs to + # be an immediate export of the data. + self.imageFormat = safeEval(attrs['imageFormat']) + self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex']) + self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex']) + + self.readMetrics(name, attrs, content, ttFont) + + self.names = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'glyphLoc': + self.names.append(attrs['name']) + + # A helper method that writes the metrics for the index sub table. 
It also + # is responsible for writing the image size for fixed size data since fixed + # size is not recalculated on compile. Default behavior is to do nothing. + def writeMetrics(self, writer, ttFont): + pass + + # A helper method that is the inverse of writeMetrics. + def readMetrics(self, name, attrs, content, ttFont): + pass + + # This method is for fixed glyph data sizes. There are formats where + # the glyph data is fixed but are actually composite glyphs. To handle + # this the font spec in indexSubTable makes the data the size of the + # fixed size by padding the component arrays. This function abstracts + # out this padding process. Input is data unpadded. Output is data + # padded only in fixed formats. Default behavior is to return the data. + def padBitmapData(self, data): + return data + + # Remove any of the glyph locations and names that are flagged as skipped. + # This only occurs in formats {1,3}. + def removeSkipGlyphs(self): + # Determines if a name, location pair is a valid data location. + # Skip glyphs are marked when the size is equal to zero. + def isValidLocation(args): + (name, (startByte, endByte)) = args + return startByte < endByte + # Remove all skip glyphs. + dataPairs = list(filter(isValidLocation, zip(self.names, self.locations))) + self.names, self.locations = list(map(list, zip(*dataPairs))) + +# A closure for creating a custom mixin. This is done because formats 1 and 3 +# are very similar. The only difference between them is the size per offset +# value. Code put in here should handle both cases generally. +def _createOffsetArrayIndexSubTableMixin(formatStringForDataType): + + # Prep the data size for the offset array data format. 
+ dataFormat = '>'+formatStringForDataType + offsetDataSize = struct.calcsize(dataFormat) + + class OffsetArrayIndexSubTableMixin(object): + + def decompile(self): + + numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1 + indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)] + indexingLocations = zip(indexingOffsets, indexingOffsets[1:]) + offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations] + + glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) + modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray] + self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:])) + + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + self.removeSkipGlyphs() + + def compile(self, ttFont): + # First make sure that all the data lines up properly. Formats 1 and 3 + # must have all its data lined up consecutively. If not this will fail. + for curLoc, nxtLoc in zip(self.locations, self.locations[1:]): + assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats" + + glyphIds = list(map(ttFont.getGlyphID, self.names)) + # Make sure that all ids are sorted strictly increasing. + assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1)) + + # Run a simple algorithm to add skip glyphs to the data locations at + # the places where an id is not present. + idQueue = deque(glyphIds) + locQueue = deque(self.locations) + allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) + allLocations = [] + for curId in allGlyphIds: + if curId != idQueue[0]: + allLocations.append((locQueue[0][0], locQueue[0][0])) + else: + idQueue.popleft() + allLocations.append(locQueue.popleft()) + + # Now that all the locations are collected, pack them appropriately into + # offsets. This is the form where offset[i] is the location and + # offset[i+1]-offset[i] is the size of the data location. 
+ offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]] + # Image data offset must be less than or equal to the minimum of locations. + # This offset may change the value for round tripping but is safer and + # allows imageDataOffset to not be required to be in the XML version. + self.imageDataOffset = min(offsets) + offsetArray = [offset - self.imageDataOffset for offset in offsets] + + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray] + # Take care of any padding issues. Only occurs in format 3. + if offsetDataSize * len(dataList) % 4 != 0: + dataList.append(struct.pack(dataFormat, 0)) + return bytesjoin(dataList) + + return OffsetArrayIndexSubTableMixin + +# A Mixin for functionality shared between the different kinds +# of fixed sized data handling. Both kinds have big metrics so +# that kind of special processing is also handled in this mixin. +class FixedSizeIndexSubTableMixin(object): + + def writeMetrics(self, writer, ttFont): + writer.simpletag('imageSize', value=self.imageSize) + writer.newline() + self.metrics.toXML(writer, ttFont) + + def readMetrics(self, name, attrs, content, ttFont): + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'imageSize': + self.imageSize = safeEval(attrs['value']) + elif name == BigGlyphMetrics.__name__: + self.metrics = BigGlyphMetrics() + self.metrics.fromXML(name, attrs, content, ttFont) + elif name == SmallGlyphMetrics.__name__: + print("Warning: SmallGlyphMetrics being ignored in format %d." % self.indexFormat) + + def padBitmapData(self, data): + # Make sure that the data isn't bigger than the fixed size. + assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat + # Pad the data so that it matches the fixed size. 
+ pad = (self.imageSize - len(data)) * b'\0' + return data + pad + +class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable): + pass + +class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable): + + def decompile(self): + (self.imageSize,) = struct.unpack(">L", self.data[:4]) + self.metrics = BigGlyphMetrics() + sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics) + glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) + offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] + self.locations = list(zip(offsets, offsets[1:])) + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + + def compile(self, ttFont): + glyphIds = list(map(ttFont.getGlyphID, self.names)) + # Make sure all the ids are consecutive. This is required by Format 2. + assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive." + self.imageDataOffset = min(zip(*self.locations)[0]) + + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList.append(struct.pack(">L", self.imageSize)) + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + return bytesjoin(dataList) + +class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable): + pass + +class eblc_index_sub_table_4(EblcIndexSubTable): + + def decompile(self): + + (numGlyphs,) = struct.unpack(">L", self.data[:4]) + data = self.data[4:] + indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)] + indexingLocations = zip(indexingOffsets, indexingOffsets[1:]) + glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations] + glyphIds, offsets = list(map(list, zip(*glyphArray))) + # There are one too many glyph ids. Get rid of the last one. 
+ glyphIds.pop() + + offsets = [offset + self.imageDataOffset for offset in offsets] + self.locations = list(zip(offsets, offsets[1:])) + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + + def compile(self, ttFont): + # First make sure that all the data lines up properly. Format 4 + # must have all its data lined up consecutively. If not this will fail. + for curLoc, nxtLoc in zip(self.locations, self.locations[1:]): + assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4" + + offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]] + # Image data offset must be less than or equal to the minimum of locations. + # Resetting this offset may change the value for round tripping but is safer + # and allows imageDataOffset to not be required to be in the XML version. + self.imageDataOffset = min(offsets) + offsets = [offset - self.imageDataOffset for offset in offsets] + glyphIds = list(map(ttFont.getGlyphID, self.names)) + # Create an iterator over the ids plus a padding value. 
+ idsPlusPad = list(itertools.chain(glyphIds, [0])) + + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList.append(struct.pack(">L", len(glyphIds))) + tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)] + dataList += tmp + data = bytesjoin(dataList) + return data + +class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable): + + def decompile(self): + self.origDataLen = 0 + (self.imageSize,) = struct.unpack(">L", self.data[:4]) + data = self.data[4:] + self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics()) + (numGlyphs,) = struct.unpack(">L", data[:4]) + data = data[4:] + glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)] + + offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] + self.locations = list(zip(offsets, offsets[1:])) + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + + def compile(self, ttFont): + self.imageDataOffset = min(zip(*self.locations)[0]) + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList.append(struct.pack(">L", self.imageSize)) + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + glyphIds = list(map(ttFont.getGlyphID, self.names)) + dataList.append(struct.pack(">L", len(glyphIds))) + dataList += [struct.pack(">H", curId) for curId in glyphIds] + if len(glyphIds) % 2 == 1: + dataList.append(struct.pack(">H", 0)) + return bytesjoin(dataList) + +# Dictionary of indexFormat to the class representing that format. 
+eblc_sub_table_classes = { + 1: eblc_index_sub_table_1, + 2: eblc_index_sub_table_2, + 3: eblc_index_sub_table_3, + 4: eblc_index_sub_table_4, + 5: eblc_index_sub_table_5, + } diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_f_e_a_t.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_e_a_t.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_f_e_a_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_e_a_t.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table__f_e_a_t(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/F_F_T_M_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/F_F_T_M_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/F_F_T_M_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/F_F_T_M_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,42 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from fontTools.misc.timeTools import timestampFromString, timestampToString +from . 
import DefaultTable + +FFTMFormat = """ + > # big endian + version: I + FFTimeStamp: Q + sourceCreated: Q + sourceModified: Q +""" + +class table_F_F_T_M_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + dummy, rest = sstruct.unpack2(FFTMFormat, data, self) + + def compile(self, ttFont): + data = sstruct.pack(FFTMFormat, self) + return data + + def toXML(self, writer, ttFont): + writer.comment("FontForge's timestamp, font source creation and modification dates") + writer.newline() + formatstring, names, fixes = sstruct.getformat(FFTMFormat) + for name in names: + value = getattr(self, name) + if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): + value = timestampToString(value) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): + value = timestampFromString(value) + else: + value = safeEval(value) + setattr(self, name, value) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_f_p_g_m.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_p_g_m.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_f_p_g_m.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_p_g_m.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,51 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable +from . 
import ttProgram + +class table__f_p_g_m(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + program = ttProgram.Program() + program.fromBytecode(data) + self.program = program + + def compile(self, ttFont): + return self.program.getBytecode() + + def toXML(self, writer, ttFont): + self.program.toXML(writer, ttFont) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + program = ttProgram.Program() + program.fromXML(name, attrs, content, ttFont) + self.program = program + + def __bool__(self): + """ + >>> fpgm = table__f_p_g_m() + >>> bool(fpgm) + False + >>> p = ttProgram.Program() + >>> fpgm.program = p + >>> bool(fpgm) + False + >>> bc = bytearray([0]) + >>> p.fromBytecode(bc) + >>> bool(fpgm) + True + >>> p.bytecode.pop() + 0 + >>> bool(fpgm) + False + """ + return hasattr(self, 'program') and bool(self.program) + + __nonzero__ = __bool__ + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_f_v_a_r.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_v_a_r.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_f_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,187 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval, num2binary, binary2num +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import struct + + +# Apple's documentation of 'fvar': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html + +FVAR_HEADER_FORMAT = """ + > # big endian + version: L + offsetToData: H + countSizePairs: H + axisCount: H + axisSize: H + instanceCount: H + instanceSize: H +""" + +FVAR_AXIS_FORMAT = """ + > # big endian + axisTag: 4s + minValue: 16.16F + defaultValue: 16.16F + maxValue: 16.16F + flags: H + nameID: H +""" + +FVAR_INSTANCE_FORMAT = """ + > # big endian + nameID: H + flags: H +""" + +class table__f_v_a_r(DefaultTable.DefaultTable): + dependencies = ["name"] + + def __init__(self, tag="fvar"): + DefaultTable.DefaultTable.__init__(self, tag) + self.axes = [] + self.instances = [] + + def compile(self, ttFont): + header = { + "version": 0x00010000, + "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT), + "countSizePairs": 2, + "axisCount": len(self.axes), + "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT), + "instanceCount": len(self.instances), + "instanceSize": sstruct.calcsize(FVAR_INSTANCE_FORMAT) + len(self.axes) * 4 + } + result = [sstruct.pack(FVAR_HEADER_FORMAT, header)] + result.extend([axis.compile() for axis in self.axes]) + axisTags = [axis.axisTag for axis in self.axes] + result.extend([instance.compile(axisTags) for instance in self.instances]) + return bytesjoin(result) + + def decompile(self, data, ttFont): + header = {} + headerSize = sstruct.calcsize(FVAR_HEADER_FORMAT) + header = sstruct.unpack(FVAR_HEADER_FORMAT, data[0:headerSize]) + if header["version"] != 0x00010000: + raise TTLibError("unsupported 'fvar' version %04x" % header["version"]) + pos = header["offsetToData"] + axisSize = header["axisSize"] + for _ in range(header["axisCount"]): + axis = Axis() + axis.decompile(data[pos:pos+axisSize]) + self.axes.append(axis) + pos += axisSize + instanceSize = header["instanceSize"] + axisTags = [axis.axisTag for axis in self.axes] + for _ in range(header["instanceCount"]): + instance = 
NamedInstance() + instance.decompile(data[pos:pos+instanceSize], axisTags) + self.instances.append(instance) + pos += instanceSize + + def toXML(self, writer, ttFont, progress=None): + for axis in self.axes: + axis.toXML(writer, ttFont) + for instance in self.instances: + instance.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "Axis": + axis = Axis() + axis.fromXML(name, attrs, content, ttFont) + self.axes.append(axis) + elif name == "NamedInstance": + instance = NamedInstance() + instance.fromXML(name, attrs, content, ttFont) + self.instances.append(instance) + +class Axis(object): + def __init__(self): + self.axisTag = None + self.nameID = 0 + self.flags = 0 # not exposed in XML because spec defines no values + self.minValue = -1.0 + self.defaultValue = 0.0 + self.maxValue = 1.0 + + def compile(self): + return sstruct.pack(FVAR_AXIS_FORMAT, self) + + def decompile(self, data): + sstruct.unpack2(FVAR_AXIS_FORMAT, data, self) + + def toXML(self, writer, ttFont): + name = ttFont["name"].getDebugName(self.nameID) + if name is not None: + writer.newline() + writer.comment(name) + writer.newline() + writer.begintag("Axis") + writer.newline() + for tag, value in [("AxisTag", self.axisTag), + ("MinValue", str(self.minValue)), + ("DefaultValue", str(self.defaultValue)), + ("MaxValue", str(self.maxValue)), + ("NameID", str(self.nameID))]: + writer.begintag(tag) + writer.write(value) + writer.endtag(tag) + writer.newline() + writer.endtag("Axis") + writer.newline() + + def fromXML(self, name, _attrs, content, ttFont): + assert(name == "Axis") + for tag, _, value in filter(lambda t: type(t) is tuple, content): + value = ''.join(value) + if tag == "AxisTag": + self.axisTag = value + elif tag in ["MinValue", "DefaultValue", "MaxValue", "NameID"]: + setattr(self, tag[0].lower() + tag[1:], safeEval(value)) + +class NamedInstance(object): + def __init__(self): + self.nameID = 0 + self.flags = 0 # not exposed in XML because spec defines no 
values + self.coordinates = {} + + def compile(self, axisTags): + result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)] + for axis in axisTags: + fixedCoord = floatToFixed(self.coordinates[axis], 16) + result.append(struct.pack(">l", fixedCoord)) + return bytesjoin(result) + + def decompile(self, data, axisTags): + sstruct.unpack2(FVAR_INSTANCE_FORMAT, data, self) + pos = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + for axis in axisTags: + value = struct.unpack(">l", data[pos : pos + 4])[0] + self.coordinates[axis] = fixedToFloat(value, 16) + pos += 4 + + def toXML(self, writer, ttFont): + name = ttFont["name"].getDebugName(self.nameID) + if name is not None: + writer.newline() + writer.comment(name) + writer.newline() + writer.begintag("NamedInstance", nameID=self.nameID) + writer.newline() + for axis in ttFont["fvar"].axes: + writer.simpletag("coord", axis=axis.axisTag, + value=self.coordinates[axis.axisTag]) + writer.newline() + writer.endtag("NamedInstance") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + assert(name == "NamedInstance") + self.nameID = safeEval(attrs["nameID"]) + for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content): + if tag == "coord": + self.coordinates[elementAttrs["axis"]] = safeEval(elementAttrs["value"]) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_f_v_a_r_test.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_v_a_r_test.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_f_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,190 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis, NamedInstance +from 
fontTools.ttLib.tables._n_a_m_e import table__n_a_m_e, NameRecord +import unittest + + + +FVAR_DATA = deHexStr( + "00 01 00 00 00 10 00 02 00 02 00 14 00 02 00 0C " + "77 67 68 74 00 64 00 00 01 90 00 00 03 84 00 00 00 00 01 01 " + "77 64 74 68 00 32 00 00 00 64 00 00 00 c8 00 00 00 00 01 02 " + "01 03 00 00 01 2c 00 00 00 64 00 00 " + "01 04 00 00 01 2c 00 00 00 4b 00 00") + +FVAR_AXIS_DATA = deHexStr( + "6F 70 73 7a ff ff 80 00 00 01 4c cd 00 01 80 00 00 00 01 59") + +FVAR_INSTANCE_DATA = deHexStr("01 59 00 00 00 00 b3 33 00 00 80 00") + + +def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in content.splitlines()][1:] + + +def AddName(font, name): + nameTable = font.get("name") + if nameTable is None: + nameTable = font["name"] = table__n_a_m_e() + nameTable.names = [] + namerec = NameRecord() + namerec.nameID = 1 + max([n.nameID for n in nameTable.names] + [256]) + namerec.string = name.encode('mac_roman') + namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0) + nameTable.names.append(namerec) + return namerec + + +def MakeFont(): + axes = [("wght", "Weight", 100, 400, 900), ("wdth", "Width", 50, 100, 200)] + instances = [("Light", 300, 100), ("Light Condensed", 300, 75)] + fvarTable = table__f_v_a_r() + font = {"fvar": fvarTable} + for tag, name, minValue, defaultValue, maxValue in axes: + axis = Axis() + axis.axisTag = tag + axis.defaultValue = defaultValue + axis.minValue, axis.maxValue = minValue, maxValue + axis.nameID = AddName(font, name).nameID + fvarTable.axes.append(axis) + for name, weight, width in instances: + inst = NamedInstance() + inst.nameID = AddName(font, name).nameID + inst.coordinates = {"wght": weight, "wdth": width} + fvarTable.instances.append(inst) + return font + + +class FontVariationTableTest(unittest.TestCase): + def test_compile(self): + font = MakeFont() + h = font["fvar"].compile(font) + self.assertEqual(FVAR_DATA, font["fvar"].compile(font)) + + def 
test_decompile(self): + fvar = table__f_v_a_r() + fvar.decompile(FVAR_DATA, ttFont={"fvar": fvar}) + self.assertEqual(["wght", "wdth"], [a.axisTag for a in fvar.axes]) + self.assertEqual([259, 260], [i.nameID for i in fvar.instances]) + + def test_toXML(self): + font = MakeFont() + writer = XMLWriter(BytesIO()) + font["fvar"].toXML(writer, font) + xml = writer.file.getvalue().decode("utf-8") + self.assertEqual(2, xml.count("")) + self.assertTrue("wght" in xml) + self.assertTrue("wdth" in xml) + self.assertEqual(2, xml.count("" in xml) + self.assertTrue("" in xml) + + def test_fromXML(self): + fvar = table__f_v_a_r() + fvar.fromXML("Axis", {}, [("AxisTag", {}, ["opsz"])], ttFont=None) + fvar.fromXML("Axis", {}, [("AxisTag", {}, ["slnt"])], ttFont=None) + fvar.fromXML("NamedInstance", {"nameID": "765"}, [], ttFont=None) + fvar.fromXML("NamedInstance", {"nameID": "234"}, [], ttFont=None) + self.assertEqual(["opsz", "slnt"], [a.axisTag for a in fvar.axes]) + self.assertEqual([765, 234], [i.nameID for i in fvar.instances]) + + +class AxisTest(unittest.TestCase): + def test_compile(self): + axis = Axis() + axis.axisTag, axis.nameID = ('opsz', 345) + axis.minValue, axis.defaultValue, axis.maxValue = (-0.5, 1.3, 1.5) + self.assertEqual(FVAR_AXIS_DATA, axis.compile()) + + def test_decompile(self): + axis = Axis() + axis.decompile(FVAR_AXIS_DATA) + self.assertEqual("opsz", axis.axisTag) + self.assertEqual(345, axis.nameID) + self.assertEqual(-0.5, axis.minValue) + self.assertEqual(1.3, axis.defaultValue) + self.assertEqual(1.5, axis.maxValue) + + def test_toXML(self): + font = MakeFont() + axis = Axis() + axis.decompile(FVAR_AXIS_DATA) + AddName(font, "Optical Size").nameID = 256 + axis.nameID = 256 + writer = XMLWriter(BytesIO()) + axis.toXML(writer, font) + self.assertEqual([ + '', + '', + '', + 'opsz', + '-0.5', + '1.3', + '1.5', + '256', + '' + ], xml_lines(writer)) + + def test_fromXML(self): + axis = Axis() + axis.fromXML("Axis", {}, [ + ("AxisTag", {}, ["wght"]), + 
("MinValue", {}, ["100"]), + ("DefaultValue", {}, ["400"]), + ("MaxValue", {}, ["900"]), + ("NameID", {}, ["256"]) + ], ttFont=None) + self.assertEqual("wght", axis.axisTag) + self.assertEqual(100, axis.minValue) + self.assertEqual(400, axis.defaultValue) + self.assertEqual(900, axis.maxValue) + self.assertEqual(256, axis.nameID) + + +class NamedInstanceTest(unittest.TestCase): + def test_compile(self): + inst = NamedInstance() + inst.nameID = 345 + inst.coordinates = {"wght": 0.7, "wdth": 0.5} + self.assertEqual(FVAR_INSTANCE_DATA, inst.compile(["wght", "wdth"])) + + def test_decompile(self): + inst = NamedInstance() + inst.decompile(FVAR_INSTANCE_DATA, ["wght", "wdth"]) + self.assertEqual(345, inst.nameID) + self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) + + def test_toXML(self): + font = MakeFont() + inst = NamedInstance() + inst.nameID = AddName(font, "Light Condensed").nameID + inst.coordinates = {"wght": 0.7, "wdth": 0.5} + writer = XMLWriter(BytesIO()) + inst.toXML(writer, font) + self.assertEqual([ + '', + '', + '' % inst.nameID, + '', + '', + '' + ], xml_lines(writer)) + + def test_fromXML(self): + inst = NamedInstance() + attrs = {"nameID": "345"} + inst.fromXML("NamedInstance", attrs, [ + ("coord", {"axis": "wght", "value": "0.7"}, []), + ("coord", {"axis": "wdth", "value": "0.5"}, []), + ], ttFont=MakeFont()) + self.assertEqual(345, inst.nameID) + self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_g_a_s_p.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_a_s_p.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_g_a_s_p.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_a_s_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,51 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval 
+from . import DefaultTable +import struct + + +GASP_SYMMETRIC_GRIDFIT = 0x0004 +GASP_SYMMETRIC_SMOOTHING = 0x0008 +GASP_DOGRAY = 0x0002 +GASP_GRIDFIT = 0x0001 + +class table__g_a_s_p(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.version, numRanges = struct.unpack(">HH", data[:4]) + assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version + data = data[4:] + self.gaspRange = {} + for i in range(numRanges): + rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4]) + self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior) + data = data[4:] + assert not data, "too much data" + + def compile(self, ttFont): + version = 0 # ignore self.version + numRanges = len(self.gaspRange) + data = b"" + items = sorted(self.gaspRange.items()) + for rangeMaxPPEM, rangeGaspBehavior in items: + data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior) + if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY): + version = 1 + data = struct.pack(">HH", version, numRanges) + data + return data + + def toXML(self, writer, ttFont): + items = sorted(self.gaspRange.items()) + for rangeMaxPPEM, rangeGaspBehavior in items: + writer.simpletag("gaspRange", [ + ("rangeMaxPPEM", rangeMaxPPEM), + ("rangeGaspBehavior", rangeGaspBehavior)]) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name != "gaspRange": + return + if not hasattr(self, "gaspRange"): + self.gaspRange = {} + self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(attrs["rangeGaspBehavior"]) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/G_D_E_F_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/G_D_E_F_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/G_D_E_F_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/G_D_E_F_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import 
BaseTTXConverter + + +class table_G_D_E_F_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_g_l_y_f.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_l_y_f.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_g_l_y_f.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_l_y_f.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1246 @@ +"""_g_l_y_f.py -- Converter classes for the 'glyf' table.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools import ttLib +from fontTools.misc.textTools import safeEval, pad +from fontTools.misc.arrayTools import calcBounds, calcIntBounds, pointInRect +from fontTools.misc.bezierTools import calcQuadraticBounds +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from . import DefaultTable +from . import ttProgram +import sys +import struct +import array +import warnings + +# +# The Apple and MS rasterizers behave differently for +# scaled composite components: one does scale first and then translate +# and the other does it vice versa. MS defined some flags to indicate +# the difference, but it seems nobody actually _sets_ those flags. +# +# Funny thing: Apple seems to _only_ do their thing in the +# WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE +# (eg. Charcoal)... 
+# +SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple + + +class table__g_l_y_f(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + loca = ttFont['loca'] + last = int(loca[0]) + noname = 0 + self.glyphs = {} + self.glyphOrder = glyphOrder = ttFont.getGlyphOrder() + for i in range(0, len(loca)-1): + try: + glyphName = glyphOrder[i] + except IndexError: + noname = noname + 1 + glyphName = 'ttxautoglyph%s' % i + next = int(loca[i+1]) + glyphdata = data[last:next] + if len(glyphdata) != (next - last): + raise ttLib.TTLibError("not enough 'glyf' table data") + glyph = Glyph(glyphdata) + self.glyphs[glyphName] = glyph + last = next + if len(data) - next >= 4: + warnings.warn("too much 'glyf' table data: expected %d, received %d bytes" % + (next, len(data))) + if noname: + warnings.warn('%s glyphs have no name' % noname) + if ttFont.lazy is False: # Be lazy for None and True + for glyph in self.glyphs.values(): + glyph.expand(self) + + def compile(self, ttFont): + if not hasattr(self, "glyphOrder"): + self.glyphOrder = ttFont.getGlyphOrder() + padding = self.padding if hasattr(self, 'padding') else None + locations = [] + currentLocation = 0 + dataList = [] + recalcBBoxes = ttFont.recalcBBoxes + for glyphName in self.glyphOrder: + glyph = self.glyphs[glyphName] + glyphData = glyph.compile(self, recalcBBoxes) + if padding: + glyphData = pad(glyphData, size=padding) + locations.append(currentLocation) + currentLocation = currentLocation + len(glyphData) + dataList.append(glyphData) + locations.append(currentLocation) + + if padding is None and currentLocation < 0x20000: + # See if we can pad any odd-lengthed glyphs to allow loca + # table to use the short offsets. + indices = [i for i,glyphData in enumerate(dataList) if len(glyphData) % 2 == 1] + if indices and currentLocation + len(indices) < 0x20000: + # It fits. Do it. 
+ for i in indices: + dataList[i] += b'\0' + currentLocation = 0 + for i,glyphData in enumerate(dataList): + locations[i] = currentLocation + currentLocation += len(glyphData) + locations[len(dataList)] = currentLocation + + data = bytesjoin(dataList) + if 'loca' in ttFont: + ttFont['loca'].set(locations) + if 'maxp' in ttFont: + ttFont['maxp'].numGlyphs = len(self.glyphs) + return data + + def toXML(self, writer, ttFont, progress=None): + writer.newline() + glyphNames = ttFont.getGlyphNames() + writer.comment("The xMin, yMin, xMax and yMax values\nwill be recalculated by the compiler.") + writer.newline() + writer.newline() + counter = 0 + progressStep = 10 + numGlyphs = len(glyphNames) + for glyphName in glyphNames: + if not counter % progressStep and progress is not None: + progress.setLabel("Dumping 'glyf' table... (%s)" % glyphName) + progress.increment(progressStep / numGlyphs) + counter = counter + 1 + glyph = self[glyphName] + if glyph.numberOfContours: + writer.begintag('TTGlyph', [ + ("name", glyphName), + ("xMin", glyph.xMin), + ("yMin", glyph.yMin), + ("xMax", glyph.xMax), + ("yMax", glyph.yMax), + ]) + writer.newline() + glyph.toXML(writer, ttFont) + writer.endtag('TTGlyph') + writer.newline() + else: + writer.simpletag('TTGlyph', name=glyphName) + writer.comment("contains no outline data") + writer.newline() + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name != "TTGlyph": + return + if not hasattr(self, "glyphs"): + self.glyphs = {} + if not hasattr(self, "glyphOrder"): + self.glyphOrder = ttFont.getGlyphOrder() + glyphName = attrs["name"] + if ttFont.verbose: + ttLib.debugmsg("unpacking glyph '%s'" % glyphName) + glyph = Glyph() + for attr in ['xMin', 'yMin', 'xMax', 'yMax']: + setattr(glyph, attr, safeEval(attrs.get(attr, '0'))) + self.glyphs[glyphName] = glyph + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + glyph.fromXML(name, attrs, content, ttFont) + if 
not ttFont.recalcBBoxes: + glyph.compact(self, 0) + + def setGlyphOrder(self, glyphOrder): + self.glyphOrder = glyphOrder + + def getGlyphName(self, glyphID): + return self.glyphOrder[glyphID] + + def getGlyphID(self, glyphName): + # XXX optimize with reverse dict!!! + return self.glyphOrder.index(glyphName) + + def keys(self): + return self.glyphs.keys() + + def has_key(self, glyphName): + return glyphName in self.glyphs + + __contains__ = has_key + + def __getitem__(self, glyphName): + glyph = self.glyphs[glyphName] + glyph.expand(self) + return glyph + + def __setitem__(self, glyphName, glyph): + self.glyphs[glyphName] = glyph + if glyphName not in self.glyphOrder: + self.glyphOrder.append(glyphName) + + def __delitem__(self, glyphName): + del self.glyphs[glyphName] + self.glyphOrder.remove(glyphName) + + def __len__(self): + assert len(self.glyphOrder) == len(self.glyphs) + return len(self.glyphs) + + +glyphHeaderFormat = """ + > # big endian + numberOfContours: h + xMin: h + yMin: h + xMax: h + yMax: h +""" + +# flags +flagOnCurve = 0x01 +flagXShort = 0x02 +flagYShort = 0x04 +flagRepeat = 0x08 +flagXsame = 0x10 +flagYsame = 0x20 +flagReserved1 = 0x40 +flagReserved2 = 0x80 + +_flagSignBytes = { + 0: 2, + flagXsame: 0, + flagXShort|flagXsame: +1, + flagXShort: -1, + flagYsame: 0, + flagYShort|flagYsame: +1, + flagYShort: -1, +} + +def flagBest(x, y, onCurve): + """For a given x,y delta pair, returns the flag that packs this pair + most efficiently, as well as the number of byte cost of such flag.""" + + flag = flagOnCurve if onCurve else 0 + cost = 0 + # do x + if x == 0: + flag = flag | flagXsame + elif -255 <= x <= 255: + flag = flag | flagXShort + if x > 0: + flag = flag | flagXsame + cost += 1 + else: + cost += 2 + # do y + if y == 0: + flag = flag | flagYsame + elif -255 <= y <= 255: + flag = flag | flagYShort + if y > 0: + flag = flag | flagYsame + cost += 1 + else: + cost += 2 + return flag, cost + +def flagFits(newFlag, oldFlag, mask): + newBytes = 
_flagSignBytes[newFlag & mask] + oldBytes = _flagSignBytes[oldFlag & mask] + return newBytes == oldBytes or abs(newBytes) > abs(oldBytes) + +def flagSupports(newFlag, oldFlag): + return ((oldFlag & flagOnCurve) == (newFlag & flagOnCurve) and + flagFits(newFlag, oldFlag, flagXsame|flagXShort) and + flagFits(newFlag, oldFlag, flagYsame|flagYShort)) + +def flagEncodeCoord(flag, mask, coord, coordBytes): + byteCount = _flagSignBytes[flag & mask] + if byteCount == 1: + coordBytes.append(coord) + elif byteCount == -1: + coordBytes.append(-coord) + elif byteCount == 2: + coordBytes.append((coord >> 8) & 0xFF) + coordBytes.append(coord & 0xFF) + +def flagEncodeCoords(flag, x, y, xBytes, yBytes): + flagEncodeCoord(flag, flagXsame|flagXShort, x, xBytes) + flagEncodeCoord(flag, flagYsame|flagYShort, y, yBytes) + + +ARG_1_AND_2_ARE_WORDS = 0x0001 # if set args are words otherwise they are bytes +ARGS_ARE_XY_VALUES = 0x0002 # if set args are xy values, otherwise they are points +ROUND_XY_TO_GRID = 0x0004 # for the xy values if above is true +WE_HAVE_A_SCALE = 0x0008 # Sx = Sy, otherwise scale == 1.0 +NON_OVERLAPPING = 0x0010 # set to same value for all components (obsolete!) 
+MORE_COMPONENTS = 0x0020 # indicates at least one more glyph after this one +WE_HAVE_AN_X_AND_Y_SCALE = 0x0040 # Sx, Sy +WE_HAVE_A_TWO_BY_TWO = 0x0080 # t00, t01, t10, t11 +WE_HAVE_INSTRUCTIONS = 0x0100 # instructions follow +USE_MY_METRICS = 0x0200 # apply these metrics to parent glyph +OVERLAP_COMPOUND = 0x0400 # used by Apple in GX fonts +SCALED_COMPONENT_OFFSET = 0x0800 # composite designed to have the component offset scaled (designed for Apple) +UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS) + + +class Glyph(object): + + def __init__(self, data=""): + if not data: + # empty char + self.numberOfContours = 0 + return + self.data = data + + def compact(self, glyfTable, recalcBBoxes=True): + data = self.compile(glyfTable, recalcBBoxes) + self.__dict__.clear() + self.data = data + + def expand(self, glyfTable): + if not hasattr(self, "data"): + # already unpacked + return + if not self.data: + # empty char + self.numberOfContours = 0 + return + dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self) + del self.data + if self.isComposite(): + self.decompileComponents(data, glyfTable) + else: + self.decompileCoordinates(data) + + def compile(self, glyfTable, recalcBBoxes=True): + if hasattr(self, "data"): + return self.data + if self.numberOfContours == 0: + return "" + if recalcBBoxes: + self.recalcBounds(glyfTable) + data = sstruct.pack(glyphHeaderFormat, self) + if self.isComposite(): + data = data + self.compileComponents(glyfTable) + else: + data = data + self.compileCoordinates() + return data + + def toXML(self, writer, ttFont): + if self.isComposite(): + for compo in self.components: + compo.toXML(writer, ttFont) + if hasattr(self, "program"): + writer.begintag("instructions") + self.program.toXML(writer, ttFont) + writer.endtag("instructions") + writer.newline() + else: + last = 0 + for i in range(self.numberOfContours): + writer.begintag("contour") + writer.newline() + for j in 
range(last, self.endPtsOfContours[i] + 1): + writer.simpletag("pt", [ + ("x", self.coordinates[j][0]), + ("y", self.coordinates[j][1]), + ("on", self.flags[j] & flagOnCurve)]) + writer.newline() + last = self.endPtsOfContours[i] + 1 + writer.endtag("contour") + writer.newline() + if self.numberOfContours: + writer.begintag("instructions") + self.program.toXML(writer, ttFont) + writer.endtag("instructions") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "contour": + if self.numberOfContours < 0: + raise ttLib.TTLibError("can't mix composites and contours in glyph") + self.numberOfContours = self.numberOfContours + 1 + coordinates = GlyphCoordinates() + flags = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "pt": + continue # ignore anything but "pt" + coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"]))) + flags.append(not not safeEval(attrs["on"])) + flags = array.array("B", flags) + if not hasattr(self, "coordinates"): + self.coordinates = coordinates + self.flags = flags + self.endPtsOfContours = [len(coordinates)-1] + else: + self.coordinates.extend (coordinates) + self.flags.extend(flags) + self.endPtsOfContours.append(len(self.coordinates)-1) + elif name == "component": + if self.numberOfContours > 0: + raise ttLib.TTLibError("can't mix composites and contours in glyph") + self.numberOfContours = -1 + if not hasattr(self, "components"): + self.components = [] + component = GlyphComponent() + self.components.append(component) + component.fromXML(name, attrs, content, ttFont) + elif name == "instructions": + self.program = ttProgram.Program() + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + self.program.fromXML(name, attrs, content, ttFont) + + def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1): + assert self.isComposite() + nContours = 0 + nPoints = 0 + for compo in 
self.components: + baseGlyph = glyfTable[compo.glyphName] + if baseGlyph.numberOfContours == 0: + continue + elif baseGlyph.numberOfContours > 0: + nP, nC = baseGlyph.getMaxpValues() + else: + nP, nC, maxComponentDepth = baseGlyph.getCompositeMaxpValues( + glyfTable, maxComponentDepth + 1) + nPoints = nPoints + nP + nContours = nContours + nC + return nPoints, nContours, maxComponentDepth + + def getMaxpValues(self): + assert self.numberOfContours > 0 + return len(self.coordinates), len(self.endPtsOfContours) + + def decompileComponents(self, data, glyfTable): + self.components = [] + more = 1 + haveInstructions = 0 + while more: + component = GlyphComponent() + more, haveInstr, data = component.decompile(data, glyfTable) + haveInstructions = haveInstructions | haveInstr + self.components.append(component) + if haveInstructions: + numInstructions, = struct.unpack(">h", data[:2]) + data = data[2:] + self.program = ttProgram.Program() + self.program.fromBytecode(data[:numInstructions]) + data = data[numInstructions:] + if len(data) >= 4: + warnings.warn("too much glyph data at the end of composite glyph: %d excess bytes" % len(data)) + + def decompileCoordinates(self, data): + endPtsOfContours = array.array("h") + endPtsOfContours.fromstring(data[:2*self.numberOfContours]) + if sys.byteorder != "big": + endPtsOfContours.byteswap() + self.endPtsOfContours = endPtsOfContours.tolist() + + data = data[2*self.numberOfContours:] + + instructionLength, = struct.unpack(">h", data[:2]) + data = data[2:] + self.program = ttProgram.Program() + self.program.fromBytecode(data[:instructionLength]) + data = data[instructionLength:] + nCoordinates = self.endPtsOfContours[-1] + 1 + flags, xCoordinates, yCoordinates = \ + self.decompileCoordinatesRaw(nCoordinates, data) + + # fill in repetitions and apply signs + self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates) + xIndex = 0 + yIndex = 0 + for i in range(nCoordinates): + flag = flags[i] + # x coordinate + if flag & 
flagXShort: + if flag & flagXsame: + x = xCoordinates[xIndex] + else: + x = -xCoordinates[xIndex] + xIndex = xIndex + 1 + elif flag & flagXsame: + x = 0 + else: + x = xCoordinates[xIndex] + xIndex = xIndex + 1 + # y coordinate + if flag & flagYShort: + if flag & flagYsame: + y = yCoordinates[yIndex] + else: + y = -yCoordinates[yIndex] + yIndex = yIndex + 1 + elif flag & flagYsame: + y = 0 + else: + y = yCoordinates[yIndex] + yIndex = yIndex + 1 + coordinates[i] = (x, y) + assert xIndex == len(xCoordinates) + assert yIndex == len(yCoordinates) + coordinates.relativeToAbsolute() + # discard all flags but for "flagOnCurve" + self.flags = array.array("B", (f & flagOnCurve for f in flags)) + + def decompileCoordinatesRaw(self, nCoordinates, data): + # unpack flags and prepare unpacking of coordinates + flags = array.array("B", [0] * nCoordinates) + # Warning: deep Python trickery going on. We use the struct module to unpack + # the coordinates. We build a format string based on the flags, so we can + # unpack the coordinates in one struct.unpack() call. + xFormat = ">" # big endian + yFormat = ">" # big endian + i = j = 0 + while True: + flag = byteord(data[i]) + i = i + 1 + repeat = 1 + if flag & flagRepeat: + repeat = byteord(data[i]) + 1 + i = i + 1 + for k in range(repeat): + if flag & flagXShort: + xFormat = xFormat + 'B' + elif not (flag & flagXsame): + xFormat = xFormat + 'h' + if flag & flagYShort: + yFormat = yFormat + 'B' + elif not (flag & flagYsame): + yFormat = yFormat + 'h' + flags[j] = flag + j = j + 1 + if j >= nCoordinates: + break + assert j == nCoordinates, "bad glyph flags" + data = data[i:] + # unpack raw coordinates, krrrrrr-tching! 
+ xDataLen = struct.calcsize(xFormat) + yDataLen = struct.calcsize(yFormat) + if len(data) - (xDataLen + yDataLen) >= 4: + warnings.warn("too much glyph data: %d excess bytes" % (len(data) - (xDataLen + yDataLen))) + xCoordinates = struct.unpack(xFormat, data[:xDataLen]) + yCoordinates = struct.unpack(yFormat, data[xDataLen:xDataLen+yDataLen]) + return flags, xCoordinates, yCoordinates + + def compileComponents(self, glyfTable): + data = b"" + lastcomponent = len(self.components) - 1 + more = 1 + haveInstructions = 0 + for i in range(len(self.components)): + if i == lastcomponent: + haveInstructions = hasattr(self, "program") + more = 0 + compo = self.components[i] + data = data + compo.compile(more, haveInstructions, glyfTable) + if haveInstructions: + instructions = self.program.getBytecode() + data = data + struct.pack(">h", len(instructions)) + instructions + return data + + def compileCoordinates(self): + assert len(self.coordinates) == len(self.flags) + data = [] + endPtsOfContours = array.array("h", self.endPtsOfContours) + if sys.byteorder != "big": + endPtsOfContours.byteswap() + data.append(endPtsOfContours.tostring()) + instructions = self.program.getBytecode() + data.append(struct.pack(">h", len(instructions))) + data.append(instructions) + + deltas = self.coordinates.copy() + if deltas.isFloat(): + # Warn? + xPoints = [int(round(x)) for x in xPoints] + yPoints = [int(round(y)) for y in xPoints] + deltas.absoluteToRelative() + + # TODO(behdad): Add a configuration option for this? + deltas = self.compileDeltasGreedy(self.flags, deltas) + #deltas = self.compileDeltasOptimal(self.flags, deltas) + + data.extend(deltas) + return bytesjoin(data) + + def compileDeltasGreedy(self, flags, deltas): + # Implements greedy algorithm for packing coordinate deltas: + # uses shortest representation one coordinate at a time. 
+ compressedflags = [] + xPoints = [] + yPoints = [] + lastflag = None + repeat = 0 + for flag,(x,y) in zip(flags, deltas): + # Oh, the horrors of TrueType + # do x + if x == 0: + flag = flag | flagXsame + elif -255 <= x <= 255: + flag = flag | flagXShort + if x > 0: + flag = flag | flagXsame + else: + x = -x + xPoints.append(bytechr(x)) + else: + xPoints.append(struct.pack(">h", x)) + # do y + if y == 0: + flag = flag | flagYsame + elif -255 <= y <= 255: + flag = flag | flagYShort + if y > 0: + flag = flag | flagYsame + else: + y = -y + yPoints.append(bytechr(y)) + else: + yPoints.append(struct.pack(">h", y)) + # handle repeating flags + if flag == lastflag and repeat != 255: + repeat = repeat + 1 + if repeat == 1: + compressedflags.append(flag) + else: + compressedflags[-2] = flag | flagRepeat + compressedflags[-1] = repeat + else: + repeat = 0 + compressedflags.append(flag) + lastflag = flag + compressedFlags = array.array("B", compressedflags).tostring() + compressedXs = bytesjoin(xPoints) + compressedYs = bytesjoin(yPoints) + return (compressedFlags, compressedXs, compressedYs) + + def compileDeltasOptimal(self, flags, deltas): + # Implements optimal, dynaic-programming, algorithm for packing coordinate + # deltas. The savings are negligible :(. 
+ candidates = [] + bestTuple = None + bestCost = 0 + repeat = 0 + for flag,(x,y) in zip(flags, deltas): + # Oh, the horrors of TrueType + flag, coordBytes = flagBest(x, y, flag) + bestCost += 1 + coordBytes + newCandidates = [(bestCost, bestTuple, flag, coordBytes), + (bestCost+1, bestTuple, (flag|flagRepeat), coordBytes)] + for lastCost,lastTuple,lastFlag,coordBytes in candidates: + if lastCost + coordBytes <= bestCost + 1 and (lastFlag & flagRepeat) and (lastFlag < 0xff00) and flagSupports(lastFlag, flag): + if (lastFlag & 0xFF) == (flag|flagRepeat) and lastCost == bestCost + 1: + continue + newCandidates.append((lastCost + coordBytes, lastTuple, lastFlag+256, coordBytes)) + candidates = newCandidates + bestTuple = min(candidates, key=lambda t:t[0]) + bestCost = bestTuple[0] + + flags = [] + while bestTuple: + cost, bestTuple, flag, coordBytes = bestTuple + flags.append(flag) + flags.reverse() + + compressedFlags = array.array("B") + compressedXs = array.array("B") + compressedYs = array.array("B") + coords = iter(deltas) + ff = [] + for flag in flags: + repeatCount, flag = flag >> 8, flag & 0xFF + compressedFlags.append(flag) + if flag & flagRepeat: + assert(repeatCount > 0) + compressedFlags.append(repeatCount) + else: + assert(repeatCount == 0) + for i in range(1 + repeatCount): + x,y = next(coords) + flagEncodeCoords(flag, x, y, compressedXs, compressedYs) + ff.append(flag) + try: + next(coords) + raise Exception("internal error") + except StopIteration: + pass + compressedFlags = compressedFlags.tostring() + compressedXs = compressedXs.tostring() + compressedYs = compressedYs.tostring() + + return (compressedFlags, compressedXs, compressedYs) + + def recalcBounds(self, glyfTable): + coords, endPts, flags = self.getCoordinates(glyfTable) + if len(coords) > 0: + if 0: + # This branch calculates exact glyph outline bounds + # analytically, handling cases without on-curve + # extremas, etc. 
However, the glyf table header + # simply says that the bounds should be min/max x/y + # "for coordinate data", so I suppose that means no + # fancy thing here, just get extremas of all coord + # points (on and off). As such, this branch is + # disabled. + + # Collect on-curve points + onCurveCoords = [coords[j] for j in range(len(coords)) + if flags[j] & flagOnCurve] + # Add implicit on-curve points + start = 0 + for end in endPts: + last = end + for j in range(start, end + 1): + if not ((flags[j] | flags[last]) & flagOnCurve): + x = (coords[last][0] + coords[j][0]) / 2 + y = (coords[last][1] + coords[j][1]) / 2 + onCurveCoords.append((x,y)) + last = j + start = end + 1 + # Add bounds for curves without an explicit extrema + start = 0 + for end in endPts: + last = end + for j in range(start, end + 1): + if not (flags[j] & flagOnCurve): + next = j + 1 if j < end else start + bbox = calcBounds([coords[last], coords[next]]) + if not pointInRect(coords[j], bbox): + # Ouch! + warnings.warn("Outline has curve with implicit extrema.") + # Ouch! Find analytical curve bounds. 
+ pthis = coords[j] + plast = coords[last] + if not (flags[last] & flagOnCurve): + plast = ((pthis[0]+plast[0])/2, (pthis[1]+plast[1])/2) + pnext = coords[next] + if not (flags[next] & flagOnCurve): + pnext = ((pthis[0]+pnext[0])/2, (pthis[1]+pnext[1])/2) + bbox = calcQuadraticBounds(plast, pthis, pnext) + onCurveCoords.append((bbox[0],bbox[1])) + onCurveCoords.append((bbox[2],bbox[3])) + last = j + start = end + 1 + + self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(onCurveCoords) + else: + self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(coords) + else: + self.xMin, self.yMin, self.xMax, self.yMax = (0, 0, 0, 0) + + def isComposite(self): + """Can be called on compact or expanded glyph.""" + if hasattr(self, "data") and self.data: + return struct.unpack(">h", self.data[:2])[0] == -1 + else: + return self.numberOfContours == -1 + + def __getitem__(self, componentIndex): + if not self.isComposite(): + raise ttLib.TTLibError("can't use glyph as sequence") + return self.components[componentIndex] + + def getCoordinates(self, glyfTable): + if self.numberOfContours > 0: + return self.coordinates, self.endPtsOfContours, self.flags + elif self.isComposite(): + # it's a composite + allCoords = GlyphCoordinates() + allFlags = array.array("B") + allEndPts = [] + for compo in self.components: + g = glyfTable[compo.glyphName] + coordinates, endPts, flags = g.getCoordinates(glyfTable) + if hasattr(compo, "firstPt"): + # move according to two reference points + x1,y1 = allCoords[compo.firstPt] + x2,y2 = coordinates[compo.secondPt] + move = x1-x2, y1-y2 + else: + move = compo.x, compo.y + + coordinates = GlyphCoordinates(coordinates) + if not hasattr(compo, "transform"): + coordinates.translate(move) + else: + apple_way = compo.flags & SCALED_COMPONENT_OFFSET + ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET + assert not (apple_way and ms_way) + if not (apple_way or ms_way): + scale_component_offset = SCALE_COMPONENT_OFFSET_DEFAULT # see top of this file + 
else: + scale_component_offset = apple_way + if scale_component_offset: + # the Apple way: first move, then scale (ie. scale the component offset) + coordinates.translate(move) + coordinates.transform(compo.transform) + else: + # the MS way: first scale, then move + coordinates.transform(compo.transform) + coordinates.translate(move) + offset = len(allCoords) + allEndPts.extend(e + offset for e in endPts) + allCoords.extend(coordinates) + allFlags.extend(flags) + return allCoords, allEndPts, allFlags + else: + return GlyphCoordinates(), [], array.array("B") + + def getComponentNames(self, glyfTable): + if not hasattr(self, "data"): + if self.isComposite(): + return [c.glyphName for c in self.components] + else: + return [] + + # Extract components without expanding glyph + + if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: + return [] # Not composite + + data = self.data + i = 10 + components = [] + more = 1 + while more: + flags, glyphID = struct.unpack(">HH", data[i:i+4]) + i += 4 + flags = int(flags) + components.append(glyfTable.getGlyphName(int(glyphID))) + + if flags & ARG_1_AND_2_ARE_WORDS: i += 4 + else: i += 2 + if flags & WE_HAVE_A_SCALE: i += 2 + elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4 + elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8 + more = flags & MORE_COMPONENTS + + return components + + def trim(self, remove_hinting=False): + """ Remove padding and, if requested, hinting, from a glyph. + This works on both expanded and compacted glyphs, without + expanding it.""" + if not hasattr(self, "data"): + if remove_hinting: + self.program = ttProgram.Program() + self.program.fromBytecode([]) + # No padding to trim. 
+ return + if not self.data: + return + numContours = struct.unpack(">h", self.data[:2])[0] + data = array.array("B", self.data) + i = 10 + if numContours >= 0: + i += 2 * numContours # endPtsOfContours + nCoordinates = ((data[i-2] << 8) | data[i-1]) + 1 + instructionLen = (data[i] << 8) | data[i+1] + if remove_hinting: + # Zero instruction length + data[i] = data [i+1] = 0 + i += 2 + if instructionLen: + # Splice it out + data = data[:i] + data[i+instructionLen:] + instructionLen = 0 + else: + i += 2 + instructionLen + + coordBytes = 0 + j = 0 + while True: + flag = data[i] + i = i + 1 + repeat = 1 + if flag & flagRepeat: + repeat = data[i] + 1 + i = i + 1 + xBytes = yBytes = 0 + if flag & flagXShort: + xBytes = 1 + elif not (flag & flagXsame): + xBytes = 2 + if flag & flagYShort: + yBytes = 1 + elif not (flag & flagYsame): + yBytes = 2 + coordBytes += (xBytes + yBytes) * repeat + j += repeat + if j >= nCoordinates: + break + assert j == nCoordinates, "bad glyph flags" + i += coordBytes + # Remove padding + data = data[:i] + else: + more = 1 + we_have_instructions = False + while more: + flags =(data[i] << 8) | data[i+1] + if remove_hinting: + flags &= ~WE_HAVE_INSTRUCTIONS + if flags & WE_HAVE_INSTRUCTIONS: + we_have_instructions = True + data[i+0] = flags >> 8 + data[i+1] = flags & 0xFF + i += 4 + flags = int(flags) + + if flags & ARG_1_AND_2_ARE_WORDS: i += 4 + else: i += 2 + if flags & WE_HAVE_A_SCALE: i += 2 + elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4 + elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8 + more = flags & MORE_COMPONENTS + if we_have_instructions: + instructionLen = (data[i] << 8) | data[i+1] + i += 2 + instructionLen + # Remove padding + data = data[:i] + + self.data = data.tostring() + + def removeHinting(self): + self.trim (remove_hinting=True) + + def draw(self, pen, glyfTable, offset=0): + + if self.isComposite(): + for component in self.components: + glyphName, transform = component.getComponentInfo() + pen.addComponent(glyphName, transform) + 
return + + coordinates, endPts, flags = self.getCoordinates(glyfTable) + if offset: + coordinates = coordinates.copy() + coordinates.translate((offset, 0)) + start = 0 + for end in endPts: + end = end + 1 + contour = coordinates[start:end] + cFlags = flags[start:end] + start = end + if 1 not in cFlags: + # There is not a single on-curve point on the curve, + # use pen.qCurveTo's special case by specifying None + # as the on-curve point. + contour.append(None) + pen.qCurveTo(*contour) + else: + # Shuffle the points so that contour the is guaranteed + # to *end* in an on-curve point, which we'll use for + # the moveTo. + firstOnCurve = cFlags.index(1) + 1 + contour = contour[firstOnCurve:] + contour[:firstOnCurve] + cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve] + pen.moveTo(contour[-1]) + while contour: + nextOnCurve = cFlags.index(1) + 1 + if nextOnCurve == 1: + pen.lineTo(contour[0]) + else: + pen.qCurveTo(*contour[:nextOnCurve]) + contour = contour[nextOnCurve:] + cFlags = cFlags[nextOnCurve:] + pen.closePath() + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ + + +class GlyphComponent(object): + + def __init__(self): + pass + + def getComponentInfo(self): + """Return the base glyph name and a transform.""" + # XXX Ignoring self.firstPt & self.lastpt for now: I need to implement + # something equivalent in fontTools.objects.glyph (I'd rather not + # convert it to an absolute offset, since it is valuable information). + # This method will now raise "AttributeError: x" on glyphs that use + # this TT feature. 
+ if hasattr(self, "transform"): + [[xx, xy], [yx, yy]] = self.transform + trans = (xx, xy, yx, yy, self.x, self.y) + else: + trans = (1, 0, 0, 1, self.x, self.y) + return self.glyphName, trans + + def decompile(self, data, glyfTable): + flags, glyphID = struct.unpack(">HH", data[:4]) + self.flags = int(flags) + glyphID = int(glyphID) + self.glyphName = glyfTable.getGlyphName(int(glyphID)) + #print ">>", reprflag(self.flags) + data = data[4:] + + if self.flags & ARG_1_AND_2_ARE_WORDS: + if self.flags & ARGS_ARE_XY_VALUES: + self.x, self.y = struct.unpack(">hh", data[:4]) + else: + x, y = struct.unpack(">HH", data[:4]) + self.firstPt, self.secondPt = int(x), int(y) + data = data[4:] + else: + if self.flags & ARGS_ARE_XY_VALUES: + self.x, self.y = struct.unpack(">bb", data[:2]) + else: + x, y = struct.unpack(">BB", data[:2]) + self.firstPt, self.secondPt = int(x), int(y) + data = data[2:] + + if self.flags & WE_HAVE_A_SCALE: + scale, = struct.unpack(">h", data[:2]) + self.transform = [[fi2fl(scale,14), 0], [0, fi2fl(scale,14)]] # fixed 2.14 + data = data[2:] + elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE: + xscale, yscale = struct.unpack(">hh", data[:4]) + self.transform = [[fi2fl(xscale,14), 0], [0, fi2fl(yscale,14)]] # fixed 2.14 + data = data[4:] + elif self.flags & WE_HAVE_A_TWO_BY_TWO: + (xscale, scale01, + scale10, yscale) = struct.unpack(">hhhh", data[:8]) + self.transform = [[fi2fl(xscale,14), fi2fl(scale01,14)], + [fi2fl(scale10,14), fi2fl(yscale,14)]] # fixed 2.14 + data = data[8:] + more = self.flags & MORE_COMPONENTS + haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS + self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | + SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | + NON_OVERLAPPING) + return more, haveInstructions, data + + def compile(self, more, haveInstructions, glyfTable): + data = b"" + + # reset all flags we will calculate ourselves + flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | + SCALED_COMPONENT_OFFSET | 
UNSCALED_COMPONENT_OFFSET | + NON_OVERLAPPING) + if more: + flags = flags | MORE_COMPONENTS + if haveInstructions: + flags = flags | WE_HAVE_INSTRUCTIONS + + if hasattr(self, "firstPt"): + if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255): + data = data + struct.pack(">BB", self.firstPt, self.secondPt) + else: + data = data + struct.pack(">HH", self.firstPt, self.secondPt) + flags = flags | ARG_1_AND_2_ARE_WORDS + else: + flags = flags | ARGS_ARE_XY_VALUES + if (-128 <= self.x <= 127) and (-128 <= self.y <= 127): + data = data + struct.pack(">bb", self.x, self.y) + else: + data = data + struct.pack(">hh", self.x, self.y) + flags = flags | ARG_1_AND_2_ARE_WORDS + + if hasattr(self, "transform"): + transform = [[fl2fi(x,14) for x in row] for row in self.transform] + if transform[0][1] or transform[1][0]: + flags = flags | WE_HAVE_A_TWO_BY_TWO + data = data + struct.pack(">hhhh", + transform[0][0], transform[0][1], + transform[1][0], transform[1][1]) + elif transform[0][0] != transform[1][1]: + flags = flags | WE_HAVE_AN_X_AND_Y_SCALE + data = data + struct.pack(">hh", + transform[0][0], transform[1][1]) + else: + flags = flags | WE_HAVE_A_SCALE + data = data + struct.pack(">h", + transform[0][0]) + + glyphID = glyfTable.getGlyphID(self.glyphName) + return struct.pack(">HH", flags, glyphID) + data + + def toXML(self, writer, ttFont): + attrs = [("glyphName", self.glyphName)] + if not hasattr(self, "firstPt"): + attrs = attrs + [("x", self.x), ("y", self.y)] + else: + attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)] + + if hasattr(self, "transform"): + transform = self.transform + if transform[0][1] or transform[1][0]: + attrs = attrs + [ + ("scalex", transform[0][0]), ("scale01", transform[0][1]), + ("scale10", transform[1][0]), ("scaley", transform[1][1]), + ] + elif transform[0][0] != transform[1][1]: + attrs = attrs + [ + ("scalex", transform[0][0]), ("scaley", transform[1][1]), + ] + else: + attrs = attrs + [("scale", 
transform[0][0])] + attrs = attrs + [("flags", hex(self.flags))] + writer.simpletag("component", attrs) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.glyphName = attrs["glyphName"] + if "firstPt" in attrs: + self.firstPt = safeEval(attrs["firstPt"]) + self.secondPt = safeEval(attrs["secondPt"]) + else: + self.x = safeEval(attrs["x"]) + self.y = safeEval(attrs["y"]) + if "scale01" in attrs: + scalex = safeEval(attrs["scalex"]) + scale01 = safeEval(attrs["scale01"]) + scale10 = safeEval(attrs["scale10"]) + scaley = safeEval(attrs["scaley"]) + self.transform = [[scalex, scale01], [scale10, scaley]] + elif "scalex" in attrs: + scalex = safeEval(attrs["scalex"]) + scaley = safeEval(attrs["scaley"]) + self.transform = [[scalex, 0], [0, scaley]] + elif "scale" in attrs: + scale = safeEval(attrs["scale"]) + self.transform = [[scale, 0], [0, scale]] + self.flags = safeEval(attrs["flags"]) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ + +class GlyphCoordinates(object): + + def __init__(self, iterable=[]): + self._a = array.array("h") + self.extend(iterable) + + def isFloat(self): + return self._a.typecode == 'f' + + def _ensureFloat(self): + if self.isFloat(): + return + # The conversion to list() is to work around Jython bug + self._a = array.array("f", list(self._a)) + + def _checkFloat(self, p): + if any(isinstance(v, float) for v in p): + p = [int(v) if int(v) == v else v for v in p] + if any(isinstance(v, float) for v in p): + self._ensureFloat() + return p + + @staticmethod + def zeros(count): + return GlyphCoordinates([(0,0)] * count) + + def copy(self): + c = GlyphCoordinates() + c._a.extend(self._a) + return c + + def __len__(self): + return len(self._a) // 2 + + def __getitem__(self, k): + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + return [self[i] for i in indices] + 
return self._a[2*k],self._a[2*k+1] + + def __setitem__(self, k, v): + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + # XXX This only works if len(v) == len(indices) + # TODO Implement __delitem__ + for j,i in enumerate(indices): + self[i] = v[j] + return + v = self._checkFloat(v) + self._a[2*k],self._a[2*k+1] = v + + def __repr__(self): + return 'GlyphCoordinates(['+','.join(str(c) for c in self)+'])' + + def append(self, p): + p = self._checkFloat(p) + self._a.extend(tuple(p)) + + def extend(self, iterable): + for p in iterable: + p = self._checkFloat(p) + self._a.extend(p) + + def relativeToAbsolute(self): + a = self._a + x,y = 0,0 + for i in range(len(a) // 2): + a[2*i ] = x = a[2*i ] + x + a[2*i+1] = y = a[2*i+1] + y + + def absoluteToRelative(self): + a = self._a + x,y = 0,0 + for i in range(len(a) // 2): + dx = a[2*i ] - x + dy = a[2*i+1] - y + x = a[2*i ] + y = a[2*i+1] + a[2*i ] = dx + a[2*i+1] = dy + + def translate(self, p): + (x,y) = p + a = self._a + for i in range(len(a) // 2): + a[2*i ] += x + a[2*i+1] += y + + def transform(self, t): + a = self._a + for i in range(len(a) // 2): + x = a[2*i ] + y = a[2*i+1] + px = x * t[0][0] + y * t[1][0] + py = x * t[0][1] + y * t[1][1] + self[i] = (px, py) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self._a == other._a + + +def reprflag(flag): + bin = "" + if isinstance(flag, str): + flag = byteord(flag) + while flag: + if flag & 0x01: + bin = "1" + bin + else: + bin = "0" + bin + flag = flag >> 1 + bin = (14 - len(bin)) * "0" + bin + return bin diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/G_M_A_P_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/G_M_A_P_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/G_M_A_P_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/G_M_A_P_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,128 @@ +from 
__future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable + +GMAPFormat = """ + > # big endian + tableVersionMajor: H + tableVersionMinor: H + flags: H + recordsCount: H + recordsOffset: H + fontNameLength: H +""" +# psFontName is a byte string which follows the record above. This is zero padded +# to the beginning of the records array. The recordsOffsst is 32 bit aligned. + +GMAPRecordFormat1 = """ + > # big endian + UV: L + cid: H + gid: H + ggid: H + name: 32s +""" + + +class GMAPRecord(object): + def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""): + self.UV = uv + self.cid = cid + self.gid = gid + self.ggid = ggid + self.name = name + + def toXML(self, writer, ttFont): + writer.begintag("GMAPRecord") + writer.newline() + writer.simpletag("UV", value=self.UV) + writer.newline() + writer.simpletag("cid", value=self.cid) + writer.newline() + writer.simpletag("gid", value=self.gid) + writer.newline() + writer.simpletag("glyphletGid", value=self.gid) + writer.newline() + writer.simpletag("GlyphletName", value=self.name) + writer.newline() + writer.endtag("GMAPRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name == "GlyphletName": + self.name = value + else: + setattr(self, name, safeEval(value)) + + def compile(self, ttFont): + if self.UV is None: + self.UV = 0 + nameLen = len(self.name) + if nameLen < 32: + self.name = self.name + "\0"*(32 - nameLen) + data = sstruct.pack(GMAPRecordFormat1, self) + return data + + def __repr__(self): + return "GMAPRecord[ UV: " + str(self.UV) + ", cid: " + str(self.cid) + ", gid: " + str(self.gid) + ", ggid: " + str(self.ggid) + ", Glyphlet Name: " + str(self.name) + " ]" + + +class table_G_M_A_P_(DefaultTable.DefaultTable): + + dependencies = [] + + def decompile(self, data, ttFont): + dummy, newData = 
sstruct.unpack2(GMAPFormat, data, self) + self.psFontName = tostr(newData[:self.fontNameLength]) + assert (self.recordsOffset % 4) == 0, "GMAP error: recordsOffset is not 32 bit aligned." + newData = data[self.recordsOffset:] + self.gmapRecords = [] + for i in range (self.recordsCount): + gmapRecord, newData = sstruct.unpack2(GMAPRecordFormat1, newData, GMAPRecord()) + gmapRecord.name = gmapRecord.name.strip('\0') + self.gmapRecords.append(gmapRecord) + + def compile(self, ttFont): + self.recordsCount = len(self.gmapRecords) + self.fontNameLength = len(self.psFontName) + self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4) + data = sstruct.pack(GMAPFormat, self) + data = data + tobytes(self.psFontName) + data = data + b"\0" * (self.recordsOffset - len(data)) + for record in self.gmapRecords: + data = data + record.compile(ttFont) + return data + + def toXML(self, writer, ttFont): + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(GMAPFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + writer.simpletag("PSFontName", value=self.psFontName) + writer.newline() + for gmapRecord in self.gmapRecords: + gmapRecord.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "GMAPRecord": + if not hasattr(self, "gmapRecords"): + self.gmapRecords = [] + gmapRecord = GMAPRecord() + self.gmapRecords.append(gmapRecord) + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + gmapRecord.fromXML(name, attrs, content, ttFont) + else: + value = attrs["value"] + if name == "PSFontName": + self.psFontName = value + else: + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/G_P_K_G_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/G_P_K_G_.py --- 
fonttools-2.4/Snippets/fontTools/ttLib/tables/G_P_K_G_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/G_P_K_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,129 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, readHex +from . import DefaultTable +import sys +import array + +GPKGFormat = """ + > # big endian + version: H + flags: H + numGMAPs: H + numGlyplets: H +""" +# psFontName is a byte string which follows the record above. This is zero padded +# to the beginning of the records array. The recordsOffsst is 32 bit aligned. + + +class table_G_P_K_G_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + dummy, newData = sstruct.unpack2(GPKGFormat, data, self) + + GMAPoffsets = array.array("I") + endPos = (self.numGMAPs+1) * 4 + GMAPoffsets.fromstring(newData[:endPos]) + if sys.byteorder != "big": + GMAPoffsets.byteswap() + self.GMAPs = [] + for i in range(self.numGMAPs): + start = GMAPoffsets[i] + end = GMAPoffsets[i+1] + self.GMAPs.append(data[start:end]) + pos = endPos + endPos = pos + (self.numGlyplets + 1)*4 + glyphletOffsets = array.array("I") + glyphletOffsets.fromstring(newData[pos:endPos]) + if sys.byteorder != "big": + glyphletOffsets.byteswap() + self.glyphlets = [] + for i in range(self.numGlyplets): + start = glyphletOffsets[i] + end = glyphletOffsets[i+1] + self.glyphlets.append(data[start:end]) + + def compile(self, ttFont): + self.numGMAPs = len(self.GMAPs) + self.numGlyplets = len(self.glyphlets) + GMAPoffsets = [0]*(self.numGMAPs + 1) + glyphletOffsets = [0]*(self.numGlyplets + 1) + + dataList =[ sstruct.pack(GPKGFormat, self)] + + pos = len(dataList[0]) + (self.numGMAPs + 1)*4 + (self.numGlyplets + 1)*4 + GMAPoffsets[0] = pos + for i in range(1, self.numGMAPs +1): + pos += len(self.GMAPs[i-1]) + GMAPoffsets[i] = pos + gmapArray = array.array("I", 
GMAPoffsets) + if sys.byteorder != "big": + gmapArray.byteswap() + dataList.append(gmapArray.tostring()) + + glyphletOffsets[0] = pos + for i in range(1, self.numGlyplets +1): + pos += len(self.glyphlets[i-1]) + glyphletOffsets[i] = pos + glyphletArray = array.array("I", glyphletOffsets) + if sys.byteorder != "big": + glyphletArray.byteswap() + dataList.append(glyphletArray.tostring()) + dataList += self.GMAPs + dataList += self.glyphlets + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(GPKGFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + + writer.begintag("GMAPs") + writer.newline() + for gmapData in self.GMAPs: + writer.begintag("hexdata") + writer.newline() + writer.dumphex(gmapData) + writer.endtag("hexdata") + writer.newline() + writer.endtag("GMAPs") + writer.newline() + + writer.begintag("glyphlets") + writer.newline() + for glyphletData in self.glyphlets: + writer.begintag("hexdata") + writer.newline() + writer.dumphex(glyphletData) + writer.endtag("hexdata") + writer.newline() + writer.endtag("glyphlets") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "GMAPs": + if not hasattr(self, "GMAPs"): + self.GMAPs = [] + for element in content: + if isinstance(element, basestring): + continue + itemName, itemAttrs, itemContent = element + if itemName == "hexdata": + self.GMAPs.append(readHex(itemContent)) + elif name == "glyphlets": + if not hasattr(self, "glyphlets"): + self.glyphlets = [] + for element in content: + if isinstance(element, basestring): + continue + itemName, itemAttrs, itemContent = element + if itemName == "hexdata": + self.glyphlets.append(readHex(itemContent)) + else: + setattr(self, name, safeEval(attrs["value"])) diff -Nru 
fonttools-2.4/Snippets/fontTools/ttLib/tables/G_P_O_S_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/G_P_O_S_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/G_P_O_S_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/G_P_O_S_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_G_P_O_S_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/G_S_U_B_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/G_S_U_B_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/G_S_U_B_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/G_S_U_B_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_G_S_U_B_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_g_v_a_r.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_v_a_r.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_g_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,717 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import array +import io +import sys +import struct + +# Apple's documentation of 'gvar': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html +# +# FreeType2 source code for parsing 'gvar': +# http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/src/truetype/ttgxvar.c + +GVAR_HEADER_FORMAT = """ + > # big endian + version: H + reserved: H + axisCount: H + sharedCoordCount: H + offsetToCoord: I + glyphCount: H + flags: H + offsetToData: I +""" + +GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT) + +TUPLES_SHARE_POINT_NUMBERS = 0x8000 +TUPLE_COUNT_MASK = 0x0fff + +EMBEDDED_TUPLE_COORD = 0x8000 +INTERMEDIATE_TUPLE = 0x4000 +PRIVATE_POINT_NUMBERS = 0x2000 +TUPLE_INDEX_MASK = 0x0fff + +DELTAS_ARE_ZERO = 0x80 +DELTAS_ARE_WORDS = 0x40 +DELTA_RUN_COUNT_MASK = 0x3f + +POINTS_ARE_WORDS = 0x80 +POINT_RUN_COUNT_MASK = 0x7f + + +class table__g_v_a_r(DefaultTable.DefaultTable): + + dependencies = ["fvar", "glyf"] + + def compile(self, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + + sharedCoords = self.compileSharedCoords_(axisTags) + sharedCoordIndices = {coord:i for i, coord in enumerate(sharedCoords)} + sharedCoordSize = sum([len(c) for c in sharedCoords]) + + compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedCoordIndices) + offset = 0 + offsets = [] + for glyph in compiledGlyphs: + offsets.append(offset) + offset += len(glyph) + offsets.append(offset) + compiledOffsets, tableFormat = self.compileOffsets_(offsets) + + header = {} + header["version"] = self.version + header["reserved"] = self.reserved + header["axisCount"] = len(axisTags) + header["sharedCoordCount"] = len(sharedCoords) + header["offsetToCoord"] = GVAR_HEADER_SIZE + len(compiledOffsets) + header["glyphCount"] = len(compiledGlyphs) + header["flags"] = tableFormat + header["offsetToData"] = header["offsetToCoord"] + sharedCoordSize + compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header) + + result = 
[compiledHeader, compiledOffsets] + result.extend(sharedCoords) + result.extend(compiledGlyphs) + return bytesjoin(result) + + def compileSharedCoords_(self, axisTags): + coordCount = {} + for variations in self.variations.values(): + for gvar in variations: + coord = gvar.compileCoord(axisTags) + coordCount[coord] = coordCount.get(coord, 0) + 1 + sharedCoords = [(count, coord) for (coord, count) in coordCount.items() if count > 1] + sharedCoords.sort(reverse=True) + MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1 + sharedCoords = sharedCoords[:MAX_NUM_SHARED_COORDS] + return [c[1] for c in sharedCoords] # Strip off counts. + + def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices): + result = [] + for glyphName in ttFont.getGlyphOrder(): + glyph = ttFont["glyf"][glyphName] + numPointsInGlyph = self.getNumPoints_(glyph) + result.append(self.compileGlyph_(glyphName, numPointsInGlyph, axisTags, sharedCoordIndices)) + return result + + def compileGlyph_(self, glyphName, numPointsInGlyph, axisTags, sharedCoordIndices): + variations = self.variations.get(glyphName, []) + variations = [v for v in variations if v.hasImpact()] + if len(variations) == 0: + return b"" + + # Each glyph variation tuples modifies a set of control points. To indicate + # which exact points are getting modified, a single tuple can either refer + # to a shared set of points, or the tuple can supply its private point numbers. + # Because the impact of sharing can be positive (no need for a private point list) + # or negative (need to supply 0,0 deltas for unused points), it is not obvious + # how to determine which tuples should take their points from the shared + # pool versus have their own. Perhaps we should resort to brute force, + # and try all combinations? However, if a glyph has n variation tuples, + # we would need to try 2^n combinations (because each tuple may or may not + # be part of the shared set). How many variations tuples do glyphs have? 
+ # + # Skia.ttf: {3: 1, 5: 11, 6: 41, 7: 62, 8: 387, 13: 1, 14: 3} + # JamRegular.ttf: {3: 13, 4: 122, 5: 1, 7: 4, 8: 1, 9: 1, 10: 1} + # BuffaloGalRegular.ttf: {1: 16, 2: 13, 4: 2, 5: 4, 6: 19, 7: 1, 8: 3, 9: 18} + # (Reading example: In Skia.ttf, 41 glyphs have 6 variation tuples). + # + # Is this even worth optimizing? If we never use a shared point list, + # the private lists will consume 112K for Skia, 5K for BuffaloGalRegular, + # and 15K for JamRegular. If we always use a shared point list, + # the shared lists will consume 16K for Skia, 3K for BuffaloGalRegular, + # and 10K for JamRegular. However, in the latter case the delta arrays + # will become larger, but I haven't yet measured by how much. From + # gut feeling (which may be wrong), the optimum is to share some but + # not all points; however, then we would need to try all combinations. + # + # For the time being, we try two variants and then pick the better one: + # (a) each tuple supplies its own private set of points; + # (b) all tuples refer to a shared set of points, which consists of + # "every control point in the glyph". + allPoints = set(range(numPointsInGlyph)) + tuples = [] + data = [] + someTuplesSharePoints = False + for gvar in variations: + privateTuple, privateData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) + sharedTuple, sharedData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=allPoints) + # TODO: If we use shared points, Apple MacOS X 10.9.5 cannot display our fonts. + # This is probably a problem with our code; find the problem and fix it. 
+ #if (len(sharedTuple) + len(sharedData)) < (len(privateTuple) + len(privateData)): + if False: + tuples.append(sharedTuple) + data.append(sharedData) + someTuplesSharePoints = True + else: + tuples.append(privateTuple) + data.append(privateData) + if someTuplesSharePoints: + data = bytechr(0) + bytesjoin(data) # 0x00 = "all points in glyph" + tupleCount = TUPLES_SHARE_POINT_NUMBERS | len(tuples) + else: + data = bytesjoin(data) + tupleCount = len(tuples) + tuples = bytesjoin(tuples) + result = struct.pack(">HH", tupleCount, 4 + len(tuples)) + tuples + data + if len(result) % 2 != 0: + result = result + b"\0" # padding + return result + + def decompile(self, data, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + glyphs = ttFont.getGlyphOrder() + sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self) + assert len(glyphs) == self.glyphCount + assert len(axisTags) == self.axisCount + offsets = self.decompileOffsets_(data[GVAR_HEADER_SIZE:], tableFormat=(self.flags & 1), glyphCount=self.glyphCount) + sharedCoords = self.decompileSharedCoords_(axisTags, data) + self.variations = {} + for i in range(self.glyphCount): + glyphName = glyphs[i] + glyph = ttFont["glyf"][glyphName] + numPointsInGlyph = self.getNumPoints_(glyph) + gvarData = data[self.offsetToData + offsets[i] : self.offsetToData + offsets[i + 1]] + self.variations[glyphName] = \ + self.decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData) + + def decompileSharedCoords_(self, axisTags, data): + result, _pos = GlyphVariation.decompileCoords_(axisTags, self.sharedCoordCount, data, self.offsetToCoord) + return result + + @staticmethod + def decompileOffsets_(data, tableFormat, glyphCount): + if tableFormat == 0: + # Short format: array of UInt16 + offsets = array.array("H") + offsetsSize = (glyphCount + 1) * 2 + else: + # Long format: array of UInt32 + offsets = array.array("I") + offsetsSize = (glyphCount + 1) * 4 + offsets.fromstring(data[0 : offsetsSize]) + if 
sys.byteorder != "big": + offsets.byteswap() + + # In the short format, offsets need to be multiplied by 2. + # This is not documented in Apple's TrueType specification, + # but can be inferred from the FreeType implementation, and + # we could verify it with two sample GX fonts. + if tableFormat == 0: + offsets = [off * 2 for off in offsets] + + return offsets + + @staticmethod + def compileOffsets_(offsets): + """Packs a list of offsets into a 'gvar' offset table. + + Returns a pair (bytestring, tableFormat). Bytestring is the + packed offset table. Format indicates whether the table + uses short (tableFormat=0) or long (tableFormat=1) integers. + The returned tableFormat should get packed into the flags field + of the 'gvar' header. + """ + assert len(offsets) >= 2 + for i in range(1, len(offsets)): + assert offsets[i - 1] <= offsets[i] + if max(offsets) <= 0xffff * 2: + packed = array.array("H", [n >> 1 for n in offsets]) + tableFormat = 0 + else: + packed = array.array("I", offsets) + tableFormat = 1 + if sys.byteorder != "big": + packed.byteswap() + return (packed.tostring(), tableFormat) + + def decompileGlyph_(self, numPointsInGlyph, sharedCoords, axisTags, data): + if len(data) < 4: + return [] + numAxes = len(axisTags) + tuples = [] + flags, offsetToData = struct.unpack(">HH", data[:4]) + pos = 4 + dataPos = offsetToData + if (flags & TUPLES_SHARE_POINT_NUMBERS) != 0: + sharedPoints, dataPos = GlyphVariation.decompilePoints_(numPointsInGlyph, data, dataPos) + else: + sharedPoints = [] + for _ in range(flags & TUPLE_COUNT_MASK): + dataSize, flags = struct.unpack(">HH", data[pos:pos+4]) + tupleSize = GlyphVariation.getTupleSize_(flags, numAxes) + tupleData = data[pos : pos + tupleSize] + pointDeltaData = data[dataPos : dataPos + dataSize] + tuples.append(self.decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, tupleData, pointDeltaData)) + pos += tupleSize + dataPos += dataSize + return tuples + + @staticmethod + def 
decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, data, tupleData): + flags = struct.unpack(">H", data[2:4])[0] + + pos = 4 + if (flags & EMBEDDED_TUPLE_COORD) == 0: + coord = sharedCoords[flags & TUPLE_INDEX_MASK] + else: + coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + if (flags & INTERMEDIATE_TUPLE) != 0: + minCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + maxCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + else: + minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) + axes = {} + for axis in axisTags: + coords = minCoord[axis], coord[axis], maxCoord[axis] + if coords != (0.0, 0.0, 0.0): + axes[axis] = coords + pos = 0 + if (flags & PRIVATE_POINT_NUMBERS) != 0: + points, pos = GlyphVariation.decompilePoints_(numPointsInGlyph, tupleData, pos) + else: + points = sharedPoints + deltas_x, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) + deltas_y, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) + deltas = [None] * numPointsInGlyph + for p, x, y in zip(points, deltas_x, deltas_y): + deltas[p] = (x, y) + return GlyphVariation(axes, deltas) + + @staticmethod + def computeMinMaxCoord_(coord): + minCoord = {} + maxCoord = {} + for (axis, value) in coord.items(): + minCoord[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + maxCoord[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + return (minCoord, maxCoord) + + def toXML(self, writer, ttFont, progress=None): + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("reserved", value=self.reserved) + writer.newline() + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + for glyphName in ttFont.getGlyphOrder(): + variations = self.variations.get(glyphName) + if not variations: + continue + writer.begintag("glyphVariations", glyph=glyphName) + writer.newline() + for gvar in variations: + gvar.toXML(writer, axisTags) + writer.endtag("glyphVariations") + 
writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.version = safeEval(attrs["value"]) + elif name == "reserved": + self.reserved = safeEval(attrs["value"]) + elif name == "glyphVariations": + if not hasattr(self, "variations"): + self.variations = {} + glyphName = attrs["glyph"] + glyph = ttFont["glyf"][glyphName] + numPointsInGlyph = self.getNumPoints_(glyph) + glyphVariations = [] + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + if name == "tuple": + gvar = GlyphVariation({}, [None] * numPointsInGlyph) + glyphVariations.append(gvar) + for tupleElement in content: + if isinstance(tupleElement, tuple): + tupleName, tupleAttrs, tupleContent = tupleElement + gvar.fromXML(tupleName, tupleAttrs, tupleContent) + self.variations[glyphName] = glyphVariations + + @staticmethod + def getNumPoints_(glyph): + NUM_PHANTOM_POINTS = 4 + if glyph.isComposite(): + return len(glyph.components) + NUM_PHANTOM_POINTS + else: + # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute. + return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS + + +class GlyphVariation(object): + def __init__(self, axes, coordinates): + self.axes = axes + self.coordinates = coordinates + + def __repr__(self): + axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])) + return "" % (axes, self.coordinates) + + def __eq__(self, other): + return self.coordinates == other.coordinates and self.axes == other.axes + + def getUsedPoints(self): + result = set() + for i, point in enumerate(self.coordinates): + if point is not None: + result.add(i) + return result + + def hasImpact(self): + """Returns True if this GlyphVariation has any visible impact. + + If the result is False, the GlyphVariation can be omitted from the font + without making any visible difference. 
+ """ + for c in self.coordinates: + if c is not None: + return True + return False + + def toXML(self, writer, axisTags): + writer.begintag("tuple") + writer.newline() + for axis in axisTags: + value = self.axes.get(axis) + if value is not None: + minValue, value, maxValue = value + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + if minValue == defaultMinValue and maxValue == defaultMaxValue: + writer.simpletag("coord", axis=axis, value=value) + else: + writer.simpletag("coord", axis=axis, value=value, min=minValue, max=maxValue) + writer.newline() + wrote_any_points = False + for i, point in enumerate(self.coordinates): + if point is not None: + writer.simpletag("delta", pt=i, x=point[0], y=point[1]) + writer.newline() + wrote_any_points = True + if not wrote_any_points: + writer.comment("no deltas") + writer.newline() + writer.endtag("tuple") + writer.newline() + + def fromXML(self, name, attrs, _content): + if name == "coord": + axis = attrs["axis"] + value = float(attrs["value"]) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + minValue = float(attrs.get("min", defaultMinValue)) + maxValue = float(attrs.get("max", defaultMaxValue)) + self.axes[axis] = (minValue, value, maxValue) + elif name == "delta": + point = safeEval(attrs["pt"]) + x = safeEval(attrs["x"]) + y = safeEval(attrs["y"]) + self.coordinates[point] = (x, y) + + def compile(self, axisTags, sharedCoordIndices, sharedPoints): + tupleData = [] + + coord = self.compileCoord(axisTags) + if coord in sharedCoordIndices: + flags = sharedCoordIndices[coord] + else: + flags = EMBEDDED_TUPLE_COORD + tupleData.append(coord) + + intermediateCoord = self.compileIntermediateCoord(axisTags) + if intermediateCoord is not None: + flags |= INTERMEDIATE_TUPLE + tupleData.append(intermediateCoord) + + if sharedPoints is not None: + auxData = 
self.compileDeltas(sharedPoints) + else: + flags |= PRIVATE_POINT_NUMBERS + points = self.getUsedPoints() + numPointsInGlyph = len(self.coordinates) + auxData = self.compilePoints(points, numPointsInGlyph) + self.compileDeltas(points) + + tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(tupleData) + return (tupleData, auxData) + + def compileCoord(self, axisTags): + result = [] + for axis in axisTags: + _minValue, value, _maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + result.append(struct.pack(">h", floatToFixed(value, 14))) + return bytesjoin(result) + + def compileIntermediateCoord(self, axisTags): + needed = False + for axis in axisTags: + minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + if (minValue != defaultMinValue) or (maxValue != defaultMaxValue): + needed = True + break + if not needed: + return None + minCoords = [] + maxCoords = [] + for axis in axisTags: + minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + minCoords.append(struct.pack(">h", floatToFixed(minValue, 14))) + maxCoords.append(struct.pack(">h", floatToFixed(maxValue, 14))) + return bytesjoin(minCoords + maxCoords) + + @staticmethod + def decompileCoord_(axisTags, data, offset): + coord = {} + pos = offset + for axis in axisTags: + coord[axis] = fixedToFloat(struct.unpack(">h", data[pos:pos+2])[0], 14) + pos += 2 + return coord, pos + + @staticmethod + def decompileCoords_(axisTags, numCoords, data, offset): + result = [] + pos = offset + for _ in range(numCoords): + coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + result.append(coord) + return result, pos + + @staticmethod + def compilePoints(points, numPointsInGlyph): + # If the set consists of all points in the glyph, it gets encoded with + # a special encoding: a single zero byte. 
+ if len(points) == numPointsInGlyph: + return b"\0" + + # In the 'gvar' table, the packing of point numbers is a little surprising. + # It consists of multiple runs, each being a delta-encoded list of integers. + # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as + # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1. + # There are two types of runs, with values being either 8 or 16 bit unsigned + # integers. + points = list(points) + points.sort() + numPoints = len(points) + + # The binary representation starts with the total number of points in the set, + # encoded into one or two bytes depending on the value. + if numPoints < 0x80: + result = [bytechr(numPoints)] + else: + result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)] + + MAX_RUN_LENGTH = 127 + pos = 0 + while pos < numPoints: + run = io.BytesIO() + runLength = 0 + lastValue = 0 + useByteEncoding = (points[pos] <= 0xff) + while pos < numPoints and runLength <= MAX_RUN_LENGTH: + curValue = points[pos] + delta = curValue - lastValue + if useByteEncoding and delta > 0xff: + # we need to start a new run (which will not use byte encoding) + break + if useByteEncoding: + run.write(bytechr(delta)) + else: + run.write(bytechr(delta >> 8)) + run.write(bytechr(delta & 0xff)) + lastValue = curValue + pos += 1 + runLength += 1 + if useByteEncoding: + runHeader = bytechr(runLength - 1) + else: + runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS) + result.append(runHeader) + result.append(run.getvalue()) + + return bytesjoin(result) + + @staticmethod + def decompilePoints_(numPointsInGlyph, data, offset): + """(numPointsInGlyph, data, offset) --> ([point1, point2, ...], newOffset)""" + pos = offset + numPointsInData = byteord(data[pos]) + pos += 1 + if (numPointsInData & POINTS_ARE_WORDS) != 0: + numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | byteord(data[pos]) + pos += 1 + if numPointsInData == 0: + return 
(range(numPointsInGlyph), pos) + result = [] + while len(result) < numPointsInData: + runHeader = byteord(data[pos]) + pos += 1 + numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1 + point = 0 + if (runHeader & POINTS_ARE_WORDS) == 0: + for _ in range(numPointsInRun): + point += byteord(data[pos]) + pos += 1 + result.append(point) + else: + for _ in range(numPointsInRun): + point += struct.unpack(">H", data[pos:pos+2])[0] + pos += 2 + result.append(point) + if max(result) >= numPointsInGlyph: + raise TTLibError("malformed 'gvar' table") + return (result, pos) + + def compileDeltas(self, points): + deltaX = [] + deltaY = [] + for p in sorted(list(points)): + c = self.coordinates[p] + if c is not None: + deltaX.append(c[0]) + deltaY.append(c[1]) + return self.compileDeltaValues_(deltaX) + self.compileDeltaValues_(deltaY) + + @staticmethod + def compileDeltaValues_(deltas): + """[value1, value2, value3, ...] --> bytestring + + Emits a sequence of runs. Each run starts with a + byte-sized header whose 6 least significant bits + (header & 0x3F) indicate how many values are encoded + in this run. The stored length is the actual length + minus one; run lengths are thus in the range [1..64]. + If the header byte has its most significant bit (0x80) + set, all values in this run are zero, and no data + follows. Otherwise, the header byte is followed by + ((header & 0x3F) + 1) signed values. If (header & + 0x40) is clear, the delta values are stored as signed + bytes; if (header & 0x40) is set, the delta values are + signed 16-bit integers. + """ # Explaining the format because the 'gvar' spec is hard to understand. 
+ stream = io.BytesIO() + pos = 0 + while pos < len(deltas): + value = deltas[pos] + if value == 0: + pos = GlyphVariation.encodeDeltaRunAsZeroes_(deltas, pos, stream) + elif value >= -128 and value <= 127: + pos = GlyphVariation.encodeDeltaRunAsBytes_(deltas, pos, stream) + else: + pos = GlyphVariation.encodeDeltaRunAsWords_(deltas, pos, stream) + return stream.getvalue() + + @staticmethod + def encodeDeltaRunAsZeroes_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64 and deltas[pos] == 0: + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(DELTAS_ARE_ZERO | (runLength - 1))) + return pos + + @staticmethod + def encodeDeltaRunAsBytes_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64: + value = deltas[pos] + if value < -128 or value > 127: + break + # Within a byte-encoded run of deltas, a single zero + # is best stored literally as 0x00 value. However, + # if are two or more zeroes in a sequence, it is + # better to start a new run. For example, the sequence + # of deltas [15, 15, 0, 15, 15] becomes 6 bytes + # (04 0F 0F 00 0F 0F) when storing the zero value + # literally, but 7 bytes (01 0F 0F 80 01 0F 0F) + # when starting a new run. + if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0: + break + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(runLength - 1)) + for i in range(offset, pos): + stream.write(struct.pack('b', deltas[i])) + return pos + + @staticmethod + def encodeDeltaRunAsWords_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64: + value = deltas[pos] + # Within a word-encoded run of deltas, it is easiest + # to start a new run (with a different encoding) + # whenever we encounter a zero value. 
For example, + # the sequence [0x6666, 0, 0x7777] needs 7 bytes when + # storing the zero literally (42 66 66 00 00 77 77), + # and equally 7 bytes when starting a new run + # (40 66 66 80 40 77 77). + if value == 0: + break + + # Within a word-encoded run of deltas, a single value + # in the range (-128..127) should be encoded literally + # because it is more compact. For example, the sequence + # [0x6666, 2, 0x7777] becomes 7 bytes when storing + # the value literally (42 66 66 00 02 77 77), but 8 bytes + # when starting a new run (40 66 66 00 02 40 77 77). + isByteEncodable = lambda value: value >= -128 and value <= 127 + if isByteEncodable(value) and pos+1 < numDeltas and isByteEncodable(deltas[pos+1]): + break + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(DELTAS_ARE_WORDS | (runLength - 1))) + for i in range(offset, pos): + stream.write(struct.pack('>h', deltas[i])) + return pos + + @staticmethod + def decompileDeltas_(numDeltas, data, offset): + """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)""" + result = [] + pos = offset + while len(result) < numDeltas: + runHeader = byteord(data[pos]) + pos += 1 + numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1 + if (runHeader & DELTAS_ARE_ZERO) != 0: + result.extend([0] * numDeltasInRun) + elif (runHeader & DELTAS_ARE_WORDS) != 0: + for _ in range(numDeltasInRun): + result.append(struct.unpack(">h", data[pos:pos+2])[0]) + pos += 2 + else: + for _ in range(numDeltasInRun): + result.append(struct.unpack(">b", data[pos:pos+1])[0]) + pos += 1 + assert len(result) == numDeltas + return (result, pos) + + @staticmethod + def getTupleSize_(flags, axisCount): + size = 4 + if (flags & EMBEDDED_TUPLE_COORD) != 0: + size += axisCount * 2 + if (flags & INTERMEDIATE_TUPLE) != 0: + size += axisCount * 4 + return size diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_g_v_a_r_test.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_v_a_r_test.py --- 
fonttools-2.4/Snippets/fontTools/ttLib/tables/_g_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,539 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r, GlyphVariation +import random +import unittest + +def hexencode(s): + h = hexStr(s).upper() + return ' '.join([h[i:i+2] for i in range(0, len(h), 2)]) + +# Glyph variation table of uppercase I in the Skia font, as printed in Apple's +# TrueType spec. The actual Skia font uses a different table for uppercase I. +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html +SKIA_GVAR_I = deHexStr( + "00 08 00 24 00 33 20 00 00 15 20 01 00 1B 20 02 " + "00 24 20 03 00 15 20 04 00 26 20 07 00 0D 20 06 " + "00 1A 20 05 00 40 01 01 01 81 80 43 FF 7E FF 7E " + "FF 7E FF 7E 00 81 45 01 01 01 03 01 04 01 04 01 " + "04 01 02 80 40 00 82 81 81 04 3A 5A 3E 43 20 81 " + "04 0E 40 15 45 7C 83 00 0D 9E F3 F2 F0 F0 F0 F0 " + "F3 9E A0 A1 A1 A1 9F 80 00 91 81 91 00 0D 0A 0A " + "09 0A 0A 0A 0A 0A 0A 0A 0A 0A 0A 0B 80 00 15 81 " + "81 00 C4 89 00 C4 83 00 0D 80 99 98 96 96 96 96 " + "99 80 82 83 83 83 81 80 40 FF 18 81 81 04 E6 F9 " + "10 21 02 81 04 E8 E5 EB 4D DA 83 00 0D CE D3 D4 " + "D3 D3 D3 D5 D2 CE CC CD CD CD CD 80 00 A1 81 91 " + "00 0D 07 03 04 02 02 02 03 03 07 07 08 08 08 07 " + "80 00 09 81 81 00 28 40 00 A4 02 24 24 66 81 04 " + "08 FA FA FA 28 83 00 82 02 FF FF FF 83 02 01 01 " + "01 84 91 00 80 06 07 08 08 08 08 0A 07 80 03 FE " + "FF FF FF 81 00 08 81 82 02 EE EE EE 8B 6D 00") + +# Shared coordinates in the Skia font, as printed in Apple's TrueType spec. 
+SKIA_SHARED_COORDS = deHexStr( + "40 00 00 00 C0 00 00 00 00 00 40 00 00 00 C0 00 " + "C0 00 C0 00 40 00 C0 00 40 00 40 00 C0 00 40 00") + + +class GlyphVariationTableTest(unittest.TestCase): + def test_compileOffsets_shortFormat(self): + self.assertEqual((deHexStr("00 00 00 02 FF C0"), 0), + table__g_v_a_r.compileOffsets_([0, 4, 0x1ff80])) + + def test_compileOffsets_longFormat(self): + self.assertEqual((deHexStr("00 00 00 00 00 00 00 04 CA FE BE EF"), 1), + table__g_v_a_r.compileOffsets_([0, 4, 0xCAFEBEEF])) + + def test_decompileOffsets_shortFormat(self): + decompileOffsets = table__g_v_a_r.decompileOffsets_ + data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") + self.assertEqual([2*0x0011, 2*0x2233, 2*0x4455, 2*0x6677, 2*0x8899, 2*0xaabb], + list(decompileOffsets(data, tableFormat=0, glyphCount=5))) + + def test_decompileOffsets_longFormat(self): + decompileOffsets = table__g_v_a_r.decompileOffsets_ + data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") + self.assertEqual([0x00112233, 0x44556677, 0x8899aabb], + list(decompileOffsets(data, tableFormat=1, glyphCount=2))) + + def test_compileGlyph_noVariations(self): + table = table__g_v_a_r() + table.variations = {} + self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) + + def test_compileGlyph_emptyVariations(self): + table = table__g_v_a_r() + table.variations = {"glyphname": []} + self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) + + def test_compileGlyph_onlyRedundantVariations(self): + table = table__g_v_a_r() + axes = {"wght": (0.3, 0.4, 0.5), "opsz": (0.7, 0.8, 0.9)} + table.variations = {"glyphname": [ + GlyphVariation(axes, [None] * 4), + GlyphVariation(axes, [None] * 4), + GlyphVariation(axes, [None] * 4) + ]} + self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) + + def test_compileGlyph_roundTrip(self): + table = table__g_v_a_r() + axisTags = ["wght", "wdth"] + numPointsInGlyph = 4 + glyphCoords = [(1,1), 
(2,2), (3,3), (4,4)] + gvar1 = GlyphVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) + gvar2 = GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) + table.variations = {"oslash": [gvar1, gvar2]} + data = table.compileGlyph_("oslash", numPointsInGlyph, axisTags, {}) + self.assertEqual([gvar1, gvar2], table.decompileGlyph_(numPointsInGlyph, {}, axisTags, data)) + + def test_compileSharedCoords(self): + table = table__g_v_a_r() + table.variations = {} + deltas = [None] * 4 + table.variations["A"] = [ + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.5, 0.7, 1.0)}, deltas) + ] + table.variations["B"] = [ + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.7, 1.0)}, deltas), + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.8, 1.0)}, deltas) + ] + table.variations["C"] = [ + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.7, 1.0)}, deltas), + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.8, 1.0)}, deltas), + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.9, 1.0)}, deltas) + ] + # {"wght":1.0, "wdth":0.7} is shared 3 times; {"wght":1.0, "wdth":0.8} is shared twice. + # Min and max values are not part of the shared coordinate pool and should get ignored. 
+ result = table.compileSharedCoords_(["wght", "wdth"]) + self.assertEqual(["40 00 2C CD", "40 00 33 33"], [hexencode(c) for c in result]) + + def test_decompileSharedCoords_Skia(self): + table = table__g_v_a_r() + table.offsetToCoord = 0 + table.sharedCoordCount = 8 + sharedCoords = table.decompileSharedCoords_(["wght", "wdth"], SKIA_SHARED_COORDS) + self.assertEqual([ + {"wght": 1.0, "wdth": 0.0}, + {"wght": -1.0, "wdth": 0.0}, + {"wght": 0.0, "wdth": 1.0}, + {"wght": 0.0, "wdth": -1.0}, + {"wght": -1.0, "wdth": -1.0}, + {"wght": 1.0, "wdth": -1.0}, + {"wght": 1.0, "wdth": 1.0}, + {"wght": -1.0, "wdth": 1.0} + ], sharedCoords) + + def test_decompileSharedCoords_empty(self): + table = table__g_v_a_r() + table.offsetToCoord = 0 + table.sharedCoordCount = 0 + self.assertEqual([], table.decompileSharedCoords_(["wght"], b"")) + + def test_decompileGlyph_Skia_I(self): + axes = ["wght", "wdth"] + table = table__g_v_a_r() + table.offsetToCoord = 0 + table.sharedCoordCount = 8 + table.axisCount = len(axes) + sharedCoords = table.decompileSharedCoords_(axes, SKIA_SHARED_COORDS) + tuples = table.decompileGlyph_(18, sharedCoords, axes, SKIA_GVAR_I) + self.assertEqual(8, len(tuples)) + self.assertEqual({"wght": (0.0, 1.0, 1.0)}, tuples[0].axes) + self.assertEqual("257,0 -127,0 -128,58 -130,90 -130,62 -130,67 -130,32 -127,0 257,0 " + "259,14 260,64 260,21 260,69 258,124 0,0 130,0 0,0 0,0", + " ".join(["%d,%d" % c for c in tuples[0].coordinates])) + + def test_decompileGlyph_empty(self): + table = table__g_v_a_r() + self.assertEqual([], table.decompileGlyph_(numPointsInGlyph=5, sharedCoords=[], axisTags=[], data=b"")) + + def test_computeMinMaxCord(self): + coord = {"wght": -0.3, "wdth": 0.7} + minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) + self.assertEqual({"wght": -0.3, "wdth": 0.0}, minCoord) + self.assertEqual({"wght": 0.0, "wdth": 0.7}, maxCoord) + +class GlyphVariationTest(unittest.TestCase): + def test_equal(self): + gvar1 = GlyphVariation({"wght":(0.0, 
1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + self.assertEqual(gvar1, gvar2) + + def test_equal_differentAxes(self): + gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + gvar2 = GlyphVariation({"wght":(0.7, 0.8, 0.9)}, [(0,0), (9,8), (7,6)]) + self.assertNotEqual(gvar1, gvar2) + + def test_equal_differentCoordinates(self): + gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8)]) + self.assertNotEqual(gvar1, gvar2) + + def test_hasImpact_someDeltasNotZero(self): + axes = {"wght":(0.0, 1.0, 1.0)} + gvar = GlyphVariation(axes, [(0,0), (9,8), (7,6)]) + self.assertTrue(gvar.hasImpact()) + + def test_hasImpact_allDeltasZero(self): + axes = {"wght":(0.0, 1.0, 1.0)} + gvar = GlyphVariation(axes, [(0,0), (0,0), (0,0)]) + self.assertTrue(gvar.hasImpact()) + + def test_hasImpact_allDeltasNone(self): + axes = {"wght":(0.0, 1.0, 1.0)} + gvar = GlyphVariation(axes, [None, None, None]) + self.assertFalse(gvar.hasImpact()) + + def test_toXML(self): + writer = XMLWriter(BytesIO()) + axes = {"wdth":(0.3, 0.4, 0.5), "wght":(0.0, 1.0, 1.0), "opsz":(-0.7, -0.7, 0.0)} + g = GlyphVariation(axes, [(9,8), None, (7,6), (0,0), (-1,-2), None]) + g.toXML(writer, ["wdth", "wght", "opsz"]) + self.assertEqual([ + '', + '', + '', + '', + '', + '', + '', + '', + '' + ], GlyphVariationTest.xml_lines(writer)) + + def test_toXML_allDeltasNone(self): + writer = XMLWriter(BytesIO()) + axes = {"wght":(0.0, 1.0, 1.0)} + g = GlyphVariation(axes, [None] * 5) + g.toXML(writer, ["wght", "wdth"]) + self.assertEqual([ + '', + '', + '', + '' + ], GlyphVariationTest.xml_lines(writer)) + + def test_fromXML(self): + g = GlyphVariation({}, [None] * 4) + g.fromXML("coord", {"axis":"wdth", "min":"0.3", "value":"0.4", "max":"0.5"}, []) + g.fromXML("coord", {"axis":"wght", "value":"1.0"}, []) + g.fromXML("coord", {"axis":"opsz", "value":"-0.5"}, []) + 
g.fromXML("delta", {"pt":"1", "x":"33", "y":"44"}, []) + g.fromXML("delta", {"pt":"2", "x":"-2", "y":"170"}, []) + self.assertEqual({ + "wdth":( 0.3, 0.4, 0.5), + "wght":( 0.0, 1.0, 1.0), + "opsz":(-0.5, -0.5, 0.0) + }, g.axes) + self.assertEqual([None, (33, 44), (-2, 170), None], g.coordinates) + + def test_compile_sharedCoords_nonIntermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) + # len(data)=8; flags=None; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[] + self.assertEqual("00 08 00 77", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_sharedCoords_intermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.3, 0.5, 0.7), "wdth": (0.1, 0.8, 0.9)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) + # len(data)=8; flags=INTERMEDIATE_TUPLE; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[(0.3, 0.1), (0.7, 0.9)] + self.assertEqual("00 08 40 77 13 33 06 66 2C CD 39 9A", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_sharedCoords_nonIntermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[] + 
self.assertEqual("00 09 20 77", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_sharedCoords_intermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 1.0)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[(0.0, 0.0), (1.0, 1.0)] + self.assertEqual("00 09 60 77 00 00 00 00 40 00 40 00", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_nonIntermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) + # len(data)=8; flags=EMBEDDED_TUPLE_COORD + # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] + self.assertEqual("00 08 80 00 20 00 33 33", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_intermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) + # len(data)=8; flags=EMBEDDED_TUPLE_COORD + # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[(0.0, 0.0), (1.0, 0.8)] + self.assertEqual("00 08 C0 00 20 00 33 33 00 00 00 00 40 00 33 33", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # 
deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_nonIntermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_TUPLE_COORD + # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] + self.assertEqual("00 09 A0 00 20 00 33 33", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_intermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_TUPLE|EMBEDDED_TUPLE_COORD + # embeddedCoord=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)] + self.assertEqual("00 09 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compileCoord(self): + gvar = GlyphVariation({"wght": (-1.0, -1.0, -1.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) + self.assertEqual("C0 00 20 00", hexencode(gvar.compileCoord(["wght", "wdth"]))) + self.assertEqual("20 00 C0 00", hexencode(gvar.compileCoord(["wdth", "wght"]))) + self.assertEqual("C0 00", hexencode(gvar.compileCoord(["wght"]))) + + def test_compileIntermediateCoord(self): + gvar = GlyphVariation({"wght": (-1.0, -1.0, 0.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) + self.assertEqual("C0 00 19 9A 00 00 26 66", hexencode(gvar.compileIntermediateCoord(["wght", "wdth"]))) + self.assertEqual("19 9A C0 00 26 66 00 00", 
hexencode(gvar.compileIntermediateCoord(["wdth", "wght"]))) + self.assertEqual(None, gvar.compileIntermediateCoord(["wght"])) + self.assertEqual("19 9A 26 66", hexencode(gvar.compileIntermediateCoord(["wdth"]))) + + def test_decompileCoord(self): + decompileCoord = GlyphVariation.decompileCoord_ + data = deHexStr("DE AD C0 00 20 00 DE AD") + self.assertEqual(({"wght": -1.0, "wdth": 0.5}, 6), decompileCoord(["wght", "wdth"], data, 2)) + + def test_decompileCoord_roundTrip(self): + # Make sure we are not affected by https://github.com/behdad/fonttools/issues/286 + data = deHexStr("7F B9 80 35") + values, _ = GlyphVariation.decompileCoord_(["wght", "wdth"], data, 0) + axisValues = {axis:(val, val, val) for axis, val in values.items()} + gvar = GlyphVariation(axisValues, [None] * 4) + self.assertEqual("7F B9 80 35", hexencode(gvar.compileCoord(["wght", "wdth"]))) + + def test_decompileCoords(self): + decompileCoords = GlyphVariation.decompileCoords_ + axes = ["wght", "wdth", "opsz"] + coords = [ + {"wght": 1.0, "wdth": 0.0, "opsz": 0.5}, + {"wght": -1.0, "wdth": 0.0, "opsz": 0.25}, + {"wght": 0.0, "wdth": -1.0, "opsz": 1.0} + ] + data = deHexStr("DE AD 40 00 00 00 20 00 C0 00 00 00 10 00 00 00 C0 00 40 00") + self.assertEqual((coords, 20), decompileCoords(axes, numCoords=3, data=data, offset=2)) + + def test_compilePoints(self): + compilePoints = lambda p: GlyphVariation.compilePoints(set(p), numPointsInGlyph=999) + self.assertEqual("00", hexencode(compilePoints(range(999)))) # all points in glyph + self.assertEqual("01 00 07", hexencode(compilePoints([7]))) + self.assertEqual("01 80 FF FF", hexencode(compilePoints([65535]))) + self.assertEqual("02 01 09 06", hexencode(compilePoints([9, 15]))) + self.assertEqual("06 05 07 01 F7 02 01 F2", hexencode(compilePoints([7, 8, 255, 257, 258, 500]))) + self.assertEqual("03 01 07 01 80 01 F4", hexencode(compilePoints([7, 8, 500]))) + self.assertEqual("04 01 07 01 81 BE EF 0C 0F", hexencode(compilePoints([7, 8, 0xBEEF, 0xCAFE]))) 
+ self.assertEqual("81 2C" + # 300 points (0x12c) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] + hexencode(compilePoints(range(300)))) + self.assertEqual("81 8F" + # 399 points (0x18f) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] + " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] + hexencode(compilePoints(range(399)))) + + def test_decompilePoints(self): + numPointsInGlyph = 65536 + allPoints = list(range(numPointsInGlyph)) + def decompilePoints(data, offset): + points, offset = GlyphVariation.decompilePoints_(numPointsInGlyph, deHexStr(data), offset) + # Conversion to list needed for Python 3. + return (list(points), offset) + # all points in glyph + self.assertEqual((allPoints, 1), decompilePoints("00", 0)) + # all points in glyph (in overly verbose encoding, not explicitly prohibited by spec) + self.assertEqual((allPoints, 2), decompilePoints("80 00", 0)) + # 2 points; first run: [9, 9+6] + self.assertEqual(([9, 15], 4), decompilePoints("02 01 09 06", 0)) + # 2 points; first run: [0xBEEF, 0xCAFE]. (0x0C0F = 0xCAFE - 0xBEEF) + self.assertEqual(([0xBEEF, 0xCAFE], 6), decompilePoints("02 81 BE EF 0C 0F", 0)) + # 1 point; first run: [7] + self.assertEqual(([7], 3), decompilePoints("01 00 07", 0)) + # 1 point; first run: [7] in overly verbose encoding + self.assertEqual(([7], 4), decompilePoints("01 80 00 07", 0)) + # 1 point; first run: [65535]; requires words to be treated as unsigned numbers + self.assertEqual(([65535], 4), decompilePoints("01 80 FF FF", 0)) + # 4 points; first run: [7, 8]; second run: [255, 257]. 
257 is stored in delta-encoded bytes (0xFF + 2). + self.assertEqual(([7, 8, 255, 257], 7), decompilePoints("04 01 07 01 01 FF 02", 0)) + # combination of all encodings, preceded and followed by 4 bytes of unused data + data = "DE AD DE AD 04 01 07 01 81 BE EF 0C 0F DE AD DE AD" + self.assertEqual(([7, 8, 0xBEEF, 0xCAFE], 13), decompilePoints(data, 4)) + self.assertSetEqual(set(range(300)), set(decompilePoints( + "81 2C" + # 300 points (0x12c) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] + 0)[0])) + self.assertSetEqual(set(range(399)), set(decompilePoints( + "81 8F" + # 399 points (0x18f) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] + " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] + 0)[0])) + + def test_decompilePoints_shouldGuardAgainstBadPointNumbers(self): + decompilePoints = GlyphVariation.decompilePoints_ + # 2 points; first run: [3, 9]. 
+ numPointsInGlyph = 8 + self.assertRaises(TTLibError, decompilePoints, numPointsInGlyph, deHexStr("02 01 03 06"), 0) + + def test_decompilePoints_roundTrip(self): + numPointsInGlyph = 500 # greater than 255, so we also exercise code path for 16-bit encoding + compile = lambda points: GlyphVariation.compilePoints(points, numPointsInGlyph) + decompile = lambda data: set(GlyphVariation.decompilePoints_(numPointsInGlyph, data, 0)[0]) + for i in range(50): + points = set(random.sample(range(numPointsInGlyph), 30)) + self.assertSetEqual(points, decompile(compile(points)), + "failed round-trip decompile/compilePoints; points=%s" % points) + allPoints = set(range(numPointsInGlyph)) + self.assertSetEqual(allPoints, decompile(compile(allPoints))) + + def test_compileDeltas(self): + gvar = GlyphVariation({}, [(0,0), (1, 0), (2, 0), (3, 3)]) + points = {1, 2} + # deltaX for points: [1, 2]; deltaY for points: [0, 0] + self.assertEqual("01 01 02 81", hexencode(gvar.compileDeltas(points))) + + def test_compileDeltaValues(self): + compileDeltaValues = lambda values: hexencode(GlyphVariation.compileDeltaValues_(values)) + # zeroes + self.assertEqual("80", compileDeltaValues([0])) + self.assertEqual("BF", compileDeltaValues([0] * 64)) + self.assertEqual("BF 80", compileDeltaValues([0] * 65)) + self.assertEqual("BF A3", compileDeltaValues([0] * 100)) + self.assertEqual("BF BF BF BF", compileDeltaValues([0] * 256)) + # bytes + self.assertEqual("00 01", compileDeltaValues([1])) + self.assertEqual("06 01 02 03 7F 80 FF FE", compileDeltaValues([1, 2, 3, 127, -128, -1, -2])) + self.assertEqual("3F" + (64 * " 7F"), compileDeltaValues([127] * 64)) + self.assertEqual("3F" + (64 * " 7F") + " 00 7F", compileDeltaValues([127] * 65)) + # words + self.assertEqual("40 66 66", compileDeltaValues([0x6666])) + self.assertEqual("43 66 66 7F FF FF FF 80 00", compileDeltaValues([0x6666, 32767, -1, -32768])) + self.assertEqual("7F" + (64 * " 11 22"), compileDeltaValues([0x1122] * 64)) + 
self.assertEqual("7F" + (64 * " 11 22") + " 40 11 22", compileDeltaValues([0x1122] * 65)) + # bytes, zeroes, bytes: a single zero is more compact when encoded as part of the bytes run + self.assertEqual("04 7F 7F 00 7F 7F", compileDeltaValues([127, 127, 0, 127, 127])) + self.assertEqual("01 7F 7F 81 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 127, 127])) + self.assertEqual("01 7F 7F 82 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 127, 127])) + self.assertEqual("01 7F 7F 83 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 0, 127, 127])) + # bytes, zeroes + self.assertEqual("01 01 00", compileDeltaValues([1, 0])) + self.assertEqual("00 01 81", compileDeltaValues([1, 0, 0])) + # words, bytes, words: a single byte is more compact when encoded as part of the words run + self.assertEqual("42 66 66 00 02 77 77", compileDeltaValues([0x6666, 2, 0x7777])) + self.assertEqual("40 66 66 01 02 02 40 77 77", compileDeltaValues([0x6666, 2, 2, 0x7777])) + # words, zeroes, words + self.assertEqual("40 66 66 80 40 77 77", compileDeltaValues([0x6666, 0, 0x7777])) + self.assertEqual("40 66 66 81 40 77 77", compileDeltaValues([0x6666, 0, 0, 0x7777])) + self.assertEqual("40 66 66 82 40 77 77", compileDeltaValues([0x6666, 0, 0, 0, 0x7777])) + # words, zeroes, bytes + self.assertEqual("40 66 66 80 02 01 02 03", compileDeltaValues([0x6666, 0, 1, 2, 3])) + self.assertEqual("40 66 66 81 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 1, 2, 3])) + self.assertEqual("40 66 66 82 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 0, 1, 2, 3])) + # words, zeroes + self.assertEqual("40 66 66 80", compileDeltaValues([0x6666, 0])) + self.assertEqual("40 66 66 81", compileDeltaValues([0x6666, 0, 0])) + + def test_decompileDeltas(self): + decompileDeltas = GlyphVariation.decompileDeltas_ + # 83 = zero values (0x80), count = 4 (1 + 0x83 & 0x3F) + self.assertEqual(([0, 0, 0, 0], 1), decompileDeltas(4, deHexStr("83"), 0)) + # 41 01 02 FF FF = signed 16-bit values (0x40), count = 2 (1 + 0x41 & 0x3F) 
+ self.assertEqual(([258, -1], 5), decompileDeltas(2, deHexStr("41 01 02 FF FF"), 0)) + # 01 81 07 = signed 8-bit values, count = 2 (1 + 0x01 & 0x3F) + self.assertEqual(([-127, 7], 3), decompileDeltas(2, deHexStr("01 81 07"), 0)) + # combination of all three encodings, preceded and followed by 4 bytes of unused data + data = deHexStr("DE AD BE EF 83 40 01 02 01 81 80 DE AD BE EF") + self.assertEqual(([0, 0, 0, 0, 258, -127, -128], 11), decompileDeltas(7, data, 4)) + + def test_decompileDeltas_roundTrip(self): + numDeltas = 30 + compile = GlyphVariation.compileDeltaValues_ + decompile = lambda data: GlyphVariation.decompileDeltas_(numDeltas, data, 0)[0] + for i in range(50): + deltas = random.sample(range(-128, 127), 10) + deltas.extend(random.sample(range(-32768, 32767), 10)) + deltas.extend([0] * 10) + random.shuffle(deltas) + self.assertListEqual(deltas, decompile(compile(deltas))) + + def test_getTupleSize(self): + getTupleSize = GlyphVariation.getTupleSize_ + numAxes = 3 + self.assertEqual(4 + numAxes * 2, getTupleSize(0x8042, numAxes)) + self.assertEqual(4 + numAxes * 4, getTupleSize(0x4077, numAxes)) + self.assertEqual(4, getTupleSize(0x2077, numAxes)) + self.assertEqual(4, getTupleSize(11, numAxes)) + + @staticmethod + def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in content.splitlines()][1:] + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_h_d_m_x.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_d_m_x.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_h_d_m_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_d_m_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,121 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . import DefaultTable +import array + +hdmxHeaderFormat = """ + > # big endian! 
+ version: H + numRecords: H + recordSize: l +""" + +try: + from collections.abc import Mapping +except: + from UserDict import DictMixin as Mapping + +class _GlyphnamedList(Mapping): + + def __init__(self, reverseGlyphOrder, data): + self._array = data + self._map = dict(reverseGlyphOrder) + + def __getitem__(self, k): + return self._array[self._map[k]] + + def __len__(self): + return len(self._map) + + def __iter__(self): + return iter(self._map) + + def keys(self): + return self._map.keys() + +class table__h_d_m_x(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs + glyphOrder = ttFont.getGlyphOrder() + dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self) + self.hdmx = {} + for i in range(self.numRecords): + ppem = byteord(data[0]) + maxSize = byteord(data[1]) + widths = _GlyphnamedList(ttFont.getReverseGlyphMap(), array.array("B", data[2:2+numGlyphs])) + self.hdmx[ppem] = widths + data = data[self.recordSize:] + assert len(data) == 0, "too much hdmx data" + + def compile(self, ttFont): + self.version = 0 + numGlyphs = ttFont['maxp'].numGlyphs + glyphOrder = ttFont.getGlyphOrder() + self.recordSize = 4 * ((2 + numGlyphs + 3) // 4) + pad = (self.recordSize - 2 - numGlyphs) * b"\0" + self.numRecords = len(self.hdmx) + data = sstruct.pack(hdmxHeaderFormat, self) + items = sorted(self.hdmx.items()) + for ppem, widths in items: + data = data + bytechr(ppem) + bytechr(max(widths.values())) + for glyphID in range(len(glyphOrder)): + width = widths[glyphOrder[glyphID]] + data = data + bytechr(width) + data = data + pad + return data + + def toXML(self, writer, ttFont): + writer.begintag("hdmxData") + writer.newline() + ppems = sorted(self.hdmx.keys()) + records = [] + format = "" + for ppem in ppems: + widths = self.hdmx[ppem] + records.append(widths) + format = format + "%4d" + glyphNames = ttFont.getGlyphOrder()[:] + glyphNames.sort() + maxNameLen = max(map(len, glyphNames)) + format = "%" + 
repr(maxNameLen) + 's:' + format + ' ;' + writer.write(format % (("ppem",) + tuple(ppems))) + writer.newline() + writer.newline() + for glyphName in glyphNames: + row = [] + for ppem in ppems: + widths = self.hdmx[ppem] + row.append(widths[glyphName]) + if ";" in glyphName: + glyphName = "\\x3b".join(glyphName.split(";")) + writer.write(format % ((glyphName,) + tuple(row))) + writer.newline() + writer.endtag("hdmxData") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name != "hdmxData": + return + content = strjoin(content) + lines = content.split(";") + topRow = lines[0].split() + assert topRow[0] == "ppem:", "illegal hdmx format" + ppems = list(map(int, topRow[1:])) + self.hdmx = hdmx = {} + for ppem in ppems: + hdmx[ppem] = {} + lines = (line.split() for line in lines[1:]) + for line in lines: + if not line: + continue + assert line[0][-1] == ":", "illegal hdmx format" + glyphName = line[0][:-1] + if "\\" in glyphName: + from fontTools.misc.textTools import safeEval + glyphName = safeEval('"""' + glyphName + '"""') + line = list(map(int, line[1:])) + assert len(line) == len(ppems), "illegal hdmx format" + for i in range(len(ppems)): + hdmx[ppems[i]][glyphName] = line[i] diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_h_e_a_d.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_e_a_d.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_h_e_a_d.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_e_a_d.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,92 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, num2binary, binary2num +from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow +from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat +from . 
import DefaultTable +import warnings + + +headFormat = """ + > # big endian + tableVersion: 16.16F + fontRevision: 16.16F + checkSumAdjustment: I + magicNumber: I + flags: H + unitsPerEm: H + created: Q + modified: Q + xMin: h + yMin: h + xMax: h + yMax: h + macStyle: H + lowestRecPPEM: H + fontDirectionHint: h + indexToLocFormat: h + glyphDataFormat: h +""" + +class table__h_e_a_d(DefaultTable.DefaultTable): + + dependencies = ['maxp', 'loca'] + + def decompile(self, data, ttFont): + dummy, rest = sstruct.unpack2(headFormat, data, self) + if rest: + # this is quite illegal, but there seem to be fonts out there that do this + warnings.warn("extra bytes at the end of 'head' table") + assert rest == "\0\0" + + # For timestamp fields, ignore the top four bytes. Some fonts have + # bogus values there. Since till 2038 those bytes only can be zero, + # ignore them. + # + # https://github.com/behdad/fonttools/issues/99#issuecomment-66776810 + for stamp in 'created', 'modified': + value = getattr(self, stamp) + if value > 0xFFFFFFFF: + warnings.warn("'%s' timestamp out of range; ignoring top bytes" % stamp) + value &= 0xFFFFFFFF + setattr(self, stamp, value) + if value < 0x7C259DC0: # January 1, 1970 00:00:00 + warnings.warn("'%s' timestamp seems very low; regarding as unix timestamp" % stamp) + value += 0x7C259DC0 + setattr(self, stamp, value) + + def compile(self, ttFont): + if ttFont.recalcTimestamp: + self.modified = timestampNow() + data = sstruct.pack(headFormat, self) + return data + + def toXML(self, writer, ttFont): + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(headFormat) + for name in names: + value = getattr(self, name) + if name in ("created", "modified"): + value = timestampToString(value) + if name in ("magicNumber", "checkSumAdjustment"): + if value < 0: + value = value + 0x100000000 + value = hex(value) + if value[-1:] == "L": + value = value[:-1] + elif name 
in ("macStyle", "flags"): + value = num2binary(value, 16) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name in ("created", "modified"): + value = timestampFromString(value) + elif name in ("macStyle", "flags"): + value = binary2num(value) + else: + value = safeEval(value) + setattr(self, name, value) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_h_h_e_a.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_h_e_a.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_h_h_e_a.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,91 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable + +hheaFormat = """ + > # big endian + tableVersion: 16.16F + ascent: h + descent: h + lineGap: h + advanceWidthMax: H + minLeftSideBearing: h + minRightSideBearing: h + xMaxExtent: h + caretSlopeRise: h + caretSlopeRun: h + caretOffset: h + reserved0: h + reserved1: h + reserved2: h + reserved3: h + metricDataFormat: h + numberOfHMetrics: H +""" + + +class table__h_h_e_a(DefaultTable.DefaultTable): + + # Note: Keep in sync with table__v_h_e_a + + dependencies = ['hmtx', 'glyf'] + + def decompile(self, data, ttFont): + sstruct.unpack(hheaFormat, data, self) + + def compile(self, ttFont): + if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + self.recalc(ttFont) + return sstruct.pack(hheaFormat, self) + + def recalc(self, ttFont): + hmtxTable = ttFont['hmtx'] + if 'glyf' in ttFont: + glyfTable = ttFont['glyf'] + INFINITY = 100000 + advanceWidthMax = 0 + minLeftSideBearing = +INFINITY # arbitrary big number + minRightSideBearing = +INFINITY # arbitrary big number + xMaxExtent = -INFINITY # arbitrary big negative number + + for name in 
ttFont.getGlyphOrder(): + width, lsb = hmtxTable[name] + advanceWidthMax = max(advanceWidthMax, width) + g = glyfTable[name] + if g.numberOfContours == 0: + continue + if g.numberOfContours < 0 and not hasattr(g, "xMax"): + # Composite glyph without extents set. + # Calculate those. + g.recalcBounds(glyfTable) + minLeftSideBearing = min(minLeftSideBearing, lsb) + rsb = width - lsb - (g.xMax - g.xMin) + minRightSideBearing = min(minRightSideBearing, rsb) + extent = lsb + (g.xMax - g.xMin) + xMaxExtent = max(xMaxExtent, extent) + + if xMaxExtent == -INFINITY: + # No glyph has outlines. + minLeftSideBearing = 0 + minRightSideBearing = 0 + xMaxExtent = 0 + + self.advanceWidthMax = advanceWidthMax + self.minLeftSideBearing = minLeftSideBearing + self.minRightSideBearing = minRightSideBearing + self.xMaxExtent = xMaxExtent + else: + # XXX CFF recalc... + pass + + def toXML(self, writer, ttFont): + formatstring, names, fixes = sstruct.getformat(hheaFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_h_m_t_x.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_m_t_x.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_h_m_t_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_m_t_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,101 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import sys +import array +import warnings + + +class table__h_m_t_x(DefaultTable.DefaultTable): + + headerTag = 'hhea' + advanceName = 'width' + sideBearingName = 'lsb' + numberOfMetricsName = 'numberOfHMetrics' + + def decompile(self, data, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs + numberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName)) + if numberOfMetrics > numGlyphs: + numberOfMetrics = numGlyphs # We warn later. + # Note: advanceWidth is unsigned, but we read/write as signed. + metrics = array.array("h", data[:4 * numberOfMetrics]) + if sys.byteorder != "big": + metrics.byteswap() + data = data[4 * numberOfMetrics:] + numberOfSideBearings = numGlyphs - numberOfMetrics + sideBearings = array.array("h", data[:2 * numberOfSideBearings]) + data = data[2 * numberOfSideBearings:] + + if sys.byteorder != "big": + sideBearings.byteswap() + if data: + warnings.warn("too much 'hmtx'/'vmtx' table data") + self.metrics = {} + glyphOrder = ttFont.getGlyphOrder() + for i in range(numberOfMetrics): + glyphName = glyphOrder[i] + self.metrics[glyphName] = list(metrics[i*2:i*2+2]) + lastAdvance = metrics[-2] + for i in range(numberOfSideBearings): + glyphName = glyphOrder[i + numberOfMetrics] + self.metrics[glyphName] = [lastAdvance, sideBearings[i]] + + def compile(self, ttFont): + metrics = [] + for glyphName in ttFont.getGlyphOrder(): + metrics.append(self.metrics[glyphName]) + lastAdvance = metrics[-1][0] + lastIndex = len(metrics) + while metrics[lastIndex-2][0] == lastAdvance: + lastIndex -= 1 + if lastIndex <= 1: + # all advances are equal + lastIndex = 1 + break + additionalMetrics = metrics[lastIndex:] + additionalMetrics = [sb for advance, sb in additionalMetrics] + metrics = metrics[:lastIndex] + setattr(ttFont[self.headerTag], self.numberOfMetricsName, len(metrics)) + + allMetrics = [] + for item in metrics: + allMetrics.extend(item) + allMetrics = array.array("h", allMetrics) + if sys.byteorder != "big": + 
allMetrics.byteswap() + data = allMetrics.tostring() + + additionalMetrics = array.array("h", additionalMetrics) + if sys.byteorder != "big": + additionalMetrics.byteswap() + data = data + additionalMetrics.tostring() + return data + + def toXML(self, writer, ttFont): + names = sorted(self.metrics.keys()) + for glyphName in names: + advance, sb = self.metrics[glyphName] + writer.simpletag("mtx", [ + ("name", glyphName), + (self.advanceName, advance), + (self.sideBearingName, sb), + ]) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "metrics"): + self.metrics = {} + if name == "mtx": + self.metrics[attrs["name"]] = [safeEval(attrs[self.advanceName]), + safeEval(attrs[self.sideBearingName])] + + def __delitem__(self, glyphName): + del self.metrics[glyphName] + + def __getitem__(self, glyphName): + return self.metrics[glyphName] + + def __setitem__(self, glyphName, advance_sb_pair): + self.metrics[glyphName] = tuple(advance_sb_pair) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/__init__.py fonttools-3.0/Snippets/fontTools/ttLib/tables/__init__.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,74 @@ + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +# DON'T EDIT! This file is generated by MetaTools/buildTableList.py. +def _moduleFinderHint(): + """Dummy function to let modulefinder know what tables may be + dynamically imported. Generated by MetaTools/buildTableList.py. + + >>> _moduleFinderHint() + """ + from . import B_A_S_E_ + from . import C_B_D_T_ + from . import C_B_L_C_ + from . import C_F_F_ + from . import C_O_L_R_ + from . import C_P_A_L_ + from . import D_S_I_G_ + from . import E_B_D_T_ + from . import E_B_L_C_ + from . import F_F_T_M_ + from . import G_D_E_F_ + from . 
import G_M_A_P_ + from . import G_P_K_G_ + from . import G_P_O_S_ + from . import G_S_U_B_ + from . import J_S_T_F_ + from . import L_T_S_H_ + from . import M_A_T_H_ + from . import M_E_T_A_ + from . import O_S_2f_2 + from . import S_I_N_G_ + from . import S_V_G_ + from . import T_S_I_B_ + from . import T_S_I_D_ + from . import T_S_I_J_ + from . import T_S_I_P_ + from . import T_S_I_S_ + from . import T_S_I_V_ + from . import T_S_I__0 + from . import T_S_I__1 + from . import T_S_I__2 + from . import T_S_I__3 + from . import T_S_I__5 + from . import V_D_M_X_ + from . import V_O_R_G_ + from . import _a_v_a_r + from . import _c_m_a_p + from . import _c_v_t + from . import _f_e_a_t + from . import _f_p_g_m + from . import _f_v_a_r + from . import _g_a_s_p + from . import _g_l_y_f + from . import _g_v_a_r + from . import _h_d_m_x + from . import _h_e_a_d + from . import _h_h_e_a + from . import _h_m_t_x + from . import _k_e_r_n + from . import _l_o_c_a + from . import _l_t_a_g + from . import _m_a_x_p + from . import _m_e_t_a + from . import _n_a_m_e + from . import _p_o_s_t + from . import _p_r_e_p + from . import _s_b_i_x + from . import _v_h_e_a + from . 
import _v_m_t_x + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/J_S_T_F_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/J_S_T_F_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/J_S_T_F_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/J_S_T_F_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_J_S_T_F_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_k_e_r_n.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_k_e_r_n.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_k_e_r_n.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_k_e_r_n.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,200 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import getSearchRange +from fontTools.misc.textTools import safeEval, readHex +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from . import DefaultTable +import struct +import array +import warnings + + +class table__k_e_r_n(DefaultTable.DefaultTable): + + def getkern(self, format): + for subtable in self.kernTables: + if subtable.version == format: + return subtable + return None # not found + + def decompile(self, data, ttFont): + version, nTables = struct.unpack(">HH", data[:4]) + apple = False + if (len(data) >= 8) and (version == 1): + # AAT Apple's "new" format. Hm. 
+ version, nTables = struct.unpack(">LL", data[:8]) + self.version = fi2fl(version, 16) + data = data[8:] + apple = True + else: + self.version = version + data = data[4:] + tablesIndex = [] + self.kernTables = [] + for i in range(nTables): + if self.version == 1.0: + # Apple + length, coverage, tupleIndex = struct.unpack(">lHH", data[:8]) + version = coverage & 0xff + else: + version, length = struct.unpack(">HH", data[:4]) + length = int(length) + if version not in kern_classes: + subtable = KernTable_format_unkown(version) + else: + subtable = kern_classes[version]() + subtable.apple = apple + subtable.decompile(data[:length], ttFont) + self.kernTables.append(subtable) + data = data[length:] + + def compile(self, ttFont): + if hasattr(self, "kernTables"): + nTables = len(self.kernTables) + else: + nTables = 0 + if self.version == 1.0: + # AAT Apple's "new" format. + data = struct.pack(">ll", fl2fi(self.version, 16), nTables) + else: + data = struct.pack(">HH", self.version, nTables) + if hasattr(self, "kernTables"): + for subtable in self.kernTables: + data = data + subtable.compile(ttFont) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + for subtable in self.kernTables: + subtable.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.version = safeEval(attrs["value"]) + return + if name != "kernsubtable": + return + if not hasattr(self, "kernTables"): + self.kernTables = [] + format = safeEval(attrs["format"]) + if format not in kern_classes: + subtable = KernTable_format_unkown(format) + else: + subtable = kern_classes[format]() + self.kernTables.append(subtable) + subtable.fromXML(name, attrs, content, ttFont) + + +class KernTable_format_0(object): + + def decompile(self, data, ttFont): + version, length, coverage = (0,0,0) + if not self.apple: + version, length, coverage = struct.unpack(">HHH", data[:6]) + data = data[6:] + else: + 
version, length, coverage = struct.unpack(">LHH", data[:8]) + data = data[8:] + self.version, self.coverage = int(version), int(coverage) + + self.kernTable = kernTable = {} + + nPairs, searchRange, entrySelector, rangeShift = struct.unpack(">HHHH", data[:8]) + data = data[8:] + + nPairs = min(nPairs, len(data) // 6) + datas = array.array("H", data[:6 * nPairs]) + if sys.byteorder != "big": + datas.byteswap() + it = iter(datas) + glyphOrder = ttFont.getGlyphOrder() + for k in range(nPairs): + left, right, value = next(it), next(it), next(it) + if value >= 32768: value -= 65536 + try: + kernTable[(glyphOrder[left], glyphOrder[right])] = value + except IndexError: + # Slower, but will not throw an IndexError on an invalid glyph id. + kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = value + if len(data) > 6 * nPairs: + warnings.warn("excess data in 'kern' subtable: %d bytes" % len(data)) + + def compile(self, ttFont): + nPairs = len(self.kernTable) + searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6) + data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift) + + # yeehee! (I mean, turn names into indices) + try: + reverseOrder = ttFont.getReverseGlyphMap() + kernTable = sorted((reverseOrder[left], reverseOrder[right], value) for ((left,right),value) in self.kernTable.items()) + except KeyError: + # Slower, but will not throw KeyError on invalid glyph id. 
+ getGlyphID = ttFont.getGlyphID + kernTable = sorted((getGlyphID(left), getGlyphID(right), value) for ((left,right),value) in self.kernTable.items()) + + for left, right, value in kernTable: + data = data + struct.pack(">HHh", left, right, value) + return struct.pack(">HHH", self.version, len(data) + 6, self.coverage) + data + + def toXML(self, writer, ttFont): + writer.begintag("kernsubtable", coverage=self.coverage, format=0) + writer.newline() + items = sorted(self.kernTable.items()) + for (left, right), value in items: + writer.simpletag("pair", [ + ("l", left), + ("r", right), + ("v", value) + ]) + writer.newline() + writer.endtag("kernsubtable") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.coverage = safeEval(attrs["coverage"]) + self.version = safeEval(attrs["format"]) + if not hasattr(self, "kernTable"): + self.kernTable = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"]) + + def __getitem__(self, pair): + return self.kernTable[pair] + + def __setitem__(self, pair, value): + self.kernTable[pair] = value + + def __delitem__(self, pair): + del self.kernTable[pair] + + +class KernTable_format_unkown(object): + + def __init__(self, format): + self.format = format + + def decompile(self, data, ttFont): + self.data = data + + def compile(self, ttFont): + return self.data + + def toXML(self, writer, ttFont): + writer.begintag("kernsubtable", format=self.format) + writer.newline() + writer.comment("unknown 'kern' subtable format") + writer.newline() + writer.dumphex(self.data) + writer.endtag("kernsubtable") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.decompile(readHex(content), ttFont) + + +kern_classes = {0: KernTable_format_0} diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_k_e_r_n_test.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_k_e_r_n_test.py --- 
fonttools-2.4/Snippets/fontTools/ttLib/tables/_k_e_r_n_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_k_e_r_n_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,29 @@ +from __future__ import print_function, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +import unittest +from ._k_e_r_n import KernTable_format_0 + +class MockFont(object): + + def getGlyphOrder(self): + return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"] + + def getGlyphName(self, glyphID): + return "glyph%.5d" % glyphID + +class KernTable_format_0_Test(unittest.TestCase): + + def test_decompileBadGlyphId(self): + subtable = KernTable_format_0() + subtable.apple = False + subtable.decompile( b'\x00' * 6 + + b'\x00' + b'\x02' + b'\x00' * 6 + + b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01' + + b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02', + MockFont()) + self.assertEqual(subtable[("glyph00001", "glyph00003")], 1) + self.assertEqual(subtable[("glyph00001", "glyph65535")], 2) + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_l_o_c_a.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_o_c_a.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_l_o_c_a.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_o_c_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,60 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import DefaultTable +import sys +import array +import warnings + +class table__l_o_c_a(DefaultTable.DefaultTable): + + dependencies = ['glyf'] + + def decompile(self, data, ttFont): + longFormat = ttFont['head'].indexToLocFormat + if longFormat: + format = "I" + else: + format = "H" + locations = array.array(format) + locations.fromstring(data) + if sys.byteorder != "big": + locations.byteswap() + if not longFormat: + l = array.array("I") + for i in range(len(locations)): + l.append(locations[i] * 2) + locations = l + if len(locations) < (ttFont['maxp'].numGlyphs + 1): + warnings.warn("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d" % (len(locations) - 1, ttFont['maxp'].numGlyphs)) + self.locations = locations + + def compile(self, ttFont): + try: + max_location = max(self.locations) + except AttributeError: + self.set([]) + max_location = 0 + if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations): + locations = array.array("H") + for i in range(len(self.locations)): + locations.append(self.locations[i] // 2) + ttFont['head'].indexToLocFormat = 0 + else: + locations = array.array("I", self.locations) + ttFont['head'].indexToLocFormat = 1 + if sys.byteorder != "big": + locations.byteswap() + return locations.tostring() + + def set(self, locations): + self.locations = array.array("I", locations) + + def toXML(self, writer, ttFont): + writer.comment("The 'loca' table will be calculated by the compiler") + writer.newline() + + def __getitem__(self, index): + return self.locations[index] + + def __len__(self): + return len(self.locations) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_l_t_a_g.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_t_a_g.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_l_t_a_g.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_t_a_g.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,50 @@ +from __future__ import print_function, division, absolute_import +from 
fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . import DefaultTable +import struct + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html + +class table__l_t_a_g(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.version, self.flags, numTags = struct.unpack(">LLL", data[:12]) + assert self.version == 1 + self.tags = [] + for i in range(numTags): + pos = 12 + i * 4 + offset, length = struct.unpack(">HH", data[pos:pos+4]) + tag = data[offset:offset+length].decode("ascii") + self.tags.append(tag) + + def compile(self, ttFont): + dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))] + stringPool = "" + for tag in self.tags: + offset = stringPool.find(tag) + if offset < 0: + offset = len(stringPool) + stringPool = stringPool + tag + offset = offset + 12 + len(self.tags) * 4 + dataList.append(struct.pack(">HH", offset, len(tag))) + dataList.append(stringPool) + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("flags", value=self.flags) + writer.newline() + for tag in self.tags: + writer.simpletag("LanguageTag", tag=tag) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "tags"): + self.tags = [] + if name == "LanguageTag": + self.tags.append(attrs["tag"]) + elif "value" in attrs: + value = safeEval(attrs["value"]) + setattr(self, name, value) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_l_t_a_g_test.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_t_a_g_test.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_l_t_a_g_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_t_a_g_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,49 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from 
fontTools.misc.xmlWriter import XMLWriter +import os +import struct +import unittest +from ._l_t_a_g import table__l_t_a_g + +class Test_l_t_a_g(unittest.TestCase): + + DATA_ = struct.pack(b">LLLHHHHHH", 1, 0, 3, 24 + 0, 2, 24 + 2, 7, 24 + 2, 2) + b"enzh-Hant" + TAGS_ = ["en", "zh-Hant", "zh"] + + def test_decompile_compile(self): + table = table__l_t_a_g() + table.decompile(self.DATA_, ttFont=None) + self.assertEqual(1, table.version) + self.assertEqual(0, table.flags) + self.assertEqual(self.TAGS_, table.tags) + self.assertEqual(self.DATA_, table.compile(ttFont=None)) + + def test_fromXML(self): + table = table__l_t_a_g() + table.fromXML("version", {"value": "1"}, content=None, ttFont=None) + table.fromXML("flags", {"value": "777"}, content=None, ttFont=None) + table.fromXML("LanguageTag", {"tag": "sr-Latn"}, content=None, ttFont=None) + table.fromXML("LanguageTag", {"tag": "fa"}, content=None, ttFont=None) + self.assertEqual(1, table.version) + self.assertEqual(777, table.flags) + self.assertEqual(["sr-Latn", "fa"], table.tags) + + def test_toXML(self): + writer = XMLWriter(BytesIO()) + table = table__l_t_a_g() + table.decompile(self.DATA_, ttFont=None) + table.toXML(writer, ttFont=None) + expected = os.linesep.join([ + '', + '', + '', + '', + '', + '' + ]) + os.linesep + self.assertEqual(expected.encode("utf_8"), writer.file.getvalue()) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/L_T_S_H_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/L_T_S_H_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/L_T_S_H_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/L_T_S_H_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,50 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import struct +import array + +# XXX I've lowered the strictness, to make sure Apple's own Chicago +# XXX gets through. They're looking into it, I hope to raise the standards +# XXX back to normal eventually. + +class table_L_T_S_H_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + version, numGlyphs = struct.unpack(">HH", data[:4]) + data = data[4:] + assert version == 0, "unknown version: %s" % version + assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length" + # ouch: the assertion is not true in Chicago! + #assert numGlyphs == ttFont['maxp'].numGlyphs + yPels = array.array("B") + yPels.fromstring(data) + self.yPels = {} + for i in range(numGlyphs): + self.yPels[ttFont.getGlyphName(i)] = yPels[i] + + def compile(self, ttFont): + version = 0 + names = list(self.yPels.keys()) + numGlyphs = len(names) + yPels = [0] * numGlyphs + # ouch: the assertion is not true in Chicago! + #assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs + for name in names: + yPels[ttFont.getGlyphID(name)] = self.yPels[name] + yPels = array.array("B", yPels) + return struct.pack(">HH", version, numGlyphs) + yPels.tostring() + + def toXML(self, writer, ttFont): + names = sorted(self.yPels.keys()) + for name in names: + writer.simpletag("yPel", name=name, value=self.yPels[name]) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "yPels"): + self.yPels = {} + if name != "yPel": + return # ignore unknown tags + self.yPels[attrs["name"]] = safeEval(attrs["value"]) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/M_A_T_H_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/M_A_T_H_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/M_A_T_H_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/M_A_T_H_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from 
fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_M_A_T_H_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_m_a_x_p.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_a_x_p.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_m_a_x_p.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_a_x_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,139 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable + +maxpFormat_0_5 = """ + > # big endian + tableVersion: i + numGlyphs: H +""" + +maxpFormat_1_0_add = """ + > # big endian + maxPoints: H + maxContours: H + maxCompositePoints: H + maxCompositeContours: H + maxZones: H + maxTwilightPoints: H + maxStorage: H + maxFunctionDefs: H + maxInstructionDefs: H + maxStackElements: H + maxSizeOfInstructions: H + maxComponentElements: H + maxComponentDepth: H +""" + + +class table__m_a_x_p(DefaultTable.DefaultTable): + + dependencies = ['glyf'] + + def decompile(self, data, ttFont): + dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self) + self.numGlyphs = int(self.numGlyphs) + if self.tableVersion != 0x00005000: + dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self) + assert len(data) == 0 + + def compile(self, ttFont): + if 'glyf' in ttFont: + if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + self.recalc(ttFont) + else: + pass # CFF + self.numGlyphs = len(ttFont.getGlyphOrder()) + if self.tableVersion != 0x00005000: + self.tableVersion = 0x00010000 + data = sstruct.pack(maxpFormat_0_5, self) + if self.tableVersion == 0x00010000: + data = data + sstruct.pack(maxpFormat_1_0_add, self) + return data + + def recalc(self, ttFont): + """Recalculate the font bounding box, and most other maxp values except + for the TT instructions values. 
Also recalculate the value of bit 1 + of the flags field and the font bounding box of the 'head' table. + """ + glyfTable = ttFont['glyf'] + hmtxTable = ttFont['hmtx'] + headTable = ttFont['head'] + self.numGlyphs = len(glyfTable) + INFINITY = 100000 + xMin = +INFINITY + yMin = +INFINITY + xMax = -INFINITY + yMax = -INFINITY + maxPoints = 0 + maxContours = 0 + maxCompositePoints = 0 + maxCompositeContours = 0 + maxComponentElements = 0 + maxComponentDepth = 0 + allXMaxIsLsb = 1 + for glyphName in ttFont.getGlyphOrder(): + g = glyfTable[glyphName] + if g.numberOfContours: + if hmtxTable[glyphName][1] != g.xMin: + allXMaxIsLsb = 0 + xMin = min(xMin, g.xMin) + yMin = min(yMin, g.yMin) + xMax = max(xMax, g.xMax) + yMax = max(yMax, g.yMax) + if g.numberOfContours > 0: + nPoints, nContours = g.getMaxpValues() + maxPoints = max(maxPoints, nPoints) + maxContours = max(maxContours, nContours) + else: + nPoints, nContours, componentDepth = g.getCompositeMaxpValues(glyfTable) + maxCompositePoints = max(maxCompositePoints, nPoints) + maxCompositeContours = max(maxCompositeContours, nContours) + maxComponentElements = max(maxComponentElements, len(g.components)) + maxComponentDepth = max(maxComponentDepth, componentDepth) + if xMin == +INFINITY: + headTable.xMin = 0 + headTable.yMin = 0 + headTable.xMax = 0 + headTable.yMax = 0 + else: + headTable.xMin = xMin + headTable.yMin = yMin + headTable.xMax = xMax + headTable.yMax = yMax + self.maxPoints = maxPoints + self.maxContours = maxContours + self.maxCompositePoints = maxCompositePoints + self.maxCompositeContours = maxCompositeContours + self.maxComponentDepth = maxComponentDepth + if allXMaxIsLsb: + headTable.flags = headTable.flags | 0x2 + else: + headTable.flags = headTable.flags & ~0x2 + + def testrepr(self): + items = sorted(self.__dict__.items()) + print(". . . . . . . . .") + for combo in items: + print(" %s: %s" % combo) + print(". . . . . . . . 
.") + + def toXML(self, writer, ttFont): + if self.tableVersion != 0x00005000: + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5) + if self.tableVersion != 0x00005000: + formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add) + names = names + names_1_0 + for name in names: + value = getattr(self, name) + if name == "tableVersion": + value = hex(value) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_m_e_t_a.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_e_t_a.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_m_e_t_a.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_e_t_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,93 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import readHex +from fontTools.ttLib import TTLibError +from . import DefaultTable + +# Apple's documentation of 'meta': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6meta.html + +META_HEADER_FORMAT = """ + > # big endian + version: L + flags: L + dataOffset: L + numDataMaps: L +""" + +# According to Apple's spec, the dataMaps entries contain a dataOffset +# that is documented as "Offset from the beginning of the data section +# to the data for this tag". However, this is *not* the case with +# the fonts that Apple ships pre-installed on MacOS X Yosemite 10.10.4, +# and it also does not reflect how Apple's ftxdumperfuser tool is parsing +# the 'meta' table (tested ftxdumperfuser build 330, FontToolbox.framework +# build 187). 
Instead of what is claimed in the spec, the data maps contain +# a dataOffset relative to the very beginning of the 'meta' table. +# The dataOffset field of the 'meta' header apparently gets ignored. + +DATA_MAP_FORMAT = """ + > # big endian + tag: 4s + dataOffset: L + dataLength: L +""" + + +class table__m_e_t_a(DefaultTable.DefaultTable): + def __init__(self, tag="meta"): + DefaultTable.DefaultTable.__init__(self, tag) + self.data = {} + + def decompile(self, data, ttFont): + headerSize = sstruct.calcsize(META_HEADER_FORMAT) + header = sstruct.unpack(META_HEADER_FORMAT, data[0 : headerSize]) + if header["version"] != 1: + raise TTLibError("unsupported 'meta' version %d" % + header["version"]) + dataMapSize = sstruct.calcsize(DATA_MAP_FORMAT) + for i in range(header["numDataMaps"]): + dataMapOffset = headerSize + i * dataMapSize + dataMap = sstruct.unpack( + DATA_MAP_FORMAT, + data[dataMapOffset : dataMapOffset + dataMapSize]) + tag = dataMap["tag"] + offset = dataMap["dataOffset"] + self.data[tag] = data[offset : offset + dataMap["dataLength"]] + + def compile(self, ttFont): + keys = sorted(self.data.keys()) + headerSize = sstruct.calcsize(META_HEADER_FORMAT) + dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT) + header = sstruct.pack(META_HEADER_FORMAT, { + "version": 1, + "flags": 0, + "dataOffset": dataOffset, + "numDataMaps": len(keys) + }) + dataMaps = [] + dataBlocks = [] + for tag in keys: + data = self.data[tag] + dataMaps.append(sstruct.pack(DATA_MAP_FORMAT, { + "tag": tag, + "dataOffset": dataOffset, + "dataLength": len(data) + })) + dataBlocks.append(data) + dataOffset += len(data) + return bytesjoin([header] + dataMaps + dataBlocks) + + def toXML(self, writer, ttFont, progress=None): + for tag in sorted(self.data.keys()): + writer.begintag("hexdata", tag=tag) + writer.newline() + writer.dumphex(self.data[tag]) + writer.endtag("hexdata") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "hexdata": 
+ self.data[attrs["tag"]] = readHex(content) + else: + raise TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/M_E_T_A_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/M_E_T_A_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/M_E_T_A_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/M_E_T_A_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,305 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable +import pdb +import struct + + +METAHeaderFormat = """ + > # big endian + tableVersionMajor: H + tableVersionMinor: H + metaEntriesVersionMajor: H + metaEntriesVersionMinor: H + unicodeVersion: L + metaFlags: H + nMetaRecs: H +""" +# This record is followed by nMetaRecs of METAGlyphRecordFormat. +# This in turn is followd by as many METAStringRecordFormat entries +# as specified by the METAGlyphRecordFormat entries +# this is followed by the strings specifried in the METAStringRecordFormat +METAGlyphRecordFormat = """ + > # big endian + glyphID: H + nMetaEntry: H +""" +# This record is followd by a variable data length field: +# USHORT or ULONG hdrOffset +# Offset from start of META table to the beginning +# of this glyphs array of ns Metadata string entries. +# Size determined by metaFlags field +# METAGlyphRecordFormat entries must be sorted by glyph ID + +METAStringRecordFormat = """ + > # big endian + labelID: H + stringLen: H +""" +# This record is followd by a variable data length field: +# USHORT or ULONG stringOffset +# METAStringRecordFormat entries must be sorted in order of labelID +# There may be more than one entry with the same labelID +# There may be more than one strign with the same content. + +# Strings shall be Unicode UTF-8 encoded, and null-terminated. 
+ +METALabelDict = { + 0: "MojikumiX4051", # An integer in the range 1-20 + 1: "UNIUnifiedBaseChars", + 2: "BaseFontName", + 3: "Language", + 4: "CreationDate", + 5: "FoundryName", + 6: "FoundryCopyright", + 7: "OwnerURI", + 8: "WritingScript", + 10: "StrokeCount", + 11: "IndexingRadical", +} + + +def getLabelString(labelID): + try: + label = METALabelDict[labelID] + except KeyError: + label = "Unknown label" + return str(label) + + +class table_M_E_T_A_(DefaultTable.DefaultTable): + + dependencies = [] + + def decompile(self, data, ttFont): + dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self) + self.glyphRecords = [] + for i in range(self.nMetaRecs): + glyphRecord, newData = sstruct.unpack2(METAGlyphRecordFormat, newData, GlyphRecord()) + if self.metaFlags == 0: + [glyphRecord.offset] = struct.unpack(">H", newData[:2]) + newData = newData[2:] + elif self.metaFlags == 1: + [glyphRecord.offset] = struct.unpack(">H", newData[:4]) + newData = newData[4:] + else: + assert 0, "The metaFlags field in the META table header has a value other than 0 or 1 :" + str(self.metaFlags) + glyphRecord.stringRecs = [] + newData = data[glyphRecord.offset:] + for j in range(glyphRecord.nMetaEntry): + stringRec, newData = sstruct.unpack2(METAStringRecordFormat, newData, StringRecord()) + if self.metaFlags == 0: + [stringRec.offset] = struct.unpack(">H", newData[:2]) + newData = newData[2:] + else: + [stringRec.offset] = struct.unpack(">H", newData[:4]) + newData = newData[4:] + stringRec.string = data[stringRec.offset:stringRec.offset + stringRec.stringLen] + glyphRecord.stringRecs.append(stringRec) + self.glyphRecords.append(glyphRecord) + + def compile(self, ttFont): + offsetOK = 0 + self.nMetaRecs = len(self.glyphRecords) + count = 0 + while (offsetOK != 1): + count = count + 1 + if count > 4: + pdb.set_trace() + metaData = sstruct.pack(METAHeaderFormat, self) + stringRecsOffset = len(metaData) + self.nMetaRecs * (6 + 2*(self.metaFlags & 1)) + stringRecSize = (6 + 
2*(self.metaFlags & 1)) + for glyphRec in self.glyphRecords: + glyphRec.offset = stringRecsOffset + if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0): + self.metaFlags = self.metaFlags + 1 + offsetOK = -1 + break + metaData = metaData + glyphRec.compile(self) + stringRecsOffset = stringRecsOffset + (glyphRec.nMetaEntry * stringRecSize) + # this will be the String Record offset for the next GlyphRecord. + if offsetOK == -1: + offsetOK = 0 + continue + + # metaData now contains the header and all of the GlyphRecords. Its length should bw + # the offset to the first StringRecord. + stringOffset = stringRecsOffset + for glyphRec in self.glyphRecords: + assert (glyphRec.offset == len(metaData)), "Glyph record offset did not compile correctly! for rec:" + str(glyphRec) + for stringRec in glyphRec.stringRecs: + stringRec.offset = stringOffset + if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0): + self.metaFlags = self.metaFlags + 1 + offsetOK = -1 + break + metaData = metaData + stringRec.compile(self) + stringOffset = stringOffset + stringRec.stringLen + if offsetOK == -1: + offsetOK = 0 + continue + + if ((self.metaFlags & 1) == 1) and (stringOffset < 65536): + self.metaFlags = self.metaFlags - 1 + continue + else: + offsetOK = 1 + + # metaData now contains the header and all of the GlyphRecords and all of the String Records. + # Its length should be the offset to the first string datum. + for glyphRec in self.glyphRecords: + for stringRec in glyphRec.stringRecs: + assert (stringRec.offset == len(metaData)), "String offset did not compile correctly! 
for string:" + str(stringRec.string) + metaData = metaData + stringRec.string + + return metaData + + def toXML(self, writer, ttFont): + writer.comment("Lengths and number of entries in this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(METAHeaderFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + for glyphRec in self.glyphRecords: + glyphRec.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "GlyphRecord": + if not hasattr(self, "glyphRecords"): + self.glyphRecords = [] + glyphRec = GlyphRecord() + self.glyphRecords.append(glyphRec) + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + glyphRec.fromXML(name, attrs, content, ttFont) + glyphRec.offset = -1 + glyphRec.nMetaEntry = len(glyphRec.stringRecs) + else: + setattr(self, name, safeEval(attrs["value"])) + + +class GlyphRecord(object): + def __init__(self): + self.glyphID = -1 + self.nMetaEntry = -1 + self.offset = -1 + self.stringRecs = [] + + def toXML(self, writer, ttFont): + writer.begintag("GlyphRecord") + writer.newline() + writer.simpletag("glyphID", value=self.glyphID) + writer.newline() + writer.simpletag("nMetaEntry", value=self.nMetaEntry) + writer.newline() + for stringRec in self.stringRecs: + stringRec.toXML(writer, ttFont) + writer.endtag("GlyphRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "StringRecord": + stringRec = StringRecord() + self.stringRecs.append(stringRec) + for element in content: + if isinstance(element, basestring): + continue + stringRec.fromXML(name, attrs, content, ttFont) + stringRec.stringLen = len(stringRec.string) + else: + setattr(self, name, safeEval(attrs["value"])) + + def compile(self, parentTable): + data = sstruct.pack(METAGlyphRecordFormat, self) + if parentTable.metaFlags == 0: + datum = 
struct.pack(">H", self.offset) + elif parentTable.metaFlags == 1: + datum = struct.pack(">L", self.offset) + data = data + datum + return data + + def __repr__(self): + return "GlyphRecord[ glyphID: " + str(self.glyphID) + ", nMetaEntry: " + str(self.nMetaEntry) + ", offset: " + str(self.offset) + " ]" + +# XXX The following two functions are really broken around UTF-8 vs Unicode + +def mapXMLToUTF8(string): + uString = unicode() + strLen = len(string) + i = 0 + while i < strLen: + prefixLen = 0 + if (string[i:i+3] == "&#x"): + prefixLen = 3 + elif (string[i:i+7] == "&#x"): + prefixLen = 7 + if prefixLen: + i = i+prefixLen + j= i + while string[i] != ";": + i = i+1 + valStr = string[j:i] + + uString = uString + unichr(eval('0x' + valStr)) + else: + uString = uString + unichr(byteord(string[i])) + i = i +1 + + return uString.encode('utf_8') + + +def mapUTF8toXML(string): + uString = string.decode('utf_8') + string = "" + for uChar in uString: + i = ord(uChar) + if (i < 0x80) and (i > 0x1F): + string = string + uChar + else: + string = string + "&#x" + hex(i)[2:] + ";" + return string + + +class StringRecord(object): + + def toXML(self, writer, ttFont): + writer.begintag("StringRecord") + writer.newline() + writer.simpletag("labelID", value=self.labelID) + writer.comment(getLabelString(self.labelID)) + writer.newline() + writer.newline() + writer.simpletag("string", value=mapUTF8toXML(self.string)) + writer.newline() + writer.endtag("StringRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + value = attrs["value"] + if name == "string": + self.string = mapXMLToUTF8(value) + else: + setattr(self, name, safeEval(value)) + + def compile(self, parentTable): + data = sstruct.pack(METAStringRecordFormat, self) + if parentTable.metaFlags == 0: + datum = struct.pack(">H", self.offset) + elif parentTable.metaFlags == 1: + datum = 
struct.pack(">L", self.offset) + data = data + datum + return data + + def __repr__(self): + return "StringRecord [ labelID: " + str(self.labelID) + " aka " + getLabelString(self.labelID) \ + + ", offset: " + str(self.offset) + ", length: " + str(self.stringLen) + ", string: " +self.string + " ]" diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_m_e_t_a_test.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_e_t_a_test.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_m_e_t_a_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_e_t_a_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,54 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._m_e_t_a import table__m_e_t_a +import unittest + + +# From a real font on MacOS X, but substituted 'bild' tag by 'TEST', +# and shortened the payload. Note that from the 'meta' spec, one would +# expect that header.dataOffset is 0x0000001C (pointing to the beginning +# of the data section) and that dataMap[0].dataOffset should be 0 (relative +# to the beginning of the data section). However, in the fonts that Apple +# ships on MacOS X 10.10.4, dataMap[0].dataOffset is actually relative +# to the beginning of the 'meta' table, i.e. 0x0000001C again. While the +# following test data is invalid according to the 'meta' specification, +# it is reflecting the 'meta' table structure in all Apple-supplied fonts. 
+META_DATA = deHexStr( + "00 00 00 01 00 00 00 00 00 00 00 1C 00 00 00 01 " + "54 45 53 54 00 00 00 1C 00 00 00 04 CA FE BE EF") + + +class MetaTableTest(unittest.TestCase): + def test_decompile(self): + table = table__m_e_t_a() + table.decompile(META_DATA, ttFont={"meta": table}) + self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) + + def test_compile(self): + table = table__m_e_t_a() + table.data["TEST"] = b"\xCA\xFE\xBE\xEF" + self.assertEqual(META_DATA, table.compile(ttFont={"meta": table})) + + def test_toXML(self): + table = table__m_e_t_a() + table.data["TEST"] = b"\xCA\xFE\xBE\xEF" + writer = XMLWriter(BytesIO()) + table.toXML(writer, {"meta": table}) + xml = writer.file.getvalue().decode("utf-8") + self.assertEqual([ + '', + 'cafebeef', + '' + ], [line.strip() for line in xml.splitlines()][1:]) + + def test_fromXML(self): + table = table__m_e_t_a() + table.fromXML("hexdata", {"tag": "TEST"}, ['cafebeef'], ttFont=None) + self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_n_a_m_e.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_n_a_m_e.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_n_a_m_e.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_n_a_m_e.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,262 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from fontTools.misc.encodingTools import getEncoding +from . 
import DefaultTable +import struct + +nameRecordFormat = """ + > # big endian + platformID: H + platEncID: H + langID: H + nameID: H + length: H + offset: H +""" + +nameRecordSize = sstruct.calcsize(nameRecordFormat) + + +class table__n_a_m_e(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + format, n, stringOffset = struct.unpack(">HHH", data[:6]) + expectedStringOffset = 6 + n * nameRecordSize + if stringOffset != expectedStringOffset: + # XXX we need a warn function + print("Warning: 'name' table stringOffset incorrect. Expected: %s; Actual: %s" % (expectedStringOffset, stringOffset)) + stringData = data[stringOffset:] + data = data[6:] + self.names = [] + for i in range(n): + if len(data) < 12: + # compensate for buggy font + break + name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord()) + name.string = stringData[name.offset:name.offset+name.length] + assert len(name.string) == name.length + #if (name.platEncID, name.platformID) in ((0, 0), (1, 3)): + # if len(name.string) % 2: + # print "2-byte string doesn't have even length!" 
+ # print name.__dict__ + del name.offset, name.length + self.names.append(name) + + def compile(self, ttFont): + if not hasattr(self, "names"): + # only happens when there are NO name table entries read + # from the TTX file + self.names = [] + names = self.names + names.sort() # sort according to the spec; see NameRecord.__lt__() + stringData = b"" + format = 0 + n = len(names) + stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat) + data = struct.pack(">HHH", format, n, stringOffset) + lastoffset = 0 + done = {} # remember the data so we can reuse the "pointers" + for name in names: + string = name.toBytes() + if string in done: + name.offset, name.length = done[string] + else: + name.offset, name.length = done[string] = len(stringData), len(string) + stringData = bytesjoin([stringData, string]) + data = data + sstruct.pack(nameRecordFormat, name) + return data + stringData + + def toXML(self, writer, ttFont): + for name in self.names: + name.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name != "namerecord": + return # ignore unknown tags + if not hasattr(self, "names"): + self.names = [] + name = NameRecord() + self.names.append(name) + name.fromXML(name, attrs, content, ttFont) + + def getName(self, nameID, platformID, platEncID, langID=None): + for namerecord in self.names: + if ( namerecord.nameID == nameID and + namerecord.platformID == platformID and + namerecord.platEncID == platEncID): + if langID is None or namerecord.langID == langID: + return namerecord + return None # not found + + def getDebugName(self, nameID): + englishName = someName = None + for name in self.names: + if name.nameID != nameID: + continue + try: + unistr = name.toUnicode() + except UnicodeDecodeError: + continue + + someName = unistr + if (name.platformID, name.langID) in ((1, 0), (3, 0x409)): + englishName = unistr + break + if englishName: + return englishName + elif someName: + return someName + else: + return None + +class 
NameRecord(object): + + def getEncoding(self, default='ascii'): + """Returns the Python encoding name for this name entry based on its platformID, + platEncID, and langID. If encoding for these values is not known, by default + 'ascii' is returned. That can be overriden by passing a value to the default + argument. + """ + return getEncoding(self.platformID, self.platEncID, self.langID, default) + + def encodingIsUnicodeCompatible(self): + return self.getEncoding(None) in ['utf_16_be', 'ucs2be', 'ascii', 'latin1'] + + def __str__(self): + try: + return self.toUnicode() + except UnicodeDecodeError: + return str(self.string) + + def isUnicode(self): + return (self.platformID == 0 or + (self.platformID == 3 and self.platEncID in [0, 1, 10])) + + def toUnicode(self, errors='strict'): + """ + If self.string is a Unicode string, return it; otherwise try decoding the + bytes in self.string to a Unicode string using the encoding of this + entry as returned by self.getEncoding(); Note that self.getEncoding() + returns 'ascii' if the encoding is unknown to the library. + + Certain heuristics are performed to recover data from bytes that are + ill-formed in the chosen encoding, or that otherwise look misencoded + (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE + but marked otherwise). If the bytes are ill-formed and the heuristics fail, + the error is handled according to the errors parameter to this function, which is + passed to the underlying decode() function; by default it throws a + UnicodeDecodeError exception. + + Note: The mentioned heuristics mean that roundtripping a font to XML and back + to binary might recover some misencoded data whereas just loading the font + and saving it back will not change them. 
+ """ + def isascii(b): + return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D] + encoding = self.getEncoding() + string = self.string + + if encoding == 'utf_16_be' and len(string) % 2 == 1: + # Recover badly encoded UTF-16 strings that have an odd number of bytes: + # - If the last byte is zero, drop it. Otherwise, + # - If all the odd bytes are zero and all the even bytes are ASCII, + # prepend one zero byte. Otherwise, + # - If first byte is zero and all other bytes are ASCII, insert zero + # bytes between consecutive ASCII bytes. + # + # (Yes, I've seen all of these in the wild... sigh) + if byteord(string[-1]) == 0: + string = string[:-1] + elif all(byteord(b) == 0 if i % 2 else isascii(byteord(b)) for i,b in enumerate(string)): + string = b'\0' + string + elif byteord(string[0]) == 0 and all(isascii(byteord(b)) for b in string[1:]): + string = bytesjoin(b'\0'+bytechr(byteord(b)) for b in string[1:]) + + string = tounicode(string, encoding=encoding, errors=errors) + + # If decoded strings still looks like UTF-16BE, it suggests a double-encoding. + # Fix it up. + if all(ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i,c in enumerate(string)): + # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text, + # narrow it down. + string = ''.join(c for c in string[1::2]) + + return string + + def toBytes(self, errors='strict'): + """ If self.string is a bytes object, return it; otherwise try encoding + the Unicode string in self.string to bytes using the encoding of this + entry as returned by self.getEncoding(); Note that self.getEncoding() + returns 'ascii' if the encoding is unknown to the library. + + If the Unicode string cannot be encoded to bytes in the chosen encoding, + the error is handled according to the errors parameter to this function, + which is passed to the underlying encode() function; by default it throws a + UnicodeEncodeError exception. 
+ """ + return tobytes(self.string, encoding=self.getEncoding(), errors=errors) + + def toXML(self, writer, ttFont): + try: + unistr = self.toUnicode() + except UnicodeDecodeError: + unistr = None + attrs = [ + ("nameID", self.nameID), + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("langID", hex(self.langID)), + ] + + if unistr is None or not self.encodingIsUnicodeCompatible(): + attrs.append(("unicode", unistr is not None)) + + writer.begintag("namerecord", attrs) + writer.newline() + if unistr is not None: + writer.write(unistr) + else: + writer.write8bit(self.string) + writer.newline() + writer.endtag("namerecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.nameID = safeEval(attrs["nameID"]) + self.platformID = safeEval(attrs["platformID"]) + self.platEncID = safeEval(attrs["platEncID"]) + self.langID = safeEval(attrs["langID"]) + s = strjoin(content).strip() + encoding = self.getEncoding() + if self.encodingIsUnicodeCompatible() or safeEval(attrs.get("unicode", "False")): + self.string = s.encode(encoding) + else: + # This is the inverse of write8bit... + self.string = s.encode("latin1") + + def __lt__(self, other): + if type(self) != type(other): + return NotImplemented + + # implemented so that list.sort() sorts according to the spec. 
+ selfTuple = ( + getattr(self, "platformID", None), + getattr(self, "platEncID", None), + getattr(self, "langID", None), + getattr(self, "nameID", None), + getattr(self, "string", None), + ) + otherTuple = ( + getattr(other, "platformID", None), + getattr(other, "platEncID", None), + getattr(other, "langID", None), + getattr(other, "nameID", None), + getattr(other, "string", None), + ) + return selfTuple < otherTuple + + def __repr__(self): + return "" % ( + self.nameID, self.platformID, self.langID) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_n_a_m_e_test.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_n_a_m_e_test.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_n_a_m_e_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_n_a_m_e_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.xmlWriter import XMLWriter +import unittest +from ._n_a_m_e import table__n_a_m_e, NameRecord + + +def makeName(text, nameID, platformID, platEncID, langID): + name = NameRecord() + name.nameID, name.platformID, name.platEncID, name.langID = ( + nameID, platformID, platEncID, langID) + name.string = tobytes(text, encoding=name.getEncoding()) + return name + + +class NameTableTest(unittest.TestCase): + + def test_getDebugName(self): + table = table__n_a_m_e() + table.names = [ + makeName("Bold", 258, 1, 0, 0), # Mac, MacRoman, English + makeName("Gras", 258, 1, 0, 1), # Mac, MacRoman, French + makeName("Fett", 258, 1, 0, 2), # Mac, MacRoman, German + makeName("Sem Fracções", 292, 1, 0, 8) # Mac, MacRoman, Portuguese + ] + self.assertEqual("Bold", table.getDebugName(258)) + self.assertEqual("Sem Fracções", table.getDebugName(292)) + self.assertEqual(None, table.getDebugName(999)) + + +class NameRecordTest(unittest.TestCase): + + def 
test_toUnicode_utf16be(self): + name = makeName("Foo Bold", 111, 0, 2, 7) + self.assertEqual("utf_16_be", name.getEncoding()) + self.assertEqual("Foo Bold", name.toUnicode()) + + def test_toUnicode_macroman(self): + name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman + self.assertEqual("mac_roman", name.getEncoding()) + self.assertEqual("Foo Italic", name.toUnicode()) + + def test_toUnicode_macromanian(self): + name = makeName(b"Foo Italic\xfb", 222, 1, 0, 37) # Mac Romanian + self.assertEqual("mac_romanian", name.getEncoding()) + self.assertEqual("Foo Italic"+unichr(0x02DA), name.toUnicode()) + + def test_toUnicode_UnicodeDecodeError(self): + name = makeName(b"\1", 111, 0, 2, 7) + self.assertEqual("utf_16_be", name.getEncoding()) + self.assertRaises(UnicodeDecodeError, name.toUnicode) + + def toXML(self, name): + writer = XMLWriter(BytesIO()) + name.toXML(writer, ttFont=None) + xml = writer.file.getvalue().decode("utf_8").strip() + return xml.split(writer.newlinestr.decode("utf_8"))[1:] + + def test_toXML_utf16be(self): + name = makeName("Foo Bold", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Foo Bold', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_odd_length1(self): + name = makeName(b"\0F\0o\0o\0", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Foo', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_odd_length2(self): + name = makeName(b"\0Fooz", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Fooz', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_double_encoded(self): + name = makeName(b"\0\0\0F\0\0\0o", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Fo', + '' + ], self.toXML(name)) + + def test_toXML_macroman(self): + name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman + self.assertEqual([ + '', + ' Foo Italic', + '' + ], self.toXML(name)) + + def test_toXML_macroman_actual_utf16be(self): + name = makeName("\0F\0o\0o", 222, 1, 0, 7) + self.assertEqual([ + '', + ' Foo', + '' + ], self.toXML(name)) + + def 
test_toXML_unknownPlatEncID_nonASCII(self): + name = makeName(b"B\x8arli", 333, 1, 9876, 7) # Unknown Mac encodingID + self.assertEqual([ + '', + ' BŠrli', + '' + ], self.toXML(name)) + + def test_toXML_unknownPlatEncID_ASCII(self): + name = makeName(b"Barli", 333, 1, 9876, 7) # Unknown Mac encodingID + self.assertEqual([ + '', + ' Barli', + '' + ], self.toXML(name)) + + def test_encoding_macroman_misc(self): + name = makeName('', 123, 1, 0, 17) # Mac Turkish + self.assertEqual(name.getEncoding(), "mac_turkish") + name.langID = 37 + self.assertEqual(name.getEncoding(), "mac_romanian") + name.langID = 45 # Other + self.assertEqual(name.getEncoding(), "mac_roman") + + def test_extended_mac_encodings(self): + name = makeName(b'\xfe', 123, 1, 1, 0) # Mac Japanese + self.assertEqual(name.toUnicode(), unichr(0x2122)) + + def test_extended_unknown(self): + name = makeName(b'\xfe', 123, 10, 11, 12) + self.assertEqual(name.getEncoding(), "ascii") + self.assertEqual(name.getEncoding(None), None) + self.assertEqual(name.getEncoding(default=None), None) + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/O_S_2f_2.py fonttools-3.0/Snippets/fontTools/ttLib/tables/O_S_2f_2.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/O_S_2f_2.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/O_S_2f_2.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,230 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, num2binary, binary2num +from . 
import DefaultTable +import warnings + + +# panose classification + +panoseFormat = """ + bFamilyType: B + bSerifStyle: B + bWeight: B + bProportion: B + bContrast: B + bStrokeVariation: B + bArmStyle: B + bLetterForm: B + bMidline: B + bXHeight: B +""" + +class Panose(object): + + def toXML(self, writer, ttFont): + formatstring, names, fixes = sstruct.getformat(panoseFormat) + for name in names: + writer.simpletag(name, value=getattr(self, name)) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) + + +# 'sfnt' OS/2 and Windows Metrics table - 'OS/2' + +OS2_format_0 = """ + > # big endian + version: H # version + xAvgCharWidth: h # average character width + usWeightClass: H # degree of thickness of strokes + usWidthClass: H # aspect ratio + fsType: h # type flags + ySubscriptXSize: h # subscript horizontal font size + ySubscriptYSize: h # subscript vertical font size + ySubscriptXOffset: h # subscript x offset + ySubscriptYOffset: h # subscript y offset + ySuperscriptXSize: h # superscript horizontal font size + ySuperscriptYSize: h # superscript vertical font size + ySuperscriptXOffset: h # superscript x offset + ySuperscriptYOffset: h # superscript y offset + yStrikeoutSize: h # strikeout size + yStrikeoutPosition: h # strikeout position + sFamilyClass: h # font family class and subclass + panose: 10s # panose classification number + ulUnicodeRange1: L # character range + ulUnicodeRange2: L # character range + ulUnicodeRange3: L # character range + ulUnicodeRange4: L # character range + achVendID: 4s # font vendor identification + fsSelection: H # font selection flags + usFirstCharIndex: H # first unicode character index + usLastCharIndex: H # last unicode character index + sTypoAscender: h # typographic ascender + sTypoDescender: h # typographic descender + sTypoLineGap: h # typographic line gap + usWinAscent: H # Windows ascender + usWinDescent: H # Windows descender +""" + 
+OS2_format_1_addition = """ + ulCodePageRange1: L + ulCodePageRange2: L +""" + +OS2_format_2_addition = OS2_format_1_addition + """ + sxHeight: h + sCapHeight: h + usDefaultChar: H + usBreakChar: H + usMaxContext: H +""" + +OS2_format_5_addition = OS2_format_2_addition + """ + usLowerOpticalPointSize: H + usUpperOpticalPointSize: H +""" + +bigendian = " > # big endian\n" + +OS2_format_1 = OS2_format_0 + OS2_format_1_addition +OS2_format_2 = OS2_format_0 + OS2_format_2_addition +OS2_format_5 = OS2_format_0 + OS2_format_5_addition +OS2_format_1_addition = bigendian + OS2_format_1_addition +OS2_format_2_addition = bigendian + OS2_format_2_addition +OS2_format_5_addition = bigendian + OS2_format_5_addition + + +class table_O_S_2f_2(DefaultTable.DefaultTable): + + """the OS/2 table""" + + def decompile(self, data, ttFont): + dummy, data = sstruct.unpack2(OS2_format_0, data, self) + + if self.version == 1: + dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self) + elif self.version in (2, 3, 4): + dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self) + elif self.version == 5: + dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self) + self.usLowerOpticalPointSize /= 20 + self.usUpperOpticalPointSize /= 20 + elif self.version != 0: + from fontTools import ttLib + raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version) + if len(data): + warnings.warn("too much 'OS/2' table data") + + self.panose = sstruct.unpack(panoseFormat, self.panose, Panose()) + + def compile(self, ttFont): + self.updateFirstAndLastCharIndex(ttFont) + panose = self.panose + self.panose = sstruct.pack(panoseFormat, self.panose) + if self.version == 0: + data = sstruct.pack(OS2_format_0, self) + elif self.version == 1: + data = sstruct.pack(OS2_format_1, self) + elif self.version in (2, 3, 4): + data = sstruct.pack(OS2_format_2, self) + elif self.version == 5: + d = self.__dict__.copy() + d['usLowerOpticalPointSize'] = 
int(round(self.usLowerOpticalPointSize * 20)) + d['usUpperOpticalPointSize'] = int(round(self.usUpperOpticalPointSize * 20)) + data = sstruct.pack(OS2_format_5, d) + else: + from fontTools import ttLib + raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version) + self.panose = panose + return data + + def toXML(self, writer, ttFont): + writer.comment( + "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n" + "will be recalculated by the compiler") + writer.newline() + if self.version == 1: + format = OS2_format_1 + elif self.version in (2, 3, 4): + format = OS2_format_2 + elif self.version == 5: + format = OS2_format_5 + else: + format = OS2_format_0 + formatstring, names, fixes = sstruct.getformat(format) + for name in names: + value = getattr(self, name) + if name=="panose": + writer.begintag("panose") + writer.newline() + value.toXML(writer, ttFont) + writer.endtag("panose") + elif name in ("ulUnicodeRange1", "ulUnicodeRange2", + "ulUnicodeRange3", "ulUnicodeRange4", + "ulCodePageRange1", "ulCodePageRange2"): + writer.simpletag(name, value=num2binary(value)) + elif name in ("fsType", "fsSelection"): + writer.simpletag(name, value=num2binary(value, 16)) + elif name == "achVendID": + writer.simpletag(name, value=repr(value)[1:-1]) + else: + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "panose": + self.panose = panose = Panose() + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + panose.fromXML(name, attrs, content, ttFont) + elif name in ("ulUnicodeRange1", "ulUnicodeRange2", + "ulUnicodeRange3", "ulUnicodeRange4", + "ulCodePageRange1", "ulCodePageRange2", + "fsType", "fsSelection"): + setattr(self, name, binary2num(attrs["value"])) + elif name == "achVendID": + setattr(self, name, safeEval("'''" + attrs["value"] + "'''")) + else: + setattr(self, name, safeEval(attrs["value"])) + + def updateFirstAndLastCharIndex(self, 
ttFont): + codes = set() + for table in ttFont['cmap'].tables: + if table.isUnicode(): + codes.update(table.cmap.keys()) + if codes: + minCode = min(codes) + maxCode = max(codes) + # USHORT cannot hold codepoints greater than 0xFFFF + self.usFirstCharIndex = 0xFFFF if minCode > 0xFFFF else minCode + self.usLastCharIndex = 0xFFFF if maxCode > 0xFFFF else maxCode + + # misspelled attributes kept for legacy reasons + + @property + def usMaxContex(self): + return self.usMaxContext + + @usMaxContex.setter + def usMaxContex(self, value): + self.usMaxContext = value + + @property + def fsFirstCharIndex(self): + return self.usFirstCharIndex + + @fsFirstCharIndex.setter + def fsFirstCharIndex(self, value): + self.usFirstCharIndex = value + + @property + def fsLastCharIndex(self): + return self.usLastCharIndex + + @fsLastCharIndex.setter + def fsLastCharIndex(self, value): + self.usLastCharIndex = value diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/otBase.py fonttools-3.0/Snippets/fontTools/ttLib/tables/otBase.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/otBase.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/otBase.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,901 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .DefaultTable import DefaultTable +import struct + +class OverflowErrorRecord(object): + def __init__(self, overflowTuple): + self.tableType = overflowTuple[0] + self.LookupListIndex = overflowTuple[1] + self.SubTableIndex = overflowTuple[2] + self.itemName = overflowTuple[3] + self.itemIndex = overflowTuple[4] + + def __repr__(self): + return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex)) + +class OTLOffsetOverflowError(Exception): + def __init__(self, overflowErrorRecord): + self.value = overflowErrorRecord + + def __str__(self): + 
return repr(self.value) + + +class BaseTTXConverter(DefaultTable): + + """Generic base class for TTX table converters. It functions as an + adapter between the TTX (ttLib actually) table model and the model + we use for OpenType tables, which is necessarily subtly different. + """ + + def decompile(self, data, font): + from . import otTables + cachingStats = None if True else {} + class GlobalState(object): + def __init__(self, tableType, cachingStats): + self.tableType = tableType + self.cachingStats = cachingStats + globalState = GlobalState(tableType=self.tableTag, + cachingStats=cachingStats) + reader = OTTableReader(data, globalState) + tableClass = getattr(otTables, self.tableTag) + self.table = tableClass() + self.table.decompile(reader, font) + if cachingStats: + stats = sorted([(v, k) for k, v in cachingStats.items()]) + stats.reverse() + print("cachingsstats for ", self.tableTag) + for v, k in stats: + if v < 2: + break + print(v, k) + print("---", len(stats)) + + def compile(self, font): + """ Create a top-level OTFWriter for the GPOS/GSUB table. + Call the compile method for the the table + for each 'converter' record in the table converter list + call converter's write method for each item in the value. + - For simple items, the write method adds a string to the + writer's self.items list. + - For Struct/Table/Subtable items, it add first adds new writer to the + to the writer's self.items, then calls the item's compile method. + This creates a tree of writers, rooted at the GUSB/GPOS writer, with + each writer representing a table, and the writer.items list containing + the child data strings and writers. + call the getAllData method + call _doneWriting, which removes duplicates + call _gatherTables. 
This traverses the tables, adding unique occurences to a flat list of tables + Traverse the flat list of tables, calling getDataLength on each to update their position + Traverse the flat list of tables again, calling getData each get the data in the table, now that + pos's and offset are known. + + If a lookup subtable overflows an offset, we have to start all over. + """ + class GlobalState(object): + def __init__(self, tableType): + self.tableType = tableType + globalState = GlobalState(tableType=self.tableTag) + overflowRecord = None + + while True: + try: + writer = OTTableWriter(globalState) + self.table.compile(writer, font) + return writer.getAllData() + + except OTLOffsetOverflowError as e: + + if overflowRecord == e.value: + raise # Oh well... + + overflowRecord = e.value + print("Attempting to fix OTLOffsetOverflowError", e) + lastItem = overflowRecord + + ok = 0 + if overflowRecord.itemName is None: + from .otTables import fixLookupOverFlows + ok = fixLookupOverFlows(font, overflowRecord) + else: + from .otTables import fixSubTableOverFlows + ok = fixSubTableOverFlows(font, overflowRecord) + if not ok: + raise + + def toXML(self, writer, font): + self.table.toXML2(writer, font) + + def fromXML(self, name, attrs, content, font): + from . 
import otTables + if not hasattr(self, "table"): + tableClass = getattr(otTables, self.tableTag) + self.table = tableClass() + self.table.fromXML(name, attrs, content, font) + + +class OTTableReader(object): + + """Helper class to retrieve data from an OpenType table.""" + + __slots__ = ('data', 'offset', 'pos', 'globalState', 'localState') + + def __init__(self, data, globalState={}, localState=None, offset=0): + self.data = data + self.offset = offset + self.pos = offset + self.globalState = globalState + self.localState = localState + + def advance(self, count): + self.pos += count + def seek(self, pos): + self.pos = pos + + def copy(self): + other = self.__class__(self.data, self.globalState, self.localState, self.offset) + other.pos = self.pos + return other + + def getSubReader(self, offset): + offset = self.offset + offset + cachingStats = self.globalState.cachingStats + if cachingStats is not None: + cachingStats[offset] = cachingStats.get(offset, 0) + 1 + return self.__class__(self.data, self.globalState, self.localState, offset) + + def readUShort(self): + pos = self.pos + newpos = pos + 2 + value, = struct.unpack(">H", self.data[pos:newpos]) + self.pos = newpos + return value + + def readShort(self): + pos = self.pos + newpos = pos + 2 + value, = struct.unpack(">h", self.data[pos:newpos]) + self.pos = newpos + return value + + def readLong(self): + pos = self.pos + newpos = pos + 4 + value, = struct.unpack(">l", self.data[pos:newpos]) + self.pos = newpos + return value + + def readUInt24(self): + pos = self.pos + newpos = pos + 3 + value, = struct.unpack(">l", b'\0'+self.data[pos:newpos]) + self.pos = newpos + return value + + def readULong(self): + pos = self.pos + newpos = pos + 4 + value, = struct.unpack(">L", self.data[pos:newpos]) + self.pos = newpos + return value + + def readTag(self): + pos = self.pos + newpos = pos + 4 + value = Tag(self.data[pos:newpos]) + assert len(value) == 4 + self.pos = newpos + return value + + def readData(self, count): 
+ pos = self.pos + newpos = pos + count + value = self.data[pos:newpos] + self.pos = newpos + return value + + def __setitem__(self, name, value): + state = self.localState.copy() if self.localState else dict() + state[name] = value + self.localState = state + + def __getitem__(self, name): + return self.localState and self.localState[name] + + def __contains__(self, name): + return self.localState and name in self.localState + + +class OTTableWriter(object): + + """Helper class to gather and assemble data for OpenType tables.""" + + def __init__(self, globalState, localState=None): + self.items = [] + self.pos = None + self.globalState = globalState + self.localState = localState + self.longOffset = False + self.parent = None + + def __setitem__(self, name, value): + state = self.localState.copy() if self.localState else dict() + state[name] = value + self.localState = state + + def __getitem__(self, name): + return self.localState[name] + + # assembler interface + + def getAllData(self): + """Assemble all data, including all subtables.""" + self._doneWriting() + tables, extTables = self._gatherTables() + tables.reverse() + extTables.reverse() + # Gather all data in two passes: the absolute positions of all + # subtable are needed before the actual data can be assembled. 
+ pos = 0 + for table in tables: + table.pos = pos + pos = pos + table.getDataLength() + + for table in extTables: + table.pos = pos + pos = pos + table.getDataLength() + + data = [] + for table in tables: + tableData = table.getData() + data.append(tableData) + + for table in extTables: + tableData = table.getData() + data.append(tableData) + + return bytesjoin(data) + + def getDataLength(self): + """Return the length of this table in bytes, without subtables.""" + l = 0 + for item in self.items: + if hasattr(item, "getData") or hasattr(item, "getCountData"): + if item.longOffset: + l = l + 4 # sizeof(ULong) + else: + l = l + 2 # sizeof(UShort) + else: + l = l + len(item) + return l + + def getData(self): + """Assemble the data for this writer/table, without subtables.""" + items = list(self.items) # make a shallow copy + pos = self.pos + numItems = len(items) + for i in range(numItems): + item = items[i] + + if hasattr(item, "getData"): + if item.longOffset: + items[i] = packULong(item.pos - pos) + else: + try: + items[i] = packUShort(item.pos - pos) + except struct.error: + # provide data to fix overflow problem. + # If the overflow is to a lookup, or from a lookup to a subtable, + # just report the current item. Otherwise... + if self.name not in [ 'LookupList', 'Lookup']: + # overflow is within a subTable. Life is more complicated. + # If we split the sub-table just before the current item, we may still suffer overflow. + # This is because duplicate table merging is done only within an Extension subTable tree; + # when we split the subtable in two, some items may no longer be duplicates. + # Get worst case by adding up all the item lengths, depth first traversal. + # and then report the first item that overflows a short. 
+ def getDeepItemLength(table): + if hasattr(table, "getDataLength"): + length = 0 + for item in table.items: + length = length + getDeepItemLength(item) + else: + length = len(table) + return length + + length = self.getDataLength() + if hasattr(self, "sortCoverageLast") and item.name == "Coverage": + # Coverage is first in the item list, but last in the table list, + # The original overflow is really in the item list. Skip the Coverage + # table in the following test. + items = items[i+1:] + + for j in range(len(items)): + item = items[j] + length = length + getDeepItemLength(item) + if length > 65535: + break + overflowErrorRecord = self.getOverflowErrorRecord(item) + + raise OTLOffsetOverflowError(overflowErrorRecord) + + return bytesjoin(items) + + def __hash__(self): + # only works after self._doneWriting() has been called + return hash(self.items) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.items == other.items + + def _doneWriting(self, internedTables=None): + # Convert CountData references to data string items + # collapse duplicate table references to a unique entry + # "tables" are OTTableWriter objects. + + # For Extension Lookup types, we can + # eliminate duplicates only within the tree under the Extension Lookup, + # as offsets may exceed 64K even between Extension LookupTable subtables. 
+ if internedTables is None: + internedTables = {} + items = self.items + iRange = list(range(len(items))) + + if hasattr(self, "Extension"): + newTree = 1 + else: + newTree = 0 + for i in iRange: + item = items[i] + if hasattr(item, "getCountData"): + items[i] = item.getCountData() + elif hasattr(item, "getData"): + if newTree: + item._doneWriting() + else: + item._doneWriting(internedTables) + internedItem = internedTables.get(item) + if internedItem: + items[i] = item = internedItem + else: + internedTables[item] = item + self.items = tuple(items) + + def _gatherTables(self, tables=None, extTables=None, done=None): + # Convert table references in self.items tree to a flat + # list of tables in depth-first traversal order. + # "tables" are OTTableWriter objects. + # We do the traversal in reverse order at each level, in order to + # resolve duplicate references to be the last reference in the list of tables. + # For extension lookups, duplicate references can be merged only within the + # writer tree under the extension lookup. + if tables is None: # init call for first time. + tables = [] + extTables = [] + done = {} + + done[self] = 1 + + numItems = len(self.items) + iRange = list(range(numItems)) + iRange.reverse() + + if hasattr(self, "Extension"): + appendExtensions = 1 + else: + appendExtensions = 0 + + # add Coverage table if it is sorted last. 
+ sortCoverageLast = 0 + if hasattr(self, "sortCoverageLast"): + # Find coverage table + for i in range(numItems): + item = self.items[i] + if hasattr(item, "name") and (item.name == "Coverage"): + sortCoverageLast = 1 + break + if item not in done: + item._gatherTables(tables, extTables, done) + else: + # We're a new parent of item + pass + + for i in iRange: + item = self.items[i] + if not hasattr(item, "getData"): + continue + + if sortCoverageLast and (i==1) and item.name == 'Coverage': + # we've already 'gathered' it above + continue + + if appendExtensions: + assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables" + newDone = {} + item._gatherTables(extTables, None, newDone) + + elif item not in done: + item._gatherTables(tables, extTables, done) + else: + # We're a new parent of item + pass + + tables.append(self) + return tables, extTables + + # interface for gathering data, as used by table.compile() + + def getSubWriter(self): + subwriter = self.__class__(self.globalState, self.localState) + subwriter.parent = self # because some subtables have idential values, we discard + # the duplicates under the getAllData method. Hence some + # subtable writers can have more than one parent writer. + # But we just care about first one right now. 
+ return subwriter + + def writeUShort(self, value): + assert 0 <= value < 0x10000 + self.items.append(struct.pack(">H", value)) + + def writeShort(self, value): + self.items.append(struct.pack(">h", value)) + + def writeUInt24(self, value): + assert 0 <= value < 0x1000000 + b = struct.pack(">L", value) + self.items.append(b[1:]) + + def writeLong(self, value): + self.items.append(struct.pack(">l", value)) + + def writeULong(self, value): + self.items.append(struct.pack(">L", value)) + + def writeTag(self, tag): + tag = Tag(tag).tobytes() + assert len(tag) == 4 + self.items.append(tag) + + def writeSubTable(self, subWriter): + self.items.append(subWriter) + + def writeCountReference(self, table, name): + ref = CountReference(table, name) + self.items.append(ref) + return ref + + def writeStruct(self, format, values): + data = struct.pack(*(format,) + values) + self.items.append(data) + + def writeData(self, data): + self.items.append(data) + + def getOverflowErrorRecord(self, item): + LookupListIndex = SubTableIndex = itemName = itemIndex = None + if self.name == 'LookupList': + LookupListIndex = item.repeatIndex + elif self.name == 'Lookup': + LookupListIndex = self.repeatIndex + SubTableIndex = item.repeatIndex + else: + itemName = item.name + if hasattr(item, 'repeatIndex'): + itemIndex = item.repeatIndex + if self.name == 'SubTable': + LookupListIndex = self.parent.repeatIndex + SubTableIndex = self.repeatIndex + elif self.name == 'ExtSubTable': + LookupListIndex = self.parent.parent.repeatIndex + SubTableIndex = self.parent.repeatIndex + else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable. 
+ itemName = ".".join([self.name, item.name]) + p1 = self.parent + while p1 and p1.name not in ['ExtSubTable', 'SubTable']: + itemName = ".".join([p1.name, item.name]) + p1 = p1.parent + if p1: + if p1.name == 'ExtSubTable': + LookupListIndex = p1.parent.parent.repeatIndex + SubTableIndex = p1.parent.repeatIndex + else: + LookupListIndex = p1.parent.repeatIndex + SubTableIndex = p1.repeatIndex + + return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) ) + + +class CountReference(object): + """A reference to a Count value, not a count of references.""" + def __init__(self, table, name): + self.table = table + self.name = name + def setValue(self, value): + table = self.table + name = self.name + if table[name] is None: + table[name] = value + else: + assert table[name] == value, (name, table[name], value) + def getCountData(self): + return packUShort(self.table[self.name]) + + +def packUShort(value): + return struct.pack(">H", value) + + +def packULong(value): + assert 0 <= value < 0x100000000, value + return struct.pack(">L", value) + + +class BaseTable(object): + + """Generic base class for all OpenType (sub)tables.""" + + def __getattr__(self, attr): + reader = self.__dict__.get("reader") + if reader: + del self.reader + font = self.font + del self.font + self.decompile(reader, font) + return getattr(self, attr) + + raise AttributeError(attr) + + def ensureDecompiled(self): + reader = self.__dict__.get("reader") + if reader: + del self.reader + font = self.font + del self.font + self.decompile(reader, font) + + @classmethod + def getRecordSize(cls, reader): + totalSize = 0 + for conv in cls.converters: + size = conv.getRecordSize(reader) + if size is NotImplemented: return NotImplemented + countValue = 1 + if conv.repeat: + if conv.repeat in reader: + countValue = reader[conv.repeat] + else: + return NotImplemented + totalSize += size * countValue + return totalSize + + def getConverters(self): + return 
self.converters + + def getConverterByName(self, name): + return self.convertersByName[name] + + def decompile(self, reader, font): + self.readFormat(reader) + table = {} + self.__rawTable = table # for debugging + converters = self.getConverters() + for conv in converters: + if conv.name == "SubTable": + conv = conv.getConverter(reader.globalState.tableType, + table["LookupType"]) + if conv.name == "ExtSubTable": + conv = conv.getConverter(reader.globalState.tableType, + table["ExtensionLookupType"]) + if conv.name == "FeatureParams": + conv = conv.getConverter(reader["FeatureTag"]) + if conv.repeat: + if conv.repeat in table: + countValue = table[conv.repeat] + else: + # conv.repeat is a propagated count + countValue = reader[conv.repeat] + countValue += conv.aux + table[conv.name] = conv.readArray(reader, font, table, countValue) + else: + if conv.aux and not eval(conv.aux, None, table): + continue + table[conv.name] = conv.read(reader, font, table) + if conv.isPropagated: + reader[conv.name] = table[conv.name] + + self.postRead(table, font) + + del self.__rawTable # succeeded, get rid of debugging info + + def compile(self, writer, font): + self.ensureDecompiled() + table = self.preWrite(font) + + if hasattr(self, 'sortCoverageLast'): + writer.sortCoverageLast = 1 + + if hasattr(self.__class__, 'LookupType'): + writer['LookupType'].setValue(self.__class__.LookupType) + + self.writeFormat(writer) + for conv in self.getConverters(): + value = table.get(conv.name) + if conv.repeat: + if value is None: + value = [] + countValue = len(value) - conv.aux + if conv.repeat in table: + CountReference(table, conv.repeat).setValue(countValue) + else: + # conv.repeat is a propagated count + writer[conv.repeat].setValue(countValue) + conv.writeArray(writer, font, table, value) + elif conv.isCount: + # Special-case Count values. + # Assumption: a Count field will *always* precede + # the actual array(s). 
+ # We need a default value, as it may be set later by a nested + # table. We will later store it here. + # We add a reference: by the time the data is assembled + # the Count value will be filled in. + ref = writer.writeCountReference(table, conv.name) + table[conv.name] = None + if conv.isPropagated: + writer[conv.name] = ref + elif conv.isLookupType: + ref = writer.writeCountReference(table, conv.name) + table[conv.name] = None + writer['LookupType'] = ref + else: + if conv.aux and not eval(conv.aux, None, table): + continue + conv.write(writer, font, table, value) + if conv.isPropagated: + writer[conv.name] = value + + def readFormat(self, reader): + pass + + def writeFormat(self, writer): + pass + + def postRead(self, table, font): + self.__dict__.update(table) + + def preWrite(self, font): + return self.__dict__.copy() + + def toXML(self, xmlWriter, font, attrs=None, name=None): + tableName = name if name else self.__class__.__name__ + if attrs is None: + attrs = [] + if hasattr(self, "Format"): + attrs = attrs + [("Format", self.Format)] + xmlWriter.begintag(tableName, attrs) + xmlWriter.newline() + self.toXML2(xmlWriter, font) + xmlWriter.endtag(tableName) + xmlWriter.newline() + + def toXML2(self, xmlWriter, font): + # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB). + # This is because in TTX our parent writes our main tag, and in otBase.py we + # do it ourselves. I think I'm getting schizophrenic... 
+ for conv in self.getConverters(): + if conv.repeat: + value = getattr(self, conv.name) + for i in range(len(value)): + item = value[i] + conv.xmlWrite(xmlWriter, font, item, conv.name, + [("index", i)]) + else: + if conv.aux and not eval(conv.aux, None, vars(self)): + continue + value = getattr(self, conv.name) + conv.xmlWrite(xmlWriter, font, value, conv.name, []) + + def fromXML(self, name, attrs, content, font): + try: + conv = self.getConverterByName(name) + except KeyError: + raise # XXX on KeyError, raise nice error + value = conv.xmlRead(attrs, content, font) + if conv.repeat: + seq = getattr(self, conv.name, None) + if seq is None: + seq = [] + setattr(self, conv.name, seq) + seq.append(value) + else: + setattr(self, conv.name, value) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + + self.ensureDecompiled() + other.ensureDecompiled() + + return self.__dict__ == other.__dict__ + + +class FormatSwitchingBaseTable(BaseTable): + + """Minor specialization of BaseTable, for tables that have multiple + formats, eg. CoverageFormat1 vs. CoverageFormat2.""" + + @classmethod + def getRecordSize(cls, reader): + return NotImplemented + + def getConverters(self): + return self.converters[self.Format] + + def getConverterByName(self, name): + return self.convertersByName[self.Format][name] + + def readFormat(self, reader): + self.Format = reader.readUShort() + assert self.Format != 0, (self, reader.pos, len(reader.data)) + + def writeFormat(self, writer): + writer.writeUShort(self.Format) + + def toXML(self, xmlWriter, font, attrs=None, name=None): + BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) + + +# +# Support for ValueRecords +# +# This data type is so different from all other OpenType data types that +# it requires quite a bit of code for itself. It even has special support +# in OTTableReader and OTTableWriter... 
+# + +valueRecordFormat = [ +# Mask Name isDevice signed + (0x0001, "XPlacement", 0, 1), + (0x0002, "YPlacement", 0, 1), + (0x0004, "XAdvance", 0, 1), + (0x0008, "YAdvance", 0, 1), + (0x0010, "XPlaDevice", 1, 0), + (0x0020, "YPlaDevice", 1, 0), + (0x0040, "XAdvDevice", 1, 0), + (0x0080, "YAdvDevice", 1, 0), +# reserved: + (0x0100, "Reserved1", 0, 0), + (0x0200, "Reserved2", 0, 0), + (0x0400, "Reserved3", 0, 0), + (0x0800, "Reserved4", 0, 0), + (0x1000, "Reserved5", 0, 0), + (0x2000, "Reserved6", 0, 0), + (0x4000, "Reserved7", 0, 0), + (0x8000, "Reserved8", 0, 0), +] + +def _buildDict(): + d = {} + for mask, name, isDevice, signed in valueRecordFormat: + d[name] = mask, isDevice, signed + return d + +valueRecordFormatDict = _buildDict() + + +class ValueRecordFactory(object): + + """Given a format code, this object convert ValueRecords.""" + + def __init__(self, valueFormat): + format = [] + for mask, name, isDevice, signed in valueRecordFormat: + if valueFormat & mask: + format.append((name, isDevice, signed)) + self.format = format + + def __len__(self): + return len(self.format) + + def readValueRecord(self, reader, font): + format = self.format + if not format: + return None + valueRecord = ValueRecord() + for name, isDevice, signed in format: + if signed: + value = reader.readShort() + else: + value = reader.readUShort() + if isDevice: + if value: + from . 
import otTables + subReader = reader.getSubReader(value) + value = getattr(otTables, name)() + value.decompile(subReader, font) + else: + value = None + setattr(valueRecord, name, value) + return valueRecord + + def writeValueRecord(self, writer, font, valueRecord): + for name, isDevice, signed in self.format: + value = getattr(valueRecord, name, 0) + if isDevice: + if value: + subWriter = writer.getSubWriter() + writer.writeSubTable(subWriter) + value.compile(subWriter, font) + else: + writer.writeUShort(0) + elif signed: + writer.writeShort(value) + else: + writer.writeUShort(value) + + +class ValueRecord(object): + + # see ValueRecordFactory + + def getFormat(self): + format = 0 + for name in self.__dict__.keys(): + format = format | valueRecordFormatDict[name][0] + return format + + def toXML(self, xmlWriter, font, valueName, attrs=None): + if attrs is None: + simpleItems = [] + else: + simpleItems = list(attrs) + for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values + if hasattr(self, name): + simpleItems.append((name, getattr(self, name))) + deviceItems = [] + for mask, name, isDevice, format in valueRecordFormat[4:8]: # device records + if hasattr(self, name): + device = getattr(self, name) + if device is not None: + deviceItems.append((name, device)) + if deviceItems: + xmlWriter.begintag(valueName, simpleItems) + xmlWriter.newline() + for name, deviceRecord in deviceItems: + if deviceRecord is not None: + deviceRecord.toXML(xmlWriter, font) + xmlWriter.endtag(valueName) + xmlWriter.newline() + else: + xmlWriter.simpletag(valueName, simpleItems) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + from . 
import otTables + for k, v in attrs.items(): + setattr(self, k, int(v)) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + value = getattr(otTables, name)() + for elem2 in content: + if not isinstance(elem2, tuple): + continue + name2, attrs2, content2 = elem2 + value.fromXML(name2, attrs2, content2, font) + setattr(self, name, value) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/otConverters.py fonttools-3.0/Snippets/fontTools/ttLib/tables/otConverters.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/otConverters.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/otConverters.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,481 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from .otBase import ValueRecordFactory +import array + + +def buildConverters(tableSpec, tableNamespace): + """Given a table spec from otData.py, build a converter object for each + field of the table. 
This is called for each table in otData.py, and + the results are assigned to the corresponding class in otTables.py.""" + converters = [] + convertersByName = {} + for tp, name, repeat, aux, descr in tableSpec: + tableName = name + if name.startswith("ValueFormat"): + assert tp == "uint16" + converterClass = ValueFormat + elif name.endswith("Count") or name.endswith("LookupType"): + assert tp == "uint16" + converterClass = ComputedUShort + elif name == "SubTable": + converterClass = SubTable + elif name == "ExtSubTable": + converterClass = ExtSubTable + elif name == "FeatureParams": + converterClass = FeatureParams + else: + if not tp in converterMapping: + tableName = tp + converterClass = Struct + else: + converterClass = converterMapping[tp] + tableClass = tableNamespace.get(tableName) + conv = converterClass(name, repeat, aux, tableClass) + if name in ["SubTable", "ExtSubTable"]: + conv.lookupTypes = tableNamespace['lookupTypes'] + # also create reverse mapping + for t in conv.lookupTypes.values(): + for cls in t.values(): + convertersByName[cls.__name__] = Table(name, repeat, aux, cls) + if name == "FeatureParams": + conv.featureParamTypes = tableNamespace['featureParamTypes'] + conv.defaultFeatureParams = tableNamespace['FeatureParams'] + for cls in conv.featureParamTypes.values(): + convertersByName[cls.__name__] = Table(name, repeat, aux, cls) + converters.append(conv) + assert name not in convertersByName, name + convertersByName[name] = conv + return converters, convertersByName + + +class _MissingItem(tuple): + __slots__ = () + +try: + from collections import UserList +except: + from UserList import UserList + +class _LazyList(UserList): + + def __getslice__(self, i, j): + return self.__getitem__(slice(i, j)) + def __getitem__(self, k): + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + return [self[i] for i in indices] + item = self.data[k] + if isinstance(item, _MissingItem): + self.reader.seek(self.pos + item[0] * self.recordSize) 
+ item = self.conv.read(self.reader, self.font, {}) + self.data[k] = item + return item + +class BaseConverter(object): + + """Base class for converter objects. Apart from the constructor, this + is an abstract class.""" + + def __init__(self, name, repeat, aux, tableClass): + self.name = name + self.repeat = repeat + self.aux = aux + self.tableClass = tableClass + self.isCount = name.endswith("Count") + self.isLookupType = name.endswith("LookupType") + self.isPropagated = name in ["ClassCount", "Class2Count", "FeatureTag", "SettingsCount", "AxisCount"] + + def readArray(self, reader, font, tableDict, count): + """Read an array of values from the reader.""" + lazy = font.lazy and count > 8 + if lazy: + recordSize = self.getRecordSize(reader) + if recordSize is NotImplemented: + lazy = False + if not lazy: + l = [] + for i in range(count): + l.append(self.read(reader, font, tableDict)) + return l + else: + l = _LazyList() + l.reader = reader.copy() + l.pos = l.reader.pos + l.font = font + l.conv = self + l.recordSize = recordSize + l.extend(_MissingItem([i]) for i in range(count)) + reader.advance(count * recordSize) + return l + + def getRecordSize(self, reader): + if hasattr(self, 'staticSize'): return self.staticSize + return NotImplemented + + def read(self, reader, font, tableDict): + """Read a value from the reader.""" + raise NotImplementedError(self) + + def writeArray(self, writer, font, tableDict, values): + for i in range(len(values)): + self.write(writer, font, tableDict, values[i], i) + + def write(self, writer, font, tableDict, value, repeatIndex=None): + """Write a value to the writer.""" + raise NotImplementedError(self) + + def xmlRead(self, attrs, content, font): + """Read a value from XML.""" + raise NotImplementedError(self) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + """Write a value to XML.""" + raise NotImplementedError(self) + + +class SimpleValue(BaseConverter): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + 
xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + def xmlRead(self, attrs, content, font): + return attrs["value"] + +class IntValue(SimpleValue): + def xmlRead(self, attrs, content, font): + return int(attrs["value"], 0) + +class Long(IntValue): + staticSize = 4 + def read(self, reader, font, tableDict): + return reader.readLong() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeLong(value) + +class ULong(IntValue): + staticSize = 4 + def read(self, reader, font, tableDict): + return reader.readULong() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeULong(value) + +class Short(IntValue): + staticSize = 2 + def read(self, reader, font, tableDict): + return reader.readShort() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeShort(value) + +class UShort(IntValue): + staticSize = 2 + def read(self, reader, font, tableDict): + return reader.readUShort() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUShort(value) + +class UInt24(IntValue): + staticSize = 3 + def read(self, reader, font, tableDict): + return reader.readUInt24() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUInt24(value) + +class ComputedUShort(UShort): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.comment("%s=%s" % (name, value)) + xmlWriter.newline() + +class Tag(SimpleValue): + staticSize = 4 + def read(self, reader, font, tableDict): + return reader.readTag() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeTag(value) + +class GlyphID(SimpleValue): + staticSize = 2 + def readArray(self, reader, font, tableDict, count): + glyphOrder = font.getGlyphOrder() + gids = array.array("H", reader.readData(2 * count)) + if sys.byteorder != "big": + gids.byteswap() + try: + l = [glyphOrder[gid] for gid in gids] + except IndexError: + # Slower, but will 
not throw an IndexError on an invalid glyph id. + l = [font.getGlyphName(gid) for gid in gids] + return l + def read(self, reader, font, tableDict): + return font.getGlyphName(reader.readUShort()) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUShort(font.getGlyphID(value)) + +class FloatValue(SimpleValue): + def xmlRead(self, attrs, content, font): + return float(attrs["value"]) + +class DeciPoints(FloatValue): + staticSize = 2 + def read(self, reader, font, tableDict): + return reader.readUShort() / 10 + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUShort(int(round(value * 10))) + +class Fixed(FloatValue): + staticSize = 4 + def read(self, reader, font, tableDict): + return fi2fl(reader.readLong(), 16) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeLong(fl2fi(value, 16)) + +class Version(BaseConverter): + staticSize = 4 + def read(self, reader, font, tableDict): + value = reader.readLong() + assert (value >> 16) == 1, "Unsupported version 0x%08x" % value + return fi2fl(value, 16) + def write(self, writer, font, tableDict, value, repeatIndex=None): + if value < 0x10000: + value = fl2fi(value, 16) + value = int(round(value)) + assert (value >> 16) == 1, "Unsupported version 0x%08x" % value + writer.writeLong(value) + def xmlRead(self, attrs, content, font): + value = attrs["value"] + value = float(int(value, 0)) if value.startswith("0") else float(value) + if value >= 0x10000: + value = fi2fl(value, 16) + return value + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value >= 0x10000: + value = fi2fl(value, 16) + if value % 1 != 0: + # Write as hex + value = "0x%08x" % fl2fi(value, 16) + xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + + +class Struct(BaseConverter): + + def getRecordSize(self, reader): + return self.tableClass and self.tableClass.getRecordSize(reader) + + def read(self, reader, font, tableDict): + 
table = self.tableClass() + table.decompile(reader, font) + return table + + def write(self, writer, font, tableDict, value, repeatIndex=None): + value.compile(writer, font) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value is None: + if attrs: + # If there are attributes (probably index), then + # don't drop this even if it's NULL. It will mess + # up the array indices of the containing element. + xmlWriter.simpletag(name, attrs + [("empty", 1)]) + xmlWriter.newline() + else: + pass # NULL table, ignore + else: + value.toXML(xmlWriter, font, attrs, name=name) + + def xmlRead(self, attrs, content, font): + if "empty" in attrs and safeEval(attrs["empty"]): + return None + table = self.tableClass() + Format = attrs.get("Format") + if Format is not None: + table.Format = int(Format) + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + table.fromXML(name, attrs, content, font) + else: + pass + return table + + def __repr__(self): + return "Struct of " + repr(self.tableClass) + + +class Table(Struct): + + longOffset = False + staticSize = 2 + + def readOffset(self, reader): + return reader.readUShort() + + def writeNullOffset(self, writer): + if self.longOffset: + writer.writeULong(0) + else: + writer.writeUShort(0) + + def read(self, reader, font, tableDict): + offset = self.readOffset(reader) + if offset == 0: + return None + if offset <= 3: + # XXX hack to work around buggy pala.ttf + print("*** Warning: offset is not 0, yet suspiciously low (%s). 
table: %s" \ + % (offset, self.tableClass.__name__)) + return None + table = self.tableClass() + reader = reader.getSubReader(offset) + if font.lazy: + table.reader = reader + table.font = font + else: + table.decompile(reader, font) + return table + + def write(self, writer, font, tableDict, value, repeatIndex=None): + if value is None: + self.writeNullOffset(writer) + else: + subWriter = writer.getSubWriter() + subWriter.longOffset = self.longOffset + subWriter.name = self.name + if repeatIndex is not None: + subWriter.repeatIndex = repeatIndex + writer.writeSubTable(subWriter) + value.compile(subWriter, font) + +class LTable(Table): + + longOffset = True + staticSize = 4 + + def readOffset(self, reader): + return reader.readULong() + + +class SubTable(Table): + def getConverter(self, tableType, lookupType): + tableClass = self.lookupTypes[tableType][lookupType] + return self.__class__(self.name, self.repeat, self.aux, tableClass) + + +class ExtSubTable(LTable, SubTable): + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.Extension = 1 # actually, mere presence of the field flags it as an Ext Subtable writer. 
+ Table.write(self, writer, font, tableDict, value, repeatIndex) + +class FeatureParams(Table): + def getConverter(self, featureTag): + tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams) + return self.__class__(self.name, self.repeat, self.aux, tableClass) + + +class ValueFormat(IntValue): + staticSize = 2 + def __init__(self, name, repeat, aux, tableClass): + BaseConverter.__init__(self, name, repeat, aux, tableClass) + self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1") + def read(self, reader, font, tableDict): + format = reader.readUShort() + reader[self.which] = ValueRecordFactory(format) + return format + def write(self, writer, font, tableDict, format, repeatIndex=None): + writer.writeUShort(format) + writer[self.which] = ValueRecordFactory(format) + + +class ValueRecord(ValueFormat): + def getRecordSize(self, reader): + return 2 * len(reader[self.which]) + def read(self, reader, font, tableDict): + return reader[self.which].readValueRecord(reader, font) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer[self.which].writeValueRecord(writer, font, value) + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value is None: + pass # NULL table, ignore + else: + value.toXML(xmlWriter, font, self.name, attrs) + def xmlRead(self, attrs, content, font): + from .otBase import ValueRecord + value = ValueRecord() + value.fromXML(None, attrs, content, font) + return value + + +class DeltaValue(BaseConverter): + + def read(self, reader, font, tableDict): + StartSize = tableDict["StartSize"] + EndSize = tableDict["EndSize"] + DeltaFormat = tableDict["DeltaFormat"] + assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" + nItems = EndSize - StartSize + 1 + nBits = 1 << DeltaFormat + minusOffset = 1 << nBits + mask = (1 << nBits) - 1 + signMask = 1 << (nBits - 1) + + DeltaValue = [] + tmp, shift = 0, 0 + for i in range(nItems): + if shift == 0: + tmp, shift = reader.readUShort(), 16 + shift = 
shift - nBits + value = (tmp >> shift) & mask + if value & signMask: + value = value - minusOffset + DeltaValue.append(value) + return DeltaValue + + def write(self, writer, font, tableDict, value, repeatIndex=None): + StartSize = tableDict["StartSize"] + EndSize = tableDict["EndSize"] + DeltaFormat = tableDict["DeltaFormat"] + DeltaValue = value + assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" + nItems = EndSize - StartSize + 1 + nBits = 1 << DeltaFormat + assert len(DeltaValue) == nItems + mask = (1 << nBits) - 1 + + tmp, shift = 0, 16 + for value in DeltaValue: + shift = shift - nBits + tmp = tmp | ((value & mask) << shift) + if shift == 0: + writer.writeUShort(tmp) + tmp, shift = 0, 16 + if shift != 16: + writer.writeUShort(tmp) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + return safeEval(attrs["value"]) + + +converterMapping = { + # type class + "int16": Short, + "uint16": UShort, + "uint24": UInt24, + "uint32": ULong, + "Version": Version, + "Tag": Tag, + "GlyphID": GlyphID, + "DeciPoints": DeciPoints, + "Fixed": Fixed, + "struct": Struct, + "Offset": Table, + "LOffset": LTable, + "ValueRecord": ValueRecord, + "DeltaValue": DeltaValue, +} diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/otData.py fonttools-3.0/Snippets/fontTools/ttLib/tables/otData.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/otData.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/otData.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1025 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +otData = [ + + # + # common + # + + ('LookupOrder', []), + + ('ScriptList', [ + ('uint16', 'ScriptCount', None, None, 'Number of ScriptRecords'), + ('struct', 'ScriptRecord', 'ScriptCount', 0, 'Array of ScriptRecords -listed alphabetically by 
ScriptTag'), + ]), + + ('ScriptRecord', [ + ('Tag', 'ScriptTag', None, None, '4-byte ScriptTag identifier'), + ('Offset', 'Script', None, None, 'Offset to Script table-from beginning of ScriptList'), + ]), + + ('Script', [ + ('Offset', 'DefaultLangSys', None, None, 'Offset to DefaultLangSys table-from beginning of Script table-may be NULL'), + ('uint16', 'LangSysCount', None, None, 'Number of LangSysRecords for this script-excluding the DefaultLangSys'), + ('struct', 'LangSysRecord', 'LangSysCount', 0, 'Array of LangSysRecords-listed alphabetically by LangSysTag'), + ]), + + ('LangSysRecord', [ + ('Tag', 'LangSysTag', None, None, '4-byte LangSysTag identifier'), + ('Offset', 'LangSys', None, None, 'Offset to LangSys table-from beginning of Script table'), + ]), + + ('LangSys', [ + ('Offset', 'LookupOrder', None, None, '= NULL (reserved for an offset to a reordering table)'), + ('uint16', 'ReqFeatureIndex', None, None, 'Index of a feature required for this language system- if no required features = 0xFFFF'), + ('uint16', 'FeatureCount', None, None, 'Number of FeatureIndex values for this language system-excludes the required feature'), + ('uint16', 'FeatureIndex', 'FeatureCount', 0, 'Array of indices into the FeatureList-in arbitrary order'), + ]), + + ('FeatureList', [ + ('uint16', 'FeatureCount', None, None, 'Number of FeatureRecords in this table'), + ('struct', 'FeatureRecord', 'FeatureCount', 0, 'Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag'), + ]), + + ('FeatureRecord', [ + ('Tag', 'FeatureTag', None, None, '4-byte feature identification tag'), + ('Offset', 'Feature', None, None, 'Offset to Feature table-from beginning of FeatureList'), + ]), + + ('Feature', [ + ('Offset', 'FeatureParams', None, None, '= NULL (reserved for offset to FeatureParams)'), + ('uint16', 'LookupCount', None, None, 'Number of LookupList indices for this feature'), + ('uint16', 'LookupListIndex', 'LookupCount', 0, 'Array of 
LookupList indices for this feature -zero-based (first lookup is LookupListIndex = 0)'), + ]), + + ('FeatureParams', [ + ]), + + ('FeatureParamsSize', [ + ('DeciPoints', 'DesignSize', None, None, 'The design size in 720/inch units (decipoints).'), + ('uint16', 'SubfamilyID', None, None, 'Serves as an identifier that associates fonts in a subfamily.'), + ('uint16', 'SubfamilyNameID', None, None, 'Subfamily NameID.'), + ('DeciPoints', 'RangeStart', None, None, 'Small end of recommended usage range (exclusive) in 720/inch units.'), + ('DeciPoints', 'RangeEnd', None, None, 'Large end of recommended usage range (inclusive) in 720/inch units.'), + ]), + + ('FeatureParamsStylisticSet', [ + ('uint16', 'Version', None, None, 'Set to 0.'), + ('uint16', 'UINameID', None, None, 'UI NameID.'), + ]), + + ('FeatureParamsCharacterVariants', [ + ('uint16', 'Format', None, None, 'Set to 0.'), + ('uint16', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'), + ('uint16', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'), + ('uint16', 'SampleTextNameID', None, None, 'Sample text NameID.'), + ('uint16', 'NumNamedParameters', None, None, 'Number of named parameters.'), + ('uint16', 'FirstParamUILabelNameID', None, None, 'First NameID of UI feature parameters.'), + ('uint16', 'CharCount', None, None, 'Count of characters this feature provides glyph variants for.'), + ('uint24', 'Character', 'CharCount', 0, 'Unicode characters for which this feature provides glyph variants.'), + ]), + + ('LookupList', [ + ('uint16', 'LookupCount', None, None, 'Number of lookups in this table'), + ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'), + ]), + + ('Lookup', [ + ('uint16', 'LookupType', None, None, 'Different enumerations for GSUB and GPOS'), + ('uint16', 'LookupFlag', None, None, 'Lookup qualifiers'), + ('uint16', 'SubTableCount', None, None, 'Number of SubTables 
for this lookup'), + ('Offset', 'SubTable', 'SubTableCount', 0, 'Array of offsets to SubTables-from beginning of Lookup table'), + ('uint16', 'MarkFilteringSet', None, 'LookupFlag & 0x0010', 'If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.'), + ]), + + ('CoverageFormat1', [ + ('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 1'), + ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the GlyphArray'), + ('GlyphID', 'GlyphArray', 'GlyphCount', 0, 'Array of GlyphIDs-in numerical order'), + ]), + + ('CoverageFormat2', [ + ('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 2'), + ('uint16', 'RangeCount', None, None, 'Number of RangeRecords'), + ('struct', 'RangeRecord', 'RangeCount', 0, 'Array of glyph ranges-ordered by Start GlyphID'), + ]), + + ('RangeRecord', [ + ('GlyphID', 'Start', None, None, 'First GlyphID in the range'), + ('GlyphID', 'End', None, None, 'Last GlyphID in the range'), + ('uint16', 'StartCoverageIndex', None, None, 'Coverage Index of first GlyphID in range'), + ]), + + ('ClassDefFormat1', [ + ('uint16', 'ClassFormat', None, None, 'Format identifier-format = 1'), + ('GlyphID', 'StartGlyph', None, None, 'First GlyphID of the ClassValueArray'), + ('uint16', 'GlyphCount', None, None, 'Size of the ClassValueArray'), + ('uint16', 'ClassValueArray', 'GlyphCount', 0, 'Array of Class Values-one per GlyphID'), + ]), + + ('ClassDefFormat2', [ + ('uint16', 'ClassFormat', None, None, 'Format identifier-format = 2'), + ('uint16', 'ClassRangeCount', None, None, 'Number of ClassRangeRecords'), + ('struct', 'ClassRangeRecord', 'ClassRangeCount', 0, 'Array of ClassRangeRecords-ordered by Start GlyphID'), + ]), + + ('ClassRangeRecord', [ + ('GlyphID', 'Start', None, None, 'First GlyphID in the range'), + ('GlyphID', 'End', None, None, 'Last GlyphID in the range'), + ('uint16', 'Class', None, None, 
'Applied to all glyphs in the range'), + ]), + + ('Device', [ + ('uint16', 'StartSize', None, None, 'Smallest size to correct-in ppem'), + ('uint16', 'EndSize', None, None, 'Largest size to correct-in ppem'), + ('uint16', 'DeltaFormat', None, None, 'Format of DeltaValue array data: 1, 2, or 3'), + ('DeltaValue', 'DeltaValue', '', 0, 'Array of compressed data'), + ]), + + + # + # gpos + # + + ('GPOS', [ + ('Version', 'Version', None, None, 'Version of the GPOS table-initially = 0x00010000'), + ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GPOS table'), + ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GPOS table'), + ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GPOS table'), + ]), + + ('SinglePosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'), + ('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'), + ('ValueRecord', 'Value', None, None, 'Defines positioning value(s)-applied to all glyphs in the Coverage table'), + ]), + + ('SinglePosFormat2', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'), + ('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'), + ('uint16', 'ValueCount', None, None, 'Number of ValueRecords'), + ('ValueRecord', 'Value', 'ValueCount', 0, 'Array of ValueRecords-positioning values applied to glyphs'), + ]), + + ('PairPosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair'), + ('uint16', 'ValueFormat1', None, None, 'Defines the types of data in 
ValueRecord1-for the first glyph in the pair -may be zero (0)'), + ('uint16', 'ValueFormat2', None, None, 'Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)'), + ('uint16', 'PairSetCount', None, None, 'Number of PairSet tables'), + ('Offset', 'PairSet', 'PairSetCount', 0, 'Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index'), + ]), + + ('PairSet', [ + ('uint16', 'PairValueCount', None, None, 'Number of PairValueRecords'), + ('struct', 'PairValueRecord', 'PairValueCount', 0, 'Array of PairValueRecords-ordered by GlyphID of the second glyph'), + ]), + + ('PairValueRecord', [ + ('GlyphID', 'SecondGlyph', None, None, 'GlyphID of second glyph in the pair-first glyph is listed in the Coverage table'), + ('ValueRecord', 'Value1', None, None, 'Positioning data for the first glyph in the pair'), + ('ValueRecord', 'Value2', None, None, 'Positioning data for the second glyph in the pair'), + ]), + + ('PairPosFormat2', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair'), + ('uint16', 'ValueFormat1', None, None, 'ValueRecord definition-for the first glyph of the pair-may be zero (0)'), + ('uint16', 'ValueFormat2', None, None, 'ValueRecord definition-for the second glyph of the pair-may be zero (0)'), + ('Offset', 'ClassDef1', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair'), + ('Offset', 'ClassDef2', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair'), + ('uint16', 'Class1Count', None, None, 'Number of classes in ClassDef1 table-includes Class0'), + ('uint16', 'Class2Count', None, None, 'Number of classes in ClassDef2 table-includes Class0'), + ('struct', 'Class1Record', 'Class1Count', 0, 'Array of Class1 records-ordered by 
Class1'), + ]), + + ('Class1Record', [ + ('struct', 'Class2Record', 'Class2Count', 0, 'Array of Class2 records-ordered by Class2'), + ]), + + ('Class2Record', [ + ('ValueRecord', 'Value1', None, None, 'Positioning for first glyph-empty if ValueFormat1 = 0'), + ('ValueRecord', 'Value2', None, None, 'Positioning for second glyph-empty if ValueFormat2 = 0'), + ]), + + ('CursivePosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of CursivePos subtable'), + ('uint16', 'EntryExitCount', None, None, 'Number of EntryExit records'), + ('struct', 'EntryExitRecord', 'EntryExitCount', 0, 'Array of EntryExit records-in Coverage Index order'), + ]), + + ('EntryExitRecord', [ + ('Offset', 'EntryAnchor', None, None, 'Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL'), + ('Offset', 'ExitAnchor', None, None, 'Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL'), + ]), + + ('MarkBasePosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'MarkCoverage', None, None, 'Offset to MarkCoverage table-from beginning of MarkBasePos subtable'), + ('Offset', 'BaseCoverage', None, None, 'Offset to BaseCoverage table-from beginning of MarkBasePos subtable'), + ('uint16', 'ClassCount', None, None, 'Number of classes defined for marks'), + ('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkBasePos subtable'), + ('Offset', 'BaseArray', None, None, 'Offset to BaseArray table-from beginning of MarkBasePos subtable'), + ]), + + ('BaseArray', [ + ('uint16', 'BaseCount', None, None, 'Number of BaseRecords'), + ('struct', 'BaseRecord', 'BaseCount', 0, 'Array of BaseRecords-in order of BaseCoverage Index'), + ]), + + ('BaseRecord', [ + ('Offset', 'BaseAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of BaseArray 
table-ordered by class-zero-based'), + ]), + + ('MarkLigPosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'MarkCoverage', None, None, 'Offset to Mark Coverage table-from beginning of MarkLigPos subtable'), + ('Offset', 'LigatureCoverage', None, None, 'Offset to Ligature Coverage table-from beginning of MarkLigPos subtable'), + ('uint16', 'ClassCount', None, None, 'Number of defined mark classes'), + ('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkLigPos subtable'), + ('Offset', 'LigatureArray', None, None, 'Offset to LigatureArray table-from beginning of MarkLigPos subtable'), + ]), + + ('LigatureArray', [ + ('uint16', 'LigatureCount', None, None, 'Number of LigatureAttach table offsets'), + ('Offset', 'LigatureAttach', 'LigatureCount', 0, 'Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index'), + ]), + + ('LigatureAttach', [ + ('uint16', 'ComponentCount', None, None, 'Number of ComponentRecords in this ligature'), + ('struct', 'ComponentRecord', 'ComponentCount', 0, 'Array of Component records-ordered in writing direction'), + ]), + + ('ComponentRecord', [ + ('Offset', 'LigatureAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array'), + ]), + + ('MarkMarkPosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Mark1Coverage', None, None, 'Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable'), + ('Offset', 'Mark2Coverage', None, None, 'Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable'), + ('uint16', 'ClassCount', None, None, 'Number of Combining Mark classes defined'), + ('Offset', 'Mark1Array', None, None, 'Offset to MarkArray table for Mark1-from beginning of MarkMarkPos 
subtable'), + ('Offset', 'Mark2Array', None, None, 'Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable'), + ]), + + ('Mark2Array', [ + ('uint16', 'Mark2Count', None, None, 'Number of Mark2 records'), + ('struct', 'Mark2Record', 'Mark2Count', 0, 'Array of Mark2 records-in Coverage order'), + ]), + + ('Mark2Record', [ + ('Offset', 'Mark2Anchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array'), + ]), + + ('PosLookupRecord', [ + ('uint16', 'SequenceIndex', None, None, 'Index to input glyph sequence-first glyph = 0'), + ('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'), + ]), + + ('ContextPosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'), + ('uint16', 'PosRuleSetCount', None, None, 'Number of PosRuleSet tables'), + ('Offset', 'PosRuleSet', 'PosRuleSetCount', 0, 'Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'), + ]), + + ('PosRuleSet', [ + ('uint16', 'PosRuleCount', None, None, 'Number of PosRule tables'), + ('Offset', 'PosRule', 'PosRuleCount', 0, 'Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference'), + ]), + + ('PosRule', [ + ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the Input glyph sequence'), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-starting with the second glyph'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'), + ]), + + ('ContextPosFormat2', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'), + ('Offset', 'ClassDef', None, 
None, 'Offset to ClassDef table-from beginning of ContextPos subtable'), + ('uint16', 'PosClassSetCount', None, None, 'Number of PosClassSet tables'), + ('Offset', 'PosClassSet', 'PosClassSetCount', 0, 'Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL'), + ]), + + ('PosClassSet', [ + ('uint16', 'PosClassRuleCount', None, None, 'Number of PosClassRule tables'), + ('Offset', 'PosClassRule', 'PosClassRuleCount', 0, 'Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference'), + ]), + + ('PosClassRule', [ + ('uint16', 'GlyphCount', None, None, 'Number of glyphs to be matched'), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph sequence'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'), + ]), + + ('ContextPosFormat3', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'), + ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input sequence'), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage tables-from beginning of ContextPos subtable'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'), + ]), + + ('ChainContextPosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'), + ('uint16', 'ChainPosRuleSetCount', None, None, 'Number of ChainPosRuleSet tables'), + ('Offset', 'ChainPosRuleSet', 'ChainPosRuleSetCount', 0, 'Array of offsets to ChainPosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'), + ]), + + ('ChainPosRuleSet', [ + ('uint16', 'ChainPosRuleCount', None, None, 'Number 
of ChainPosRule tables'), + ('Offset', 'ChainPosRule', 'ChainPosRuleCount', 0, 'Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference'), + ]), + + ('ChainPosRule', [ + ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), + ('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, "Array of backtracking GlyphID's (to be matched before the input sequence)"), + ('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'), + ('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'), + ('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, "Array of lookahead GlyphID's (to be matched after the input sequence)"), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'), + ]), + + ('ChainContextPosFormat2', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ChainContextPos subtable'), + ('Offset', 'BacktrackClassDef', None, None, 'Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable'), + ('Offset', 'InputClassDef', None, None, 'Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable'), + ('Offset', 'LookAheadClassDef', None, None, 'Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable'), + ('uint16', 'ChainPosClassSetCount', None, None, 'Number of ChainPosClassSet tables'), + ('Offset', 'ChainPosClassSet', 'ChainPosClassSetCount', 0, 
'Array of offsets to ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL'), + ]), + + ('ChainPosClassSet', [ + ('uint16', 'ChainPosClassRuleCount', None, None, 'Number of ChainPosClassRule tables'), + ('Offset', 'ChainPosClassRule', 'ChainPosClassRuleCount', 0, 'Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference'), + ]), + + ('ChainPosClassRule', [ + ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), + ('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes(to be matched before the input sequence)'), + ('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'), + ('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes(start with second class; to be matched with the input glyph sequence)'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'), + ('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes(to be matched after the input sequence)'), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'), + ]), + + ('ChainContextPosFormat3', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'), + ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'), + ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'), + ('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'), + ('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in 
glyph sequence order'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'), + ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords,in design order'), + ]), + + ('ExtensionPosFormat1', [ + ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'), + ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'), + ('LOffset', 'ExtSubTable', None, None, 'Offset to SubTable'), + ]), + + ('ValueRecord', [ + ('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'), + ('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'), + ('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'), + ('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'), + ('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'), + ('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'), + ('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'), + ('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'), + ]), + + ('AnchorFormat1', [ + ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 1'), + ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'), + ('int16', 'YCoordinate', None, None, 'Vertical value-in design 
units'), + ]), + + ('AnchorFormat2', [ + ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 2'), + ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'), + ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'), + ('uint16', 'AnchorPoint', None, None, 'Index to glyph contour point'), + ]), + + ('AnchorFormat3', [ + ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 3'), + ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'), + ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'), + ('Offset', 'XDeviceTable', None, None, 'Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)'), + ('Offset', 'YDeviceTable', None, None, 'Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)'), + ]), + + ('MarkArray', [ + ('uint16', 'MarkCount', None, None, 'Number of MarkRecords'), + ('struct', 'MarkRecord', 'MarkCount', 0, 'Array of MarkRecords-in Coverage order'), + ]), + + ('MarkRecord', [ + ('uint16', 'Class', None, None, 'Class defined for this mark'), + ('Offset', 'MarkAnchor', None, None, 'Offset to Anchor table-from beginning of MarkArray table'), + ]), + + + # + # gsub + # + + ('GSUB', [ + ('Version', 'Version', None, None, 'Version of the GSUB table-initially set to 0x00010000'), + ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GSUB table'), + ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GSUB table'), + ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GSUB table'), + ]), + + ('SingleSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'DeltaGlyphID', None, None, 'Add to original GlyphID modulo 65536 to get substitute GlyphID'), + ]), + + 
('SingleSubstFormat2', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'), + ('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage Index'), + ]), + + ('MultipleSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'SequenceCount', None, None, 'Number of Sequence table offsets in the Sequence array'), + ('Offset', 'Sequence', 'SequenceCount', 0, 'Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index'), + ]), + + ('Sequence', [ + ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array. This should always be greater than 0.'), + ('GlyphID', 'Substitute', 'GlyphCount', 0, 'String of GlyphIDs to substitute'), + ]), + + ('AlternateSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'AlternateSetCount', None, None, 'Number of AlternateSet tables'), + ('Offset', 'AlternateSet', 'AlternateSetCount', 0, 'Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index'), + ]), + + ('AlternateSet', [ + ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Alternate array'), + ('GlyphID', 'Alternate', 'GlyphCount', 0, 'Array of alternate GlyphIDs-in arbitrary order'), + ]), + + ('LigatureSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'LigSetCount', None, 
None, 'Number of LigatureSet tables'), + ('Offset', 'LigatureSet', 'LigSetCount', 0, 'Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index'), + ]), + + ('LigatureSet', [ + ('uint16', 'LigatureCount', None, None, 'Number of Ligature tables'), + ('Offset', 'Ligature', 'LigatureCount', 0, 'Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference'), + ]), + + ('Ligature', [ + ('GlyphID', 'LigGlyph', None, None, 'GlyphID of ligature to substitute'), + ('uint16', 'CompCount', None, None, 'Number of components in the ligature'), + ('GlyphID', 'Component', 'CompCount', -1, 'Array of component GlyphIDs-start with the second component-ordered in writing direction'), + ]), + + ('SubstLookupRecord', [ + ('uint16', 'SequenceIndex', None, None, 'Index into current glyph sequence-first glyph = 0'), + ('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'), + ]), + + ('ContextSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'SubRuleSetCount', None, None, 'Number of SubRuleSet tables-must equal GlyphCount in Coverage table'), + ('Offset', 'SubRuleSet', 'SubRuleSetCount', 0, 'Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'), + ]), + + ('SubRuleSet', [ + ('uint16', 'SubRuleCount', None, None, 'Number of SubRule tables'), + ('Offset', 'SubRule', 'SubRuleCount', 0, 'Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference'), + ]), + + ('SubRule', [ + ('uint16', 'GlyphCount', None, None, 'Total number of glyphs in input glyph sequence-includes the first glyph'), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-start with second glyph'), + 
('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords-in design order'), + ]), + + ('ContextSubstFormat2', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('Offset', 'ClassDef', None, None, 'Offset to glyph ClassDef table-from beginning of Substitution table'), + ('uint16', 'SubClassSetCount', None, None, 'Number of SubClassSet tables'), + ('Offset', 'SubClassSet', 'SubClassSetCount', 0, 'Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL'), + ]), + + ('SubClassSet', [ + ('uint16', 'SubClassRuleCount', None, None, 'Number of SubClassRule tables'), + ('Offset', 'SubClassRule', 'SubClassRuleCount', 0, 'Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference'), + ]), + + ('SubClassRule', [ + ('uint16', 'GlyphCount', None, None, 'Total number of classes specified for the context in the rule-includes the first class'), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph class sequence'), + ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of Substitution lookups-in design order'), + ]), + + ('ContextSubstFormat3', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 3'), + ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input glyph sequence'), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order'), + ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords-in design order'), + ]), + + ('ChainContextSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format 
identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'ChainSubRuleSetCount', None, None, 'Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table'), + ('Offset', 'ChainSubRuleSet', 'ChainSubRuleSetCount', 0, 'Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'), + ]), + + ('ChainSubRuleSet', [ + ('uint16', 'ChainSubRuleCount', None, None, 'Number of ChainSubRule tables'), + ('Offset', 'ChainSubRule', 'ChainSubRuleCount', 0, 'Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference'), + ]), + + ('ChainSubRule', [ + ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), + ('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, "Array of backtracking GlyphID's (to be matched before the input sequence)"), + ('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'), + ('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'), + ('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, "Array of lookahead GlyphID's (to be matched after the input sequence)"), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'), + ]), + + ('ChainContextSubstFormat2', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('Offset', 'BacktrackClassDef', None, None, 'Offset to glyph ClassDef table containing 
backtrack sequence data-from beginning of Substitution table'), + ('Offset', 'InputClassDef', None, None, 'Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table'), + ('Offset', 'LookAheadClassDef', None, None, 'Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table'), + ('uint16', 'ChainSubClassSetCount', None, None, 'Number of ChainSubClassSet tables'), + ('Offset', 'ChainSubClassSet', 'ChainSubClassSetCount', 0, 'Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL'), + ]), + + ('ChainSubClassSet', [ + ('uint16', 'ChainSubClassRuleCount', None, None, 'Number of ChainSubClassRule tables'), + ('Offset', 'ChainSubClassRule', 'ChainSubClassRuleCount', 0, 'Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference'), + ]), + + ('ChainSubClassRule', [ + ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), + ('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes(to be matched before the input sequence)'), + ('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'), + ('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes(start with second class; to be matched with the input glyph sequence)'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'), + ('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes(to be matched after the input sequence)'), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'), + ]), + + 
('ChainContextSubstFormat3', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 3'), + ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'), + ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'), + ('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'), + ('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence order'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'), + ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords, in design order'), + ]), + + ('ExtensionSubstFormat1', [ + ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'), + ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. 
the extension subtable).'), + ('LOffset', 'ExtSubTable', None, None, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'), + ]), + + ('ReverseChainSingleSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, 0, 'Offset to Coverage table - from beginning of Substitution table'), + ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'), + ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'), + ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'), + ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'), + ('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage index'), + ]), + + # + # gdef + # + + ('GDEF', [ + ('Version', 'Version', None, None, 'Version of the GDEF table-initially 0x00010000'), + ('Offset', 'GlyphClassDef', None, None, 'Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)'), + ('Offset', 'AttachList', None, None, 'Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)'), + ('Offset', 'LigCaretList', None, None, 'Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)'), + ('Offset', 'MarkAttachClassDef', None, None, 'Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)'), + ('Offset', 'MarkGlyphSetsDef', None, 'int(round(Version*0x10000)) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), + ]), + + ('AttachList', [ + ('Offset', 
'Coverage', None, None, 'Offset to Coverage table - from beginning of AttachList table'), + ('uint16', 'GlyphCount', None, None, 'Number of glyphs with attachment points'), + ('Offset', 'AttachPoint', 'GlyphCount', 0, 'Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order'), + ]), + + ('AttachPoint', [ + ('uint16', 'PointCount', None, None, 'Number of attachment points on this glyph'), + ('uint16', 'PointIndex', 'PointCount', 0, 'Array of contour point indices -in increasing numerical order'), + ]), + + ('LigCaretList', [ + ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from beginning of LigCaretList table'), + ('uint16', 'LigGlyphCount', None, None, 'Number of ligature glyphs'), + ('Offset', 'LigGlyph', 'LigGlyphCount', 0, 'Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order'), + ]), + + ('LigGlyph', [ + ('uint16', 'CaretCount', None, None, 'Number of CaretValues for this ligature (components - 1)'), + ('Offset', 'CaretValue', 'CaretCount', 0, 'Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order'), + ]), + + ('CaretValueFormat1', [ + ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 1'), + ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), + ]), + + ('CaretValueFormat2', [ + ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 2'), + ('uint16', 'CaretValuePoint', None, None, 'Contour point index on glyph'), + ]), + + ('CaretValueFormat3', [ + ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 3'), + ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), + ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value-from beginning of CaretValue table'), + ]), + + ('MarkGlyphSetsDef', [ + ('uint16', 'MarkSetTableFormat', None, None, 'Format identifier == 1'), + ('uint16', 'MarkSetCount', None, None, 
'Number of mark sets defined'), + ('LOffset', 'Coverage', 'MarkSetCount', 0, 'Array of offsets to mark set coverage tables.'), + ]), + + # + # base + # + + ('BASE', [ + ('Version', 'Version', None, None, 'Version of the BASE table-initially 0x00010000'), + ('Offset', 'HorizAxis', None, None, 'Offset to horizontal Axis table-from beginning of BASE table-may be NULL'), + ('Offset', 'VertAxis', None, None, 'Offset to vertical Axis table-from beginning of BASE table-may be NULL'), + ]), + + ('Axis', [ + ('Offset', 'BaseTagList', None, None, 'Offset to BaseTagList table-from beginning of Axis table-may be NULL'), + ('Offset', 'BaseScriptList', None, None, 'Offset to BaseScriptList table-from beginning of Axis table'), + ]), + + ('BaseTagList', [ + ('uint16', 'BaseTagCount', None, None, 'Number of baseline identification tags in this text direction-may be zero (0)'), + ('Tag', 'BaselineTag', 'BaseTagCount', 0, 'Array of 4-byte baseline identification tags-must be in alphabetical order'), + ]), + + ('BaseScriptList', [ + ('uint16', 'BaseScriptCount', None, None, 'Number of BaseScriptRecords defined'), + ('struct', 'BaseScriptRecord', 'BaseScriptCount', 0, 'Array of BaseScriptRecords-in alphabetical order by BaseScriptTag'), + ]), + + ('BaseScriptRecord', [ + ('Tag', 'BaseScriptTag', None, None, '4-byte script identification tag'), + ('Offset', 'BaseScript', None, None, 'Offset to BaseScript table-from beginning of BaseScriptList'), + ]), + + ('BaseScript', [ + ('Offset', 'BaseValues', None, None, 'Offset to BaseValues table-from beginning of BaseScript table-may be NULL'), + ('Offset', 'DefaultMinMax', None, None, 'Offset to MinMax table- from beginning of BaseScript table-may be NULL'), + ('uint16', 'BaseLangSysCount', None, None, 'Number of BaseLangSysRecords defined-may be zero (0)'), + ('struct', 'BaseLangSysRecord', 'BaseLangSysCount', 0, 'Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag'), + ]), + + ('BaseLangSysRecord', [ + ('Tag', 
'BaseLangSysTag', None, None, '4-byte language system identification tag'), + ('Offset', 'MinMax', None, None, 'Offset to MinMax table-from beginning of BaseScript table'), + ]), + + ('BaseValues', [ + ('uint16', 'DefaultIndex', None, None, 'Index number of default baseline for this script-equals index position of baseline tag in BaselineArray of the BaseTagList'), + ('uint16', 'BaseCoordCount', None, None, 'Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList'), + ('Offset', 'BaseCoord', 'BaseCoordCount', 0, 'Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList'), + ]), + + ('MinMax', [ + ('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL'), + ('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL'), + ('uint16', 'FeatMinMaxCount', None, None, 'Number of FeatMinMaxRecords-may be zero (0)'), + ('struct', 'FeatMinMaxRecord', 'FeatMinMaxCount', 0, 'Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag'), + ]), + + ('FeatMinMaxRecord', [ + ('Tag', 'FeatureTableTag', None, None, '4-byte feature identification tag-must match FeatureTag in FeatureList'), + ('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL'), + ('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL'), + ]), + + ('BaseCoordFormat1', [ + ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 1'), + ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), + ]), + + ('BaseCoordFormat2', [ + ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 2'), + ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), + ('GlyphID', 
'ReferenceGlyph', None, None, 'GlyphID of control glyph'), + ('uint16', 'BaseCoordPoint', None, None, 'Index of contour point on the ReferenceGlyph'), + ]), + + ('BaseCoordFormat3', [ + ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 3'), + ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), + ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value'), + ]), + + + # + # jstf + # + + ('JSTF', [ + ('Version', 'Version', None, None, 'Version of the JSTF table-initially set to 0x00010000'), + ('uint16', 'JstfScriptCount', None, None, 'Number of JstfScriptRecords in this table'), + ('struct', 'JstfScriptRecord', 'JstfScriptCount', 0, 'Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag'), + ]), + + ('JstfScriptRecord', [ + ('Tag', 'JstfScriptTag', None, None, '4-byte JstfScript identification'), + ('Offset', 'JstfScript', None, None, 'Offset to JstfScript table-from beginning of JSTF Header'), + ]), + + ('JstfScript', [ + ('Offset', 'ExtenderGlyph', None, None, 'Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL'), + ('Offset', 'DefJstfLangSys', None, None, 'Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL'), + ('uint16', 'JstfLangSysCount', None, None, 'Number of JstfLangSysRecords in this table- may be zero (0)'), + ('struct', 'JstfLangSysRecord', 'JstfLangSysCount', 0, 'Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag'), + ]), + + ('JstfLangSysRecord', [ + ('Tag', 'JstfLangSysTag', None, None, '4-byte JstfLangSys identifier'), + ('Offset', 'JstfLangSys', None, None, 'Offset to JstfLangSys table-from beginning of JstfScript table'), + ]), + + ('ExtenderGlyph', [ + ('uint16', 'GlyphCount', None, None, 'Number of Extender Glyphs in this script'), + ('GlyphID', 'ExtenderGlyph', 'GlyphCount', 0, 'GlyphIDs-in increasing numerical order'), + ]), + + ('JstfLangSys', [ + ('uint16', 'JstfPriorityCount', None, None, 'Number 
of JstfPriority tables'), + ('Offset', 'JstfPriority', 'JstfPriorityCount', 0, 'Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order'), + ]), + + ('JstfPriority', [ + ('Offset', 'ShrinkageEnableGSUB', None, None, 'Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ShrinkageDisableGSUB', None, None, 'Offset to Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ShrinkageEnableGPOS', None, None, 'Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ShrinkageDisableGPOS', None, None, 'Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ShrinkageJstfMax', None, None, 'Offset to Shrinkage JstfMax table-from beginning of JstfPriority table -may be NULL'), + ('Offset', 'ExtensionEnableGSUB', None, None, 'Offset to Extension Enable JstfGSUBModList table-may be NULL'), + ('Offset', 'ExtensionDisableGSUB', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ExtensionEnableGPOS', None, None, 'Offset to Extension Enable JstfGSUBModList table-may be NULL'), + ('Offset', 'ExtensionDisableGPOS', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ExtensionJstfMax', None, None, 'Offset to Extension JstfMax table-from beginning of JstfPriority table -may be NULL'), + ]), + + ('JstfGSUBModList', [ + ('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'), + ('uint16', 'GSUBLookupIndex', 'LookupCount', 0, 'Array of LookupIndex identifiers in GSUB-in increasing numerical order'), + ]), + + ('JstfGPOSModList', [ + ('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'), + ('uint16', 'GPOSLookupIndex', 
'LookupCount', 0, 'Array of LookupIndex identifiers in GPOS-in increasing numerical order'), + ]), + + ('JstfMax', [ + ('uint16', 'LookupCount', None, None, 'Number of lookup Indices for this modification'), + ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order'), + ]), + + # + # math + # + + ('MATH', [ + ('Version', 'Version', None, None, 'Version of the MATH table-initially set to 0x00010000.'), + ('Offset', 'MathConstants', None, None, 'Offset to MathConstants table - from the beginning of MATH table.'), + ('Offset', 'MathGlyphInfo', None, None, 'Offset to MathGlyphInfo table - from the beginning of MATH table.'), + ('Offset', 'MathVariants', None, None, 'Offset to MathVariants table - from the beginning of MATH table.'), + ]), + + ('MathValueRecord', [ + ('int16', 'Value', None, None, 'The X or Y value in design units.'), + ('Offset', 'DeviceTable', None, None, 'Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.'), + ]), + + ('MathConstants', [ + ('int16', 'ScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 1. Suggested value: 80%.'), + ('int16', 'ScriptScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.'), + ('uint16', 'DelimitedSubFormulaMinHeight', None, None, 'Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.'), + ('uint16', 'DisplayOperatorMinHeight', None, None, 'Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.'), + ('MathValueRecord', 'MathLeading', None, None, 'White space to be left between math formulas to ensure proper line spacing. 
For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.'), + ('MathValueRecord', 'AxisHeight', None, None, 'Axis height of the font.'), + ('MathValueRecord', 'AccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.'), + ('MathValueRecord', 'FlattenedAccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).'), + ('MathValueRecord', 'SubscriptShiftDown', None, None, 'The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.'), + ('MathValueRecord', 'SubscriptTopMax', None, None, 'Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.'), + ('MathValueRecord', 'SubscriptBaselineDropMin', None, None, 'Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.'), + ('MathValueRecord', 'SuperscriptShiftUp', None, None, 'Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.'), + ('MathValueRecord', 'SuperscriptShiftUpCramped', None, None, 'Standard shift of superscripts relative to the base, in cramped style.'), + ('MathValueRecord', 'SuperscriptBottomMin', None, None, 'Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. 
Suggested: 1/4 x-height.'), + ('MathValueRecord', 'SuperscriptBaselineDropMax', None, None, 'Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.'), + ('MathValueRecord', 'SubSuperscriptGapMin', None, None, 'Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.'), + ('MathValueRecord', 'SuperscriptBottomMaxWithSubscript', None, None, 'The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.'), + ('MathValueRecord', 'SpaceAfterScript', None, None, 'Extra white space to be added after each subscript and superscript. Suggested: 0.5pt for a 12 pt font.'), + ('MathValueRecord', 'UpperLimitGapMin', None, None, 'Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.'), + ('MathValueRecord', 'UpperLimitBaselineRiseMin', None, None, 'Minimum distance between baseline of upper limit and (ink) top of the base operator.'), + ('MathValueRecord', 'LowerLimitGapMin', None, None, 'Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.'), + ('MathValueRecord', 'LowerLimitBaselineDropMin', None, None, 'Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.'), + ('MathValueRecord', 'StackTopShiftUp', None, None, 'Standard shift up applied to the top element of a stack.'), + ('MathValueRecord', 'StackTopDisplayStyleShiftUp', None, None, 'Standard shift up applied to the top element of a stack in display style.'), + ('MathValueRecord', 'StackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack. 
Positive for moving in the downward direction.'), + ('MathValueRecord', 'StackBottomDisplayStyleShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.'), + ('MathValueRecord', 'StackGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'StackDisplayStyleGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.'), + ('MathValueRecord', 'StretchStackTopShiftUp', None, None, 'Standard shift up applied to the top element of the stretch stack.'), + ('MathValueRecord', 'StretchStackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.'), + ('MathValueRecord', 'StretchStackGapAboveMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin'), + ('MathValueRecord', 'StretchStackGapBelowMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.'), + ('MathValueRecord', 'FractionNumeratorShiftUp', None, None, 'Standard shift up applied to the numerator.'), + ('MathValueRecord', 'FractionNumeratorDisplayStyleShiftUp', None, None, 'Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.'), + ('MathValueRecord', 'FractionDenominatorShiftDown', None, None, 'Standard shift down applied to the denominator. Positive for moving in the downward direction.'), + ('MathValueRecord', 'FractionDenominatorDisplayStyleShiftDown', None, None, 'Standard shift down applied to the denominator in display style. 
Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.'), + ('MathValueRecord', 'FractionNumeratorGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness'), + ('MathValueRecord', 'FractionNumDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'FractionRuleThickness', None, None, 'Thickness of the fraction bar. Suggested: default rule thickness.'), + ('MathValueRecord', 'FractionDenominatorGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness'), + ('MathValueRecord', 'FractionDenomDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'SkewedFractionHorizontalGap', None, None, 'Horizontal distance between the top and bottom elements of a skewed fraction.'), + ('MathValueRecord', 'SkewedFractionVerticalGap', None, None, 'Vertical distance between the ink of the top and bottom elements of a skewed fraction.'), + ('MathValueRecord', 'OverbarVerticalGap', None, None, 'Distance between the overbar and the (ink) top of he base. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'OverbarRuleThickness', None, None, 'Thickness of overbar. Suggested: default rule thickness.'), + ('MathValueRecord', 'OverbarExtraAscender', None, None, 'Extra white space reserved above the overbar. Suggested: default rule thickness.'), + ('MathValueRecord', 'UnderbarVerticalGap', None, None, 'Distance between underbar and (ink) bottom of the base. 
Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'UnderbarRuleThickness', None, None, 'Thickness of underbar. Suggested: default rule thickness.'), + ('MathValueRecord', 'UnderbarExtraDescender', None, None, 'Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.'), + ('MathValueRecord', 'RadicalVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.'), + ('MathValueRecord', 'RadicalDisplayStyleVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.'), + ('MathValueRecord', 'RadicalRuleThickness', None, None, 'Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.'), + ('MathValueRecord', 'RadicalExtraAscender', None, None, 'Extra white space reserved above the radical. Suggested: RadicalRuleThickness.'), + ('MathValueRecord', 'RadicalKernBeforeDegree', None, None, 'Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.'), + ('MathValueRecord', 'RadicalKernAfterDegree', None, None, 'Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.'), + ('uint16', 'RadicalDegreeBottomRaisePercent', None, None, 'Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.'), + ]), + + ('MathGlyphInfo', [ + ('Offset', 'MathItalicsCorrectionInfo', None, None, 'Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.'), + ('Offset', 'MathTopAccentAttachment', None, None, 'Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.'), + ('Offset', 'ExtendedShapeCoverage', None, None, 'Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table. 
When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.'), + ('Offset', 'MathKernInfo', None, None, 'Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.'), + ]), + + ('MathItalicsCorrectionInfo', [ + ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.'), + ('uint16', 'ItalicsCorrectionCount', None, None, 'Number of italics correction values. Should coincide with the number of covered glyphs.'), + ('MathValueRecord', 'ItalicsCorrection', 'ItalicsCorrectionCount', 0, 'Array of MathValueRecords defining italics correction values for each covered glyph.'), + ]), + + ('MathTopAccentAttachment', [ + ('Offset', 'TopAccentCoverage', None, None, 'Offset to Coverage table - from the beginning of MathTopAccentAttachment table.'), + ('uint16', 'TopAccentAttachmentCount', None, None, 'Number of top accent attachment point values. Should coincide with the number of covered glyphs'), + ('MathValueRecord', 'TopAccentAttachment', 'TopAccentAttachmentCount', 0, 'Array of MathValueRecords defining top accent attachment points for each covered glyph'), + ]), + + ('MathKernInfo', [ + ('Offset', 'MathKernCoverage', None, None, 'Offset to Coverage table - from the beginning of the MathKernInfo table.'), + ('uint16', 'MathKernCount', None, None, 'Number of MathKernInfoRecords.'), + ('MathKernInfoRecord', 'MathKernInfoRecords', 'MathKernCount', 0, 'Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.'), + ]), + + ('MathKernInfoRecord', [ + ('Offset', 'TopRightMathKern', None, None, 'Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. 
May be NULL.'), + ('Offset', 'TopLeftMathKern', None, None, 'Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.'), + ('Offset', 'BottomRightMathKern', None, None, 'Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.'), + ('Offset', 'BottomLeftMathKern', None, None, 'Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.'), + ]), + + ('MathKern', [ + ('uint16', 'HeightCount', None, None, 'Number of heights on which the kern value changes.'), + ('MathValueRecord', 'CorrectionHeight', 'HeightCount', 0, 'Array of correction heights at which the kern value changes. Sorted by the height value in design units.'), + ('MathValueRecord', 'KernValue', 'HeightCount', 1, 'Array of kern values corresponding to heights. First value is the kern value for all heights less or equal than the first height in this table.Last value is the value to be applied for all heights greater than the last height in this table. 
Negative values are interpreted as move glyphs closer to each other.'), + ]), + + ('MathVariants', [ + ('uint16', 'MinConnectorOverlap', None, None, 'Minimum overlap of connecting glyphs during glyph construction, in design units.'), + ('Offset', 'VertGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'), + ('Offset', 'HorizGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'), + ('uint16', 'VertGlyphCount', None, None, 'Number of glyphs for which information is provided for vertically growing variants.'), + ('uint16', 'HorizGlyphCount', None, None, 'Number of glyphs for which information is provided for horizontally growing variants.'), + ('Offset', 'VertGlyphConstruction', 'VertGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.'), + ('Offset', 'HorizGlyphConstruction', 'HorizGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.'), + ]), + + ('MathGlyphConstruction', [ + ('Offset', 'GlyphAssembly', None, None, 'Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. May be NULL'), + ('uint16', 'VariantCount', None, None, 'Count of glyph growing variants for this glyph.'), + ('MathGlyphVariantRecord', 'MathGlyphVariantRecord', 'VariantCount', 0, 'MathGlyphVariantRecords for alternative variants of the glyphs.'), + ]), + + ('MathGlyphVariantRecord', [ + ('GlyphID', 'VariantGlyph', None, None, 'Glyph ID for the variant.'), + ('uint16', 'AdvanceMeasurement', None, None, 'Advance width/height, in design units, of the variant, in the direction of requested glyph extension.'), + ]), + + ('GlyphAssembly', [ + ('MathValueRecord', 'ItalicsCorrection', None, None, 'Italics correction of this GlyphAssembly. 
Should not depend on the assembly size.'), + ('uint16', 'PartCount', None, None, 'Number of parts in this assembly.'), + ('GlyphPartRecord', 'PartRecords', 'PartCount', 0, 'Array of part records, from left to right and bottom to top.'), + ]), + + ('GlyphPartRecord', [ + ('GlyphID', 'glyph', None, None, 'Glyph ID for the part.'), + ('uint16', 'StartConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the beginning of the glyph, in the direction of the extension.'), + ('uint16', 'EndConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the end of the glyph, in the direction of the extension.'), + ('uint16', 'FullAdvance', None, None, 'Full advance width/height for this part, in the direction of the extension. In design units.'), + ('uint16', 'PartFlags', None, None, 'Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 
0xFFFE Reserved'), + ]), + + + ## + ## Apple Advanced Typography (AAT) tables + ## + + # + # feat + # + + ('feat', [ + ('Version', 'Version', None, None, 'Version of the feat table-initially set to 0x00010000.'), + ('FeatureNames', 'FeatureNames', None, None, 'The feature names.'), + ]), + + ('FeatureNames', [ + ('uint16', 'FeatureNameCount', None, None, 'Number of entries in the feature name array.'), + ('uint16', 'Reserved1', None, None, 'Reserved (set to zero).'), + ('uint32', 'Reserved2', None, None, 'Reserved (set to zero).'), + ('FeatureName', 'FeatureName', 'FeatureNameCount', 0, 'The feature name array.'), + ]), + + ('FeatureName', [ + ('uint16', 'FeatureType', None, None, 'Feature type.'), + ('uint16', 'SettingsCount', None, None, 'The number of records in the setting name array.'), + ('LOffset', 'Settings', None, None, 'Offset to setting table for this feature.'), + ('uint16', 'FeatureFlags', None, None, 'Single-bit flags associated with the feature type.'), + ('uint16', 'FeatureNameID', None, None, 'The name table index for the feature name.'), + ]), + + ('Settings', [ + ('Setting', 'Setting', 'SettingsCount', 0, 'The setting array.'), + ]), + + ('Setting', [ + ('uint16', 'SettingValue', None, None, 'The setting.'), + ('uint16', 'SettingNameID', None, None, 'The name table index for the setting name.'), + ]), + +] diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/otTables.py fonttools-3.0/Snippets/fontTools/ttLib/tables/otTables.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/otTables.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/otTables.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,774 @@ +"""fontTools.ttLib.tables.otTables -- A collection of classes representing the various +OpenType subtables. + +Most are constructed upon import from data in otData.py, all are populated with +converter objects from otConverters.py. 
+""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTable, FormatSwitchingBaseTable +import operator +import warnings + + +class FeatureParams(BaseTable): + + def compile(self, writer, font): + assert featureParamTypes.get(writer['FeatureTag']) == self.__class__, "Wrong FeatureParams type for feature '%s': %s" % (writer['FeatureTag'], self.__class__.__name__) + BaseTable.compile(self, writer, font) + + def toXML(self, xmlWriter, font, attrs=None, name=None): + BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) + +class FeatureParamsSize(FeatureParams): + pass + +class FeatureParamsStylisticSet(FeatureParams): + pass + +class FeatureParamsCharacterVariants(FeatureParams): + pass + +class Coverage(FormatSwitchingBaseTable): + + # manual implementation to get rid of glyphID dependencies + + def postRead(self, rawTable, font): + if self.Format == 1: + # TODO only allow glyphs that are valid? + self.glyphs = rawTable["GlyphArray"] + elif self.Format == 2: + glyphs = self.glyphs = [] + ranges = rawTable["RangeRecord"] + glyphOrder = font.getGlyphOrder() + # Some SIL fonts have coverage entries that don't have sorted + # StartCoverageIndex. If it is so, fixup and warn. We undo + # this when writing font out. + sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex) + if ranges != sorted_ranges: + warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") + ranges = sorted_ranges + del sorted_ranges + for r in ranges: + assert r.StartCoverageIndex == len(glyphs), \ + (r.StartCoverageIndex, len(glyphs)) + start = r.Start + end = r.End + try: + startID = font.getGlyphID(start, requireReal=True) + except KeyError: + warnings.warn("Coverage table has start glyph ID out of range: %s." 
% start) + continue + try: + endID = font.getGlyphID(end, requireReal=True) + 1 + except KeyError: + # Apparently some tools use 65535 to "match all" the range + if end != 'glyph65535': + warnings.warn("Coverage table has end glyph ID out of range: %s." % end) + # NOTE: We clobber out-of-range things here. There are legit uses for those, + # but none that we have seen in the wild. + endID = len(glyphOrder) + glyphs.extend(glyphOrder[glyphID] for glyphID in range(startID, endID)) + else: + assert 0, "unknown format: %s" % self.Format + del self.Format # Don't need this anymore + + def preWrite(self, font): + glyphs = getattr(self, "glyphs", None) + if glyphs is None: + glyphs = self.glyphs = [] + format = 1 + rawTable = {"GlyphArray": glyphs} + getGlyphID = font.getGlyphID + if glyphs: + # find out whether Format 2 is more compact or not + glyphIDs = [getGlyphID(glyphName) for glyphName in glyphs ] + brokenOrder = sorted(glyphIDs) != glyphIDs + + last = glyphIDs[0] + ranges = [[last]] + for glyphID in glyphIDs[1:]: + if glyphID != last + 1: + ranges[-1].append(last) + ranges.append([glyphID]) + last = glyphID + ranges[-1].append(last) + + if brokenOrder or len(ranges) * 3 < len(glyphs): # 3 words vs. 
1 word + # Format 2 is more compact + index = 0 + for i in range(len(ranges)): + start, end = ranges[i] + r = RangeRecord() + r.StartID = start + r.Start = font.getGlyphName(start) + r.End = font.getGlyphName(end) + r.StartCoverageIndex = index + ranges[i] = r + index = index + end - start + 1 + if brokenOrder: + warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") + ranges.sort(key=lambda a: a.StartID) + for r in ranges: + del r.StartID + format = 2 + rawTable = {"RangeRecord": ranges} + #else: + # fallthrough; Format 1 is more compact + self.Format = format + return rawTable + + def toXML2(self, xmlWriter, font): + for glyphName in getattr(self, "glyphs", []): + xmlWriter.simpletag("Glyph", value=glyphName) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + glyphs = getattr(self, "glyphs", None) + if glyphs is None: + glyphs = [] + self.glyphs = glyphs + glyphs.append(attrs["value"]) + + +class SingleSubst(FormatSwitchingBaseTable): + + def postRead(self, rawTable, font): + mapping = {} + input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) + lenMapping = len(input) + if self.Format == 1: + delta = rawTable["DeltaGlyphID"] + inputGIDS = [ font.getGlyphID(name) for name in input ] + outGIDS = [ (glyphID + delta) % 65536 for glyphID in inputGIDS ] + outNames = [ font.getGlyphName(glyphID) for glyphID in outGIDS ] + list(map(operator.setitem, [mapping]*lenMapping, input, outNames)) + elif self.Format == 2: + assert len(input) == rawTable["GlyphCount"], \ + "invalid SingleSubstFormat2 table" + subst = rawTable["Substitute"] + list(map(operator.setitem, [mapping]*lenMapping, input, subst)) + else: + assert 0, "unknown format: %s" % self.Format + self.mapping = mapping + del self.Format # Don't need this anymore + + def preWrite(self, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = self.mapping = {} + items = list(mapping.items()) + getGlyphID = font.getGlyphID + gidItems = [(getGlyphID(a), 
getGlyphID(b)) for a,b in items] + sortableItems = sorted(zip(gidItems, items)) + + # figure out format + format = 2 + delta = None + for inID, outID in gidItems: + if delta is None: + delta = (outID - inID) % 65536 + + if (inID + delta) % 65536 != outID: + break + else: + format = 1 + + rawTable = {} + self.Format = format + cov = Coverage() + input = [ item [1][0] for item in sortableItems] + subst = [ item [1][1] for item in sortableItems] + cov.glyphs = input + rawTable["Coverage"] = cov + if format == 1: + assert delta is not None + rawTable["DeltaGlyphID"] = delta + else: + rawTable["Substitute"] = subst + return rawTable + + def toXML2(self, xmlWriter, font): + items = sorted(self.mapping.items()) + for inGlyph, outGlyph in items: + xmlWriter.simpletag("Substitution", + [("in", inGlyph), ("out", outGlyph)]) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = {} + self.mapping = mapping + mapping[attrs["in"]] = attrs["out"] + + +class ClassDef(FormatSwitchingBaseTable): + + def postRead(self, rawTable, font): + classDefs = {} + glyphOrder = font.getGlyphOrder() + + if self.Format == 1: + start = rawTable["StartGlyph"] + classList = rawTable["ClassValueArray"] + try: + startID = font.getGlyphID(start, requireReal=True) + except KeyError: + warnings.warn("ClassDef table has start glyph ID out of range: %s." % start) + startID = len(glyphOrder) + endID = startID + len(classList) + if endID > len(glyphOrder): + warnings.warn("ClassDef table has entries for out of range glyph IDs: %s,%s." % (start, len(classList))) + # NOTE: We clobber out-of-range things here. There are legit uses for those, + # but none that we have seen in the wild. 
+ endID = len(glyphOrder) + + for glyphID, cls in zip(range(startID, endID), classList): + classDefs[glyphOrder[glyphID]] = cls + + elif self.Format == 2: + records = rawTable["ClassRangeRecord"] + for rec in records: + start = rec.Start + end = rec.End + cls = rec.Class + try: + startID = font.getGlyphID(start, requireReal=True) + except KeyError: + warnings.warn("ClassDef table has start glyph ID out of range: %s." % start) + continue + try: + endID = font.getGlyphID(end, requireReal=True) + 1 + except KeyError: + # Apparently some tools use 65535 to "match all" the range + if end != 'glyph65535': + warnings.warn("ClassDef table has end glyph ID out of range: %s." % end) + # NOTE: We clobber out-of-range things here. There are legit uses for those, + # but none that we have seen in the wild. + endID = len(glyphOrder) + for glyphID in range(startID, endID): + classDefs[glyphOrder[glyphID]] = cls + else: + assert 0, "unknown format: %s" % self.Format + self.classDefs = classDefs + del self.Format # Don't need this anymore + + def preWrite(self, font): + classDefs = getattr(self, "classDefs", None) + if classDefs is None: + classDefs = self.classDefs = {} + items = list(classDefs.items()) + format = 2 + rawTable = {"ClassRangeRecord": []} + getGlyphID = font.getGlyphID + for i in range(len(items)): + glyphName, cls = items[i] + items[i] = getGlyphID(glyphName), glyphName, cls + items.sort() + if items: + last, lastName, lastCls = items[0] + ranges = [[lastCls, last, lastName]] + for glyphID, glyphName, cls in items[1:]: + if glyphID != last + 1 or cls != lastCls: + ranges[-1].extend([last, lastName]) + ranges.append([cls, glyphID, glyphName]) + last = glyphID + lastName = glyphName + lastCls = cls + ranges[-1].extend([last, lastName]) + + startGlyph = ranges[0][1] + endGlyph = ranges[-1][3] + glyphCount = endGlyph - startGlyph + 1 + if len(ranges) * 3 < glyphCount + 1: + # Format 2 is more compact + for i in range(len(ranges)): + cls, start, startName, end, endName 
= ranges[i] + rec = ClassRangeRecord() + rec.Start = startName + rec.End = endName + rec.Class = cls + ranges[i] = rec + format = 2 + rawTable = {"ClassRangeRecord": ranges} + else: + # Format 1 is more compact + startGlyphName = ranges[0][2] + classes = [0] * glyphCount + for cls, start, startName, end, endName in ranges: + for g in range(start - startGlyph, end - startGlyph + 1): + classes[g] = cls + format = 1 + rawTable = {"StartGlyph": startGlyphName, "ClassValueArray": classes} + self.Format = format + return rawTable + + def toXML2(self, xmlWriter, font): + items = sorted(self.classDefs.items()) + for glyphName, cls in items: + xmlWriter.simpletag("ClassDef", [("glyph", glyphName), ("class", cls)]) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + classDefs = getattr(self, "classDefs", None) + if classDefs is None: + classDefs = {} + self.classDefs = classDefs + classDefs[attrs["glyph"]] = int(attrs["class"]) + + +class AlternateSubst(FormatSwitchingBaseTable): + + def postRead(self, rawTable, font): + alternates = {} + if self.Format == 1: + input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) + alts = rawTable["AlternateSet"] + if len(input) != len(alts): + assert len(input) == len(alts) + for i in range(len(input)): + alternates[input[i]] = alts[i].Alternate + else: + assert 0, "unknown format: %s" % self.Format + self.alternates = alternates + del self.Format # Don't need this anymore + + def preWrite(self, font): + self.Format = 1 + alternates = getattr(self, "alternates", None) + if alternates is None: + alternates = self.alternates = {} + items = list(alternates.items()) + for i in range(len(items)): + glyphName, set = items[i] + items[i] = font.getGlyphID(glyphName), glyphName, set + items.sort() + cov = Coverage() + cov.glyphs = [ item[1] for item in items] + alternates = [] + setList = [ item[-1] for item in items] + for set in setList: + alts = AlternateSet() + alts.Alternate = set + alternates.append(alts) + # a 
special case to deal with the fact that several hundred Adobe Japan1-5 + # CJK fonts will overflow an offset if the coverage table isn't pushed to the end. + # Also useful in that when splitting a sub-table because of an offset overflow + # I don't need to calculate the change in the subtable offset due to the change in the coverage table size. + # Allows packing more rules in subtable. + self.sortCoverageLast = 1 + return {"Coverage": cov, "AlternateSet": alternates} + + def toXML2(self, xmlWriter, font): + items = sorted(self.alternates.items()) + for glyphName, alternates in items: + xmlWriter.begintag("AlternateSet", glyph=glyphName) + xmlWriter.newline() + for alt in alternates: + xmlWriter.simpletag("Alternate", glyph=alt) + xmlWriter.newline() + xmlWriter.endtag("AlternateSet") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + alternates = getattr(self, "alternates", None) + if alternates is None: + alternates = {} + self.alternates = alternates + glyphName = attrs["glyph"] + set = [] + alternates[glyphName] = set + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + set.append(attrs["glyph"]) + + +class LigatureSubst(FormatSwitchingBaseTable): + + def postRead(self, rawTable, font): + ligatures = {} + if self.Format == 1: + input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) + ligSets = rawTable["LigatureSet"] + assert len(input) == len(ligSets) + for i in range(len(input)): + ligatures[input[i]] = ligSets[i].Ligature + else: + assert 0, "unknown format: %s" % self.Format + self.ligatures = ligatures + del self.Format # Don't need this anymore + + def preWrite(self, font): + self.Format = 1 + ligatures = getattr(self, "ligatures", None) + if ligatures is None: + ligatures = self.ligatures = {} + items = list(ligatures.items()) + for i in range(len(items)): + glyphName, set = items[i] + items[i] = font.getGlyphID(glyphName), glyphName, set + items.sort() + cov = Coverage() 
+ cov.glyphs = [ item[1] for item in items] + + ligSets = [] + setList = [ item[-1] for item in items ] + for set in setList: + ligSet = LigatureSet() + ligs = ligSet.Ligature = [] + for lig in set: + ligs.append(lig) + ligSets.append(ligSet) + # Useful in that when splitting a sub-table because of an offset overflow + # I don't need to calculate the change in subtabl offset due to the coverage table size. + # Allows packing more rules in subtable. + self.sortCoverageLast = 1 + return {"Coverage": cov, "LigatureSet": ligSets} + + def toXML2(self, xmlWriter, font): + items = sorted(self.ligatures.items()) + for glyphName, ligSets in items: + xmlWriter.begintag("LigatureSet", glyph=glyphName) + xmlWriter.newline() + for lig in ligSets: + xmlWriter.simpletag("Ligature", glyph=lig.LigGlyph, + components=",".join(lig.Component)) + xmlWriter.newline() + xmlWriter.endtag("LigatureSet") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + ligatures = getattr(self, "ligatures", None) + if ligatures is None: + ligatures = {} + self.ligatures = ligatures + glyphName = attrs["glyph"] + ligs = [] + ligatures[glyphName] = ligs + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + lig = Ligature() + lig.LigGlyph = attrs["glyph"] + components = attrs["components"] + lig.Component = components.split(",") if components else [] + ligs.append(lig) + + +# +# For each subtable format there is a class. However, we don't really distinguish +# between "field name" and "format name": often these are the same. Yet there's +# a whole bunch of fields with different names. The following dict is a mapping +# from "format name" to "field name". _buildClasses() uses this to create a +# subclass for each alternate field name. 
+# +_equivalents = { + 'MarkArray': ("Mark1Array",), + 'LangSys': ('DefaultLangSys',), + 'Coverage': ('MarkCoverage', 'BaseCoverage', 'LigatureCoverage', 'Mark1Coverage', + 'Mark2Coverage', 'BacktrackCoverage', 'InputCoverage', + 'LookAheadCoverage', 'VertGlyphCoverage', 'HorizGlyphCoverage', + 'TopAccentCoverage', 'ExtendedShapeCoverage', 'MathKernCoverage'), + 'ClassDef': ('ClassDef1', 'ClassDef2', 'BacktrackClassDef', 'InputClassDef', + 'LookAheadClassDef', 'GlyphClassDef', 'MarkAttachClassDef'), + 'Anchor': ('EntryAnchor', 'ExitAnchor', 'BaseAnchor', 'LigatureAnchor', + 'Mark2Anchor', 'MarkAnchor'), + 'Device': ('XPlaDevice', 'YPlaDevice', 'XAdvDevice', 'YAdvDevice', + 'XDeviceTable', 'YDeviceTable', 'DeviceTable'), + 'Axis': ('HorizAxis', 'VertAxis',), + 'MinMax': ('DefaultMinMax',), + 'BaseCoord': ('MinCoord', 'MaxCoord',), + 'JstfLangSys': ('DefJstfLangSys',), + 'JstfGSUBModList': ('ShrinkageEnableGSUB', 'ShrinkageDisableGSUB', 'ExtensionEnableGSUB', + 'ExtensionDisableGSUB',), + 'JstfGPOSModList': ('ShrinkageEnableGPOS', 'ShrinkageDisableGPOS', 'ExtensionEnableGPOS', + 'ExtensionDisableGPOS',), + 'JstfMax': ('ShrinkageJstfMax', 'ExtensionJstfMax',), + 'MathKern': ('TopRightMathKern', 'TopLeftMathKern', 'BottomRightMathKern', + 'BottomLeftMathKern'), + 'MathGlyphConstruction': ('VertGlyphConstruction', 'HorizGlyphConstruction'), +} + +# +# OverFlow logic, to automatically create ExtensionLookups +# XXX This should probably move to otBase.py +# + +def fixLookupOverFlows(ttf, overflowRecord): + """ Either the offset from the LookupList to a lookup overflowed, or + an offset from a lookup to a subtable overflowed. + The table layout is: + GPSO/GUSB + Script List + Feature List + LookUpList + Lookup[0] and contents + SubTable offset list + SubTable[0] and contents + ... + SubTable[n] and contents + ... + Lookup[n] and contents + SubTable offset list + SubTable[0] and contents + ... 
+ SubTable[n] and contents + If the offset to a lookup overflowed (SubTableIndex is None) + we must promote the *previous* lookup to an Extension type. + If the offset from a lookup to subtable overflowed, then we must promote it + to an Extension Lookup type. + """ + ok = 0 + lookupIndex = overflowRecord.LookupListIndex + if (overflowRecord.SubTableIndex is None): + lookupIndex = lookupIndex - 1 + if lookupIndex < 0: + return ok + if overflowRecord.tableType == 'GSUB': + extType = 7 + elif overflowRecord.tableType == 'GPOS': + extType = 9 + + lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup + lookup = lookups[lookupIndex] + # If the previous lookup is an extType, look further back. Very unlikely, but possible. + while lookup.SubTable[0].__class__.LookupType == extType: + lookupIndex = lookupIndex -1 + if lookupIndex < 0: + return ok + lookup = lookups[lookupIndex] + + for si in range(len(lookup.SubTable)): + subTable = lookup.SubTable[si] + extSubTableClass = lookupTypes[overflowRecord.tableType][extType] + extSubTable = extSubTableClass() + extSubTable.Format = 1 + extSubTable.ExtSubTable = subTable + lookup.SubTable[si] = extSubTable + ok = 1 + return ok + +def splitAlternateSubst(oldSubTable, newSubTable, overflowRecord): + ok = 1 + newSubTable.Format = oldSubTable.Format + if hasattr(oldSubTable, 'sortCoverageLast'): + newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast + + oldAlts = sorted(oldSubTable.alternates.items()) + oldLen = len(oldAlts) + + if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']: + # Coverage table is written last. overflow is to or within the + # the coverage table. We will just cut the subtable in half. + newLen = oldLen//2 + + elif overflowRecord.itemName == 'AlternateSet': + # We just need to back up by two items + # from the overflowed AlternateSet index to make sure the offset + # to the Coverage table doesn't overflow. 
+ newLen = overflowRecord.itemIndex - 1 + + newSubTable.alternates = {} + for i in range(newLen, oldLen): + item = oldAlts[i] + key = item[0] + newSubTable.alternates[key] = item[1] + del oldSubTable.alternates[key] + + return ok + + +def splitLigatureSubst(oldSubTable, newSubTable, overflowRecord): + ok = 1 + newSubTable.Format = oldSubTable.Format + oldLigs = sorted(oldSubTable.ligatures.items()) + oldLen = len(oldLigs) + + if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']: + # Coverage table is written last. overflow is to or within the + # the coverage table. We will just cut the subtable in half. + newLen = oldLen//2 + + elif overflowRecord.itemName == 'LigatureSet': + # We just need to back up by two items + # from the overflowed AlternateSet index to make sure the offset + # to the Coverage table doesn't overflow. + newLen = overflowRecord.itemIndex - 1 + + newSubTable.ligatures = {} + for i in range(newLen, oldLen): + item = oldLigs[i] + key = item[0] + newSubTable.ligatures[key] = item[1] + del oldSubTable.ligatures[key] + + return ok + + +splitTable = { 'GSUB': { +# 1: splitSingleSubst, +# 2: splitMultipleSubst, + 3: splitAlternateSubst, + 4: splitLigatureSubst, +# 5: splitContextSubst, +# 6: splitChainContextSubst, +# 7: splitExtensionSubst, +# 8: splitReverseChainSingleSubst, + }, + 'GPOS': { +# 1: splitSinglePos, +# 2: splitPairPos, +# 3: splitCursivePos, +# 4: splitMarkBasePos, +# 5: splitMarkLigPos, +# 6: splitMarkMarkPos, +# 7: splitContextPos, +# 8: splitChainContextPos, +# 9: splitExtensionPos, + } + + } + +def fixSubTableOverFlows(ttf, overflowRecord): + """ + An offset has overflowed within a sub-table. We need to divide this subtable into smaller parts. 
+ """ + ok = 0 + table = ttf[overflowRecord.tableType].table + lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex] + subIndex = overflowRecord.SubTableIndex + subtable = lookup.SubTable[subIndex] + + if hasattr(subtable, 'ExtSubTable'): + # We split the subtable of the Extension table, and add a new Extension table + # to contain the new subtable. + + subTableType = subtable.ExtSubTable.__class__.LookupType + extSubTable = subtable + subtable = extSubTable.ExtSubTable + newExtSubTableClass = lookupTypes[overflowRecord.tableType][subtable.__class__.LookupType] + newExtSubTable = newExtSubTableClass() + newExtSubTable.Format = extSubTable.Format + lookup.SubTable.insert(subIndex + 1, newExtSubTable) + + newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType] + newSubTable = newSubTableClass() + newExtSubTable.ExtSubTable = newSubTable + else: + subTableType = subtable.__class__.LookupType + newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType] + newSubTable = newSubTableClass() + lookup.SubTable.insert(subIndex + 1, newSubTable) + + if hasattr(lookup, 'SubTableCount'): # may not be defined yet. + lookup.SubTableCount = lookup.SubTableCount + 1 + + try: + splitFunc = splitTable[overflowRecord.tableType][subTableType] + except KeyError: + return ok + + ok = splitFunc(subtable, newSubTable, overflowRecord) + return ok + +# End of OverFlow logic + + +def _buildClasses(): + import re + from .otData import otData + + formatPat = re.compile("([A-Za-z0-9]+)Format(\d+)$") + namespace = globals() + + # populate module with classes + for name, table in otData: + baseClass = BaseTable + m = formatPat.match(name) + if m: + # XxxFormatN subtable, we only add the "base" table + name = m.group(1) + baseClass = FormatSwitchingBaseTable + if name not in namespace: + # the class doesn't exist yet, so the base implementation is used. 
+ cls = type(name, (baseClass,), {}) + namespace[name] = cls + + for base, alts in _equivalents.items(): + base = namespace[base] + for alt in alts: + namespace[alt] = type(alt, (base,), {}) + + global lookupTypes + lookupTypes = { + 'GSUB': { + 1: SingleSubst, + 2: MultipleSubst, + 3: AlternateSubst, + 4: LigatureSubst, + 5: ContextSubst, + 6: ChainContextSubst, + 7: ExtensionSubst, + 8: ReverseChainSingleSubst, + }, + 'GPOS': { + 1: SinglePos, + 2: PairPos, + 3: CursivePos, + 4: MarkBasePos, + 5: MarkLigPos, + 6: MarkMarkPos, + 7: ContextPos, + 8: ChainContextPos, + 9: ExtensionPos, + }, + } + lookupTypes['JSTF'] = lookupTypes['GPOS'] # JSTF contains GPOS + for lookupEnum in lookupTypes.values(): + for enum, cls in lookupEnum.items(): + cls.LookupType = enum + + global featureParamTypes + featureParamTypes = { + 'size': FeatureParamsSize, + } + for i in range(1, 20+1): + featureParamTypes['ss%02d' % i] = FeatureParamsStylisticSet + for i in range(1, 99+1): + featureParamTypes['cv%02d' % i] = FeatureParamsCharacterVariants + + # add converters to classes + from .otConverters import buildConverters + for name, table in otData: + m = formatPat.match(name) + if m: + # XxxFormatN subtable, add converter to "base" table + name, format = m.groups() + format = int(format) + cls = namespace[name] + if not hasattr(cls, "converters"): + cls.converters = {} + cls.convertersByName = {} + converters, convertersByName = buildConverters(table[1:], namespace) + cls.converters[format] = converters + cls.convertersByName[format] = convertersByName + # XXX Add staticSize? + else: + cls = namespace[name] + cls.converters, cls.convertersByName = buildConverters(table, namespace) + # XXX Add staticSize? 
+ + +_buildClasses() + + +def _getGlyphsFromCoverageTable(coverage): + if coverage is None: + # empty coverage table + return [] + else: + return coverage.glyphs diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_p_o_s_t.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_p_o_s_t.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_p_o_s_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_p_o_s_t.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,277 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, readHex +from . import DefaultTable +import sys +import struct +import array + + +postFormat = """ + > + formatType: 16.16F + italicAngle: 16.16F # italic angle in degrees + underlinePosition: h + underlineThickness: h + isFixedPitch: L + minMemType42: L # minimum memory if TrueType font is downloaded + maxMemType42: L # maximum memory if TrueType font is downloaded + minMemType1: L # minimum memory if Type1 font is downloaded + maxMemType1: L # maximum memory if Type1 font is downloaded +""" + +postFormatSize = sstruct.calcsize(postFormat) + + +class table__p_o_s_t(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + sstruct.unpack(postFormat, data[:postFormatSize], self) + data = data[postFormatSize:] + if self.formatType == 1.0: + self.decode_format_1_0(data, ttFont) + elif self.formatType == 2.0: + self.decode_format_2_0(data, ttFont) + elif self.formatType == 3.0: + self.decode_format_3_0(data, ttFont) + elif self.formatType == 4.0: + self.decode_format_4_0(data, ttFont) + else: + # supported format + raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType) + + def compile(self, ttFont): + data = sstruct.pack(postFormat, self) + if self.formatType == 
1.0: + pass # we're done + elif self.formatType == 2.0: + data = data + self.encode_format_2_0(ttFont) + elif self.formatType == 3.0: + pass # we're done + elif self.formatType == 4.0: + data = data + self.encode_format_4_0(ttFont) + else: + # supported format + raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType) + return data + + def getGlyphOrder(self): + """This function will get called by a ttLib.TTFont instance. + Do not call this function yourself, use TTFont().getGlyphOrder() + or its relatives instead! + """ + if not hasattr(self, "glyphOrder"): + raise ttLib.TTLibError("illegal use of getGlyphOrder()") + glyphOrder = self.glyphOrder + del self.glyphOrder + return glyphOrder + + def decode_format_1_0(self, data, ttFont): + self.glyphOrder = standardGlyphOrder[:ttFont["maxp"].numGlyphs] + + def decode_format_2_0(self, data, ttFont): + numGlyphs, = struct.unpack(">H", data[:2]) + numGlyphs = int(numGlyphs) + if numGlyphs > ttFont['maxp'].numGlyphs: + # Assume the numGlyphs field is bogus, so sync with maxp. + # I've seen this in one font, and if the assumption is + # wrong elsewhere, well, so be it: it's hard enough to + # work around _one_ non-conforming post format... 
+ numGlyphs = ttFont['maxp'].numGlyphs + data = data[2:] + indices = array.array("H") + indices.fromstring(data[:2*numGlyphs]) + if sys.byteorder != "big": + indices.byteswap() + data = data[2*numGlyphs:] + self.extraNames = extraNames = unpackPStrings(data) + self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs) + for glyphID in range(numGlyphs): + index = indices[glyphID] + if index > 32767: # reserved for future use; ignore + name = "" + elif index > 257: + try: + name = extraNames[index-258] + except IndexError: + name = "" + else: + # fetch names from standard list + name = standardGlyphOrder[index] + glyphOrder[glyphID] = name + self.build_psNameMapping(ttFont) + + def build_psNameMapping(self, ttFont): + mapping = {} + allNames = {} + for i in range(ttFont['maxp'].numGlyphs): + glyphName = psName = self.glyphOrder[i] + if glyphName == "": + glyphName = "glyph%.5d" % i + if glyphName in allNames: + # make up a new glyphName that's unique + n = allNames[glyphName] + while (glyphName + "#" + str(n)) in allNames: + n += 1 + allNames[glyphName] = n + 1 + glyphName = glyphName + "#" + str(n) + + self.glyphOrder[i] = glyphName + allNames[glyphName] = 1 + if glyphName != psName: + mapping[glyphName] = psName + + self.mapping = mapping + + def decode_format_3_0(self, data, ttFont): + # Setting self.glyphOrder to None will cause the TTFont object + # try and construct glyph names from a Unicode cmap table. + self.glyphOrder = None + + def decode_format_4_0(self, data, ttFont): + from fontTools import agl + numGlyphs = ttFont['maxp'].numGlyphs + indices = array.array("H") + indices.fromstring(data) + if sys.byteorder != "big": + indices.byteswap() + # In some older fonts, the size of the post table doesn't match + # the number of glyphs. Sometimes it's bigger, sometimes smaller. 
+ self.glyphOrder = glyphOrder = [''] * int(numGlyphs) + for i in range(min(len(indices),numGlyphs)): + if indices[i] == 0xFFFF: + self.glyphOrder[i] = '' + elif indices[i] in agl.UV2AGL: + self.glyphOrder[i] = agl.UV2AGL[indices[i]] + else: + self.glyphOrder[i] = "uni%04X" % indices[i] + self.build_psNameMapping(ttFont) + + def encode_format_2_0(self, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs + glyphOrder = ttFont.getGlyphOrder() + assert len(glyphOrder) == numGlyphs + indices = array.array("H") + extraDict = {} + extraNames = self.extraNames + for i in range(len(extraNames)): + extraDict[extraNames[i]] = i + for glyphID in range(numGlyphs): + glyphName = glyphOrder[glyphID] + if glyphName in self.mapping: + psName = self.mapping[glyphName] + else: + psName = glyphName + if psName in extraDict: + index = 258 + extraDict[psName] + elif psName in standardGlyphOrder: + index = standardGlyphOrder.index(psName) + else: + index = 258 + len(extraNames) + assert index < 32768, "Too many glyph names for 'post' table format 2" + extraDict[psName] = len(extraNames) + extraNames.append(psName) + indices.append(index) + if sys.byteorder != "big": + indices.byteswap() + return struct.pack(">H", numGlyphs) + indices.tostring() + packPStrings(extraNames) + + def encode_format_4_0(self, ttFont): + from fontTools import agl + numGlyphs = ttFont['maxp'].numGlyphs + glyphOrder = ttFont.getGlyphOrder() + assert len(glyphOrder) == numGlyphs + indices = array.array("H") + for glyphID in glyphOrder: + glyphID = glyphID.split('#')[0] + if glyphID in agl.AGL2UV: + indices.append(agl.AGL2UV[glyphID]) + elif len(glyphID) == 7 and glyphID[:3] == 'uni': + indices.append(int(glyphID[3:],16)) + else: + indices.append(0xFFFF) + if sys.byteorder != "big": + indices.byteswap() + return indices.tostring() + + def toXML(self, writer, ttFont): + formatstring, names, fixes = sstruct.getformat(postFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + 
writer.newline() + if hasattr(self, "mapping"): + writer.begintag("psNames") + writer.newline() + writer.comment("This file uses unique glyph names based on the information\n" + "found in the 'post' table. Since these names might not be unique,\n" + "we have to invent artificial names in case of clashes. In order to\n" + "be able to retain the original information, we need a name to\n" + "ps name mapping for those cases where they differ. That's what\n" + "you see below.\n") + writer.newline() + items = sorted(self.mapping.items()) + for name, psName in items: + writer.simpletag("psName", name=name, psName=psName) + writer.newline() + writer.endtag("psNames") + writer.newline() + if hasattr(self, "extraNames"): + writer.begintag("extraNames") + writer.newline() + writer.comment("following are the name that are not taken from the standard Mac glyph order") + writer.newline() + for name in self.extraNames: + writer.simpletag("psName", name=name) + writer.newline() + writer.endtag("extraNames") + writer.newline() + if hasattr(self, "data"): + writer.begintag("hexdata") + writer.newline() + writer.dumphex(self.data) + writer.endtag("hexdata") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name not in ("psNames", "extraNames", "hexdata"): + setattr(self, name, safeEval(attrs["value"])) + elif name == "psNames": + self.mapping = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "psName": + self.mapping[attrs["name"]] = attrs["psName"] + elif name == "extraNames": + self.extraNames = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "psName": + self.extraNames.append(attrs["name"]) + else: + self.data = readHex(content) + + +def unpackPStrings(data): + strings = [] + index = 0 + dataLen = len(data) + while index < dataLen: + length = byteord(data[index]) + 
strings.append(tostr(data[index+1:index+1+length], encoding="latin1")) + index = index + 1 + length + return strings + + +def packPStrings(strings): + data = b"" + for s in strings: + data = data + bytechr(len(s)) + tobytes(s, encoding="latin1") + return data diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_p_r_e_p.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_p_r_e_p.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_p_r_e_p.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_p_r_e_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib + +superclass = ttLib.getTableClass("fpgm") + +class table__p_r_e_p(superclass): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/sbixGlyph.py fonttools-3.0/Snippets/fontTools/ttLib/tables/sbixGlyph.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/sbixGlyph.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/sbixGlyph.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,119 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import readHex, safeEval +import struct + + +sbixGlyphHeaderFormat = """ + > + originOffsetX: h # The x-value of the point in the glyph relative to its + # lower-left corner which corresponds to the origin of + # the glyph on the screen, that is the point on the + # baseline at the left edge of the glyph. + originOffsetY: h # The y-value of the point in the glyph relative to its + # lower-left corner which corresponds to the origin of + # the glyph on the screen, that is the point on the + # baseline at the left edge of the glyph. + graphicType: 4s # e.g. 
"png " +""" + +sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat) + + +class Glyph(object): + def __init__(self, glyphName=None, referenceGlyphName=None, originOffsetX=0, originOffsetY=0, graphicType=None, imageData=None, rawdata=None, gid=0): + self.gid = gid + self.glyphName = glyphName + self.referenceGlyphName = referenceGlyphName + self.originOffsetX = originOffsetX + self.originOffsetY = originOffsetY + self.rawdata = rawdata + self.graphicType = graphicType + self.imageData = imageData + + # fix self.graphicType if it is null terminated or too short + if self.graphicType is not None: + if self.graphicType[-1] == "\0": + self.graphicType = self.graphicType[:-1] + if len(self.graphicType) > 4: + from fontTools import ttLib + raise ttLib.TTLibError("Glyph.graphicType must not be longer than 4 characters.") + elif len(self.graphicType) < 4: + # pad with spaces + self.graphicType += " "[:(4 - len(self.graphicType))] + + def decompile(self, ttFont): + self.glyphName = ttFont.getGlyphName(self.gid) + if self.rawdata is None: + from fontTools import ttLib + raise ttLib.TTLibError("No table data to decompile") + if len(self.rawdata) > 0: + if len(self.rawdata) < sbixGlyphHeaderFormatSize: + from fontTools import ttLib + #print "Glyph %i header too short: Expected %x, got %x." 
% (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata)) + raise ttLib.TTLibError("Glyph header too short.") + + sstruct.unpack(sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self) + + if self.graphicType == "dupe": + # this glyph is a reference to another glyph's image data + gid, = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:]) + self.referenceGlyphName = ttFont.getGlyphName(gid) + else: + self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:] + self.referenceGlyphName = None + # clean up + del self.rawdata + del self.gid + + def compile(self, ttFont): + if self.glyphName is None: + from fontTools import ttLib + raise ttLib.TTLibError("Can't compile Glyph without glyph name") + # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index? + # (needed if you just want to compile the sbix table on its own) + self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName)) + if self.graphicType is None: + self.rawdata = "" + else: + self.rawdata = sstruct.pack(sbixGlyphHeaderFormat, self) + self.imageData + + def toXML(self, xmlWriter, ttFont): + if self.graphicType == None: + # TODO: ignore empty glyphs? + # a glyph data entry is required for each glyph, + # but empty ones can be calculated at compile time + xmlWriter.simpletag("glyph", name=self.glyphName) + xmlWriter.newline() + return + xmlWriter.begintag("glyph", + graphicType=self.graphicType, + name=self.glyphName, + originOffsetX=self.originOffsetX, + originOffsetY=self.originOffsetY, + ) + xmlWriter.newline() + if self.graphicType == "dupe": + # graphicType == "dupe" is a reference to another glyph id. 
+ xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName) + else: + xmlWriter.begintag("hexdata") + xmlWriter.newline() + xmlWriter.dumphex(self.imageData) + xmlWriter.endtag("hexdata") + xmlWriter.newline() + xmlWriter.endtag("glyph") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "ref": + # glyph is a "dupe", i.e. a reference to another glyph's image data. + # in this case imageData contains the glyph id of the reference glyph + # get glyph id from glyphname + self.imageData = struct.pack(">H", ttFont.getGlyphID(safeEval("'''" + attrs["glyphname"] + "'''"))) + elif name == "hexdata": + self.imageData = readHex(content) + else: + from fontTools import ttLib + raise ttLib.TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_s_b_i_x.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_s_b_i_x.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_s_b_i_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_s_b_i_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,117 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, num2binary, binary2num +from . import DefaultTable +from .sbixGlyph import * +from .sbixStrike import * + + +sbixHeaderFormat = """ + > + version: H # Version number (set to 1) + flags: H # The only two bits used in the flags field are bits 0 + # and 1. For historical reasons, bit 0 must always be 1. 
+ # Bit 1 is a sbixDrawOutlines flag and is interpreted as + # follows: + # 0: Draw only 'sbix' bitmaps + # 1: Draw both 'sbix' bitmaps and outlines, in that + # order + numStrikes: L # Number of bitmap strikes to follow +""" +sbixHeaderFormatSize = sstruct.calcsize(sbixHeaderFormat) + + +sbixStrikeOffsetFormat = """ + > + strikeOffset: L # Offset from begining of table to data for the + # individual strike +""" +sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat) + + +class table__s_b_i_x(DefaultTable.DefaultTable): + def __init__(self, tag): + self.tableTag = tag + self.version = 1 + self.flags = 1 + self.numStrikes = 0 + self.strikes = {} + self.strikeOffsets = [] + + def decompile(self, data, ttFont): + # read table header + sstruct.unpack(sbixHeaderFormat, data[ : sbixHeaderFormatSize], self) + # collect offsets to individual strikes in self.strikeOffsets + for i in range(self.numStrikes): + current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize + offset_entry = sbixStrikeOffset() + sstruct.unpack(sbixStrikeOffsetFormat, \ + data[current_offset:current_offset+sbixStrikeOffsetFormatSize], \ + offset_entry) + self.strikeOffsets.append(offset_entry.strikeOffset) + + # decompile Strikes + for i in range(self.numStrikes-1, -1, -1): + current_strike = Strike(rawdata=data[self.strikeOffsets[i]:]) + data = data[:self.strikeOffsets[i]] + current_strike.decompile(ttFont) + #print " Strike length: %xh" % len(bitmapSetData) + #print "Number of Glyph entries:", len(current_strike.glyphs) + if current_strike.ppem in self.strikes: + from fontTools import ttLib + raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike") + self.strikes[current_strike.ppem] = current_strike + + # after the glyph data records have been extracted, we don't need the offsets anymore + del self.strikeOffsets + del self.numStrikes + + def compile(self, ttFont): + sbixData = "" + self.numStrikes = len(self.strikes) + sbixHeader = 
sstruct.pack(sbixHeaderFormat, self) + + # calculate offset to start of first strike + setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes + + for si in sorted(self.strikes.keys()): + current_strike = self.strikes[si] + current_strike.compile(ttFont) + # append offset to this strike to table header + current_strike.strikeOffset = setOffset + sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike) + setOffset += len(current_strike.data) + sbixData += current_strike.data + + return sbixHeader + sbixData + + def toXML(self, xmlWriter, ttFont): + xmlWriter.simpletag("version", value=self.version) + xmlWriter.newline() + xmlWriter.simpletag("flags", value=num2binary(self.flags, 16)) + xmlWriter.newline() + for i in sorted(self.strikes.keys()): + self.strikes[i].toXML(xmlWriter, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name =="version": + setattr(self, name, safeEval(attrs["value"])) + elif name == "flags": + setattr(self, name, binary2num(attrs["value"])) + elif name == "strike": + current_strike = Strike() + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + current_strike.fromXML(name, attrs, content, ttFont) + self.strikes[current_strike.ppem] = current_strike + else: + from fontTools import ttLib + raise ttLib.TTLibError("can't handle '%s' element" % name) + + +# Helper classes + +class sbixStrikeOffset(object): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/sbixStrike.py fonttools-3.0/Snippets/fontTools/ttLib/tables/sbixStrike.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/sbixStrike.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/sbixStrike.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,150 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import readHex +from .sbixGlyph import 
* +import struct + +sbixStrikeHeaderFormat = """ + > + ppem: H # The PPEM for which this strike was designed (e.g., 9, + # 12, 24) + resolution: H # The screen resolution (in dpi) for which this strike + # was designed (e.g., 72) +""" + +sbixGlyphDataOffsetFormat = """ + > + glyphDataOffset: L # Offset from the beginning of the strike data record + # to data for the individual glyph +""" + +sbixStrikeHeaderFormatSize = sstruct.calcsize(sbixStrikeHeaderFormat) +sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat) + + +class Strike(object): + def __init__(self, rawdata=None, ppem=0, resolution=72): + self.data = rawdata + self.ppem = ppem + self.resolution = resolution + self.glyphs = {} + + def decompile(self, ttFont): + if self.data is None: + from fontTools import ttLib + raise ttLib.TTLibError + if len(self.data) < sbixStrikeHeaderFormatSize: + from fontTools import ttLib + raise(ttLib.TTLibError, "Strike header too short: Expected %x, got %x.") \ + % (sbixStrikeHeaderFormatSize, len(self.data)) + + # read Strike header from raw data + sstruct.unpack(sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self) + + # calculate number of glyphs + firstGlyphDataOffset, = struct.unpack(">L", \ + self.data[sbixStrikeHeaderFormatSize:sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize]) + self.numGlyphs = (firstGlyphDataOffset - sbixStrikeHeaderFormatSize) // sbixGlyphDataOffsetFormatSize - 1 + # ^ -1 because there's one more offset than glyphs + + # build offset list for single glyph data offsets + self.glyphDataOffsets = [] + for i in range(self.numGlyphs + 1): # + 1 because there's one more offset than glyphs + start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize + current_offset, = struct.unpack(">L", self.data[start:start + sbixGlyphDataOffsetFormatSize]) + self.glyphDataOffsets.append(current_offset) + + # iterate through offset list and slice raw data into glyph data records + for i in 
range(self.numGlyphs): + current_glyph = Glyph(rawdata=self.data[self.glyphDataOffsets[i]:self.glyphDataOffsets[i+1]], gid=i) + current_glyph.decompile(ttFont) + self.glyphs[current_glyph.glyphName] = current_glyph + del self.glyphDataOffsets + del self.numGlyphs + del self.data + + def compile(self, ttFont): + self.glyphDataOffsets = "" + self.bitmapData = "" + + glyphOrder = ttFont.getGlyphOrder() + + # first glyph starts right after the header + currentGlyphDataOffset = sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1) + for glyphName in glyphOrder: + if glyphName in self.glyphs: + # we have glyph data for this glyph + current_glyph = self.glyphs[glyphName] + else: + # must add empty glyph data record for this glyph + current_glyph = Glyph(glyphName=glyphName) + current_glyph.compile(ttFont) + current_glyph.glyphDataOffset = currentGlyphDataOffset + self.bitmapData += current_glyph.rawdata + currentGlyphDataOffset += len(current_glyph.rawdata) + self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, current_glyph) + + # add last "offset", really the end address of the last glyph data record + dummy = Glyph() + dummy.glyphDataOffset = currentGlyphDataOffset + self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy) + + # pack header + self.data = sstruct.pack(sbixStrikeHeaderFormat, self) + # add offsets and image data after header + self.data += self.glyphDataOffsets + self.bitmapData + + def toXML(self, xmlWriter, ttFont): + xmlWriter.begintag("strike") + xmlWriter.newline() + xmlWriter.simpletag("ppem", value=self.ppem) + xmlWriter.newline() + xmlWriter.simpletag("resolution", value=self.resolution) + xmlWriter.newline() + glyphOrder = ttFont.getGlyphOrder() + for i in range(len(glyphOrder)): + if glyphOrder[i] in self.glyphs: + self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont) + # TODO: what if there are more glyph data records than (glyf table) glyphs? 
+ xmlWriter.endtag("strike") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name in ["ppem", "resolution"]: + setattr(self, name, safeEval(attrs["value"])) + elif name == "glyph": + if "graphicType" in attrs: + myFormat = safeEval("'''" + attrs["graphicType"] + "'''") + else: + myFormat = None + if "glyphname" in attrs: + myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''") + elif "name" in attrs: + myGlyphName = safeEval("'''" + attrs["name"] + "'''") + else: + from fontTools import ttLib + raise ttLib.TTLibError("Glyph must have a glyph name.") + if "originOffsetX" in attrs: + myOffsetX = safeEval(attrs["originOffsetX"]) + else: + myOffsetX = 0 + if "originOffsetY" in attrs: + myOffsetY = safeEval(attrs["originOffsetY"]) + else: + myOffsetY = 0 + current_glyph = Glyph( + glyphName=myGlyphName, + graphicType=myFormat, + originOffsetX=myOffsetX, + originOffsetY=myOffsetY, + ) + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + current_glyph.fromXML(name, attrs, content, ttFont) + current_glyph.compile(ttFont) + self.glyphs[current_glyph.glyphName] = current_glyph + else: + from fontTools import ttLib + raise ttLib.TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/S_I_N_G_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/S_I_N_G_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/S_I_N_G_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/S_I_N_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,95 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable + +SINGFormat = """ + > # big endian + tableVersionMajor: H + tableVersionMinor: H + glyphletVersion: H + permissions: h + mainGID: H + unitsPerEm: H + vertAdvance: h + vertOrigin: h + uniqueName: 28s + METAMD5: 16s + nameLength: 1s +""" +# baseGlyphName is a byte string which follows the record above. + + +class table_S_I_N_G_(DefaultTable.DefaultTable): + + dependencies = [] + + def decompile(self, data, ttFont): + dummy, rest = sstruct.unpack2(SINGFormat, data, self) + self.uniqueName = self.decompileUniqueName(self.uniqueName) + self.nameLength = byteord(self.nameLength) + assert len(rest) == self.nameLength + self.baseGlyphName = tostr(rest) + + rawMETAMD5 = self.METAMD5 + self.METAMD5 = "[" + hex(byteord(self.METAMD5[0])) + for char in rawMETAMD5[1:]: + self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char)) + self.METAMD5 = self.METAMD5 + "]" + + def decompileUniqueName(self, data): + name = "" + for char in data: + val = byteord(char) + if val == 0: + break + if (val > 31) or (val < 128): + name += chr(val) + else: + octString = oct(val) + if len(octString) > 3: + octString = octString[1:] # chop off that leading zero. 
+ elif len(octString) < 3: + octString.zfill(3) + name += "\\" + octString + return name + + def compile(self, ttFont): + d = self.__dict__.copy() + d["nameLength"] = bytechr(len(self.baseGlyphName)) + d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28) + METAMD5List = eval(self.METAMD5) + d["METAMD5"] = b"" + for val in METAMD5List: + d["METAMD5"] += bytechr(val) + assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table" + data = sstruct.pack(SINGFormat, d) + data = data + tobytes(self.baseGlyphName) + return data + + def compilecompileUniqueName(self, name, length): + nameLen = len(name) + if length <= nameLen: + name = name[:length-1] + "\000" + else: + name += (nameLen - length) * "\000" + return name + + def toXML(self, writer, ttFont): + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(SINGFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + writer.simpletag("baseGlyphName", value=self.baseGlyphName) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name in ["uniqueName", "METAMD5", "baseGlyphName"]: + setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/S_V_G_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/S_V_G_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/S_V_G_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/S_V_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,379 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . 
import DefaultTable +try: + import xml.etree.cElementTree as ET +except ImportError: + import xml.etree.ElementTree as ET +import struct +import re + +__doc__=""" +Compiles/decompiles version 0 and 1 SVG tables from/to XML. + +Version 1 is the first SVG definition, implemented in Mozilla before Aug 2013, now deprecated. +This module will decompile this correctly, but will compile a version 1 table +only if you add the secret element "" to the SVG element in the TTF file. + +Version 0 is the joint Adobe-Mozilla proposal, which supports color palettes. + +The XML format is: + + + <complete SVG doc> ]] + </svgDoc> +... + <svgDoc endGlyphID="n" startGlyphID="m"> + <![CDATA[ <complete SVG doc> ]] + </svgDoc> + + <colorPalettes> + <colorParamUINameID>n</colorParamUINameID> + ... + <colorParamUINameID>m</colorParamUINameID> + <colorPalette uiNameID="n"> + <colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" /> + ... + <colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" /> + </colorPalette> + ... + <colorPalette uiNameID="m"> + <colorRecord red="<int> green="<int>" blue="<int>" alpha="<int>" /> + ... + <colorRecord red=<int>" green="<int>" blue="<int>" alpha="<int>" /> + </colorPalette> + </colorPalettes> +</SVG> + +Color values must be less than 256. + +The number of color records in each </colorPalette> must be the same as +the number of <colorParamUINameID> elements. 
+ +""" + +XML = ET.XML +XMLElement = ET.Element +xmlToString = ET.tostring + +SVG_format_0 = """ + > # big endian + version: H + offsetToSVGDocIndex: L + offsetToColorPalettes: L +""" + +SVG_format_0Size = sstruct.calcsize(SVG_format_0) + +SVG_format_1 = """ + > # big endian + version: H + numIndicies: H +""" + +SVG_format_1Size = sstruct.calcsize(SVG_format_1) + +doc_index_entry_format_0 = """ + > # big endian + startGlyphID: H + endGlyphID: H + svgDocOffset: L + svgDocLength: L +""" + +doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0) + +colorRecord_format_0 = """ + red: B + green: B + blue: B + alpha: B +""" + + +class table_S_V_G_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.docList = None + self.colorPalettes = None + pos = 0 + self.version = struct.unpack(">H", data[pos:pos+2])[0] + + if self.version == 1: + self.decompile_format_1(data, ttFont) + else: + if self.version != 0: + print("Unknown SVG table version '%s'. Decompiling as version 0." % (self.version)) + self.decompile_format_0(data, ttFont) + + def decompile_format_0(self, data, ttFont): + dummy, data2 = sstruct.unpack2(SVG_format_0, data, self) + # read in SVG Documents Index + self.decompileEntryList(data) + + # read in colorPalettes table. 
+ self.colorPalettes = colorPalettes = ColorPalettes() + pos = self.offsetToColorPalettes + if pos > 0: + colorPalettes.numColorParams = numColorParams = struct.unpack(">H", data[pos:pos+2])[0] + if numColorParams > 0: + colorPalettes.colorParamUINameIDs = colorParamUINameIDs = [] + pos = pos + 2 + for i in range(numColorParams): + nameID = struct.unpack(">H", data[pos:pos+2])[0] + colorParamUINameIDs.append(nameID) + pos = pos + 2 + + colorPalettes.numColorPalettes = numColorPalettes = struct.unpack(">H", data[pos:pos+2])[0] + pos = pos + 2 + if numColorPalettes > 0: + colorPalettes.colorPaletteList = colorPaletteList = [] + for i in range(numColorPalettes): + colorPalette = ColorPalette() + colorPaletteList.append(colorPalette) + colorPalette.uiNameID = struct.unpack(">H", data[pos:pos+2])[0] + pos = pos + 2 + colorPalette.paletteColors = paletteColors = [] + for j in range(numColorParams): + colorRecord, colorPaletteData = sstruct.unpack2(colorRecord_format_0, data[pos:], ColorRecord()) + paletteColors.append(colorRecord) + pos += 4 + + def decompile_format_1(self, data, ttFont): + pos = 2 + self.numEntries = struct.unpack(">H", data[pos:pos+2])[0] + pos += 2 + self.decompileEntryList(data, pos) + + def decompileEntryList(self, data): + # data starts with the first entry of the entry list. 
+ pos = subTableStart = self.offsetToSVGDocIndex + self.numEntries = numEntries = struct.unpack(">H", data[pos:pos+2])[0] + pos += 2 + if self.numEntries > 0: + data2 = data[pos:] + self.docList = [] + self.entries = entries = [] + for i in range(self.numEntries): + docIndexEntry, data2 = sstruct.unpack2(doc_index_entry_format_0, data2, DocumentIndexEntry()) + entries.append(docIndexEntry) + + for entry in entries: + start = entry.svgDocOffset + subTableStart + end = start + entry.svgDocLength + doc = data[start:end] + if doc.startswith(b"\x1f\x8b"): + import gzip + bytesIO = BytesIO(doc) + with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper: + doc = gunzipper.read() + self.compressed = True + del bytesIO + doc = tostr(doc, "utf_8") + self.docList.append( [doc, entry.startGlyphID, entry.endGlyphID] ) + + def compile(self, ttFont): + if hasattr(self, "version1"): + data = self.compileFormat1(ttFont) + else: + data = self.compileFormat0(ttFont) + return data + + def compileFormat0(self, ttFont): + version = 0 + offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header. + # get SGVDoc info. 
+ docList = [] + entryList = [] + numEntries = len(self.docList) + datum = struct.pack(">H",numEntries) + entryList.append(datum) + curOffset = len(datum) + doc_index_entry_format_0Size*numEntries + for doc, startGlyphID, endGlyphID in self.docList: + docOffset = curOffset + docBytes = tobytes(doc, encoding="utf_8") + if getattr(self, "compressed", False) and not docBytes.startswith(b"\x1f\x8b"): + import gzip + bytesIO = BytesIO() + with gzip.GzipFile(None, "w", fileobj=bytesIO) as gzipper: + gzipper.write(docBytes) + gzipped = bytesIO.getvalue() + if len(gzipped) < len(docBytes): + docBytes = gzipped + del gzipped, bytesIO + docLength = len(docBytes) + curOffset += docLength + entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength) + entryList.append(entry) + docList.append(docBytes) + entryList.extend(docList) + svgDocData = bytesjoin(entryList) + + # get colorpalette info. + if self.colorPalettes is None: + offsetToColorPalettes = 0 + palettesData = "" + else: + offsetToColorPalettes = SVG_format_0Size + len(svgDocData) + dataList = [] + numColorParams = len(self.colorPalettes.colorParamUINameIDs) + datum = struct.pack(">H", numColorParams) + dataList.append(datum) + for uiNameId in self.colorPalettes.colorParamUINameIDs: + datum = struct.pack(">H", uiNameId) + dataList.append(datum) + numColorPalettes = len(self.colorPalettes.colorPaletteList) + datum = struct.pack(">H", numColorPalettes) + dataList.append(datum) + for colorPalette in self.colorPalettes.colorPaletteList: + datum = struct.pack(">H", colorPalette.uiNameID) + dataList.append(datum) + for colorRecord in colorPalette.paletteColors: + data = struct.pack(">BBBB", colorRecord.red, colorRecord.green, colorRecord.blue, colorRecord.alpha) + dataList.append(data) + palettesData = bytesjoin(dataList) + + header = struct.pack(">HLL", version, offsetToSVGDocIndex, offsetToColorPalettes) + data = [header, svgDocData, palettesData] + data = bytesjoin(data) + return data + + def 
compileFormat1(self, ttFont): + version = 1 + numEntries = len(self.docList) + header = struct.pack(">HH", version, numEntries) + dataList = [header] + docList = [] + curOffset = SVG_format_1Size + doc_index_entry_format_0Size*numEntries + for doc, startGlyphID, endGlyphID in self.docList: + docOffset = curOffset + docBytes = tobytes(doc, encoding="utf_8") + docLength = len(docBytes) + curOffset += docLength + entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength) + dataList.append(entry) + docList.append(docBytes) + dataList.extend(docList) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.newline() + for doc, startGID, endGID in self.docList: + writer.begintag("svgDoc", startGlyphID=startGID, endGlyphID=endGID) + writer.newline() + writer.writecdata(doc) + writer.newline() + writer.endtag("svgDoc") + writer.newline() + + if (self.colorPalettes is not None) and (self.colorPalettes.numColorParams is not None): + writer.begintag("colorPalettes") + writer.newline() + for uiNameID in self.colorPalettes.colorParamUINameIDs: + writer.begintag("colorParamUINameID") + writer.writeraw(str(uiNameID)) + writer.endtag("colorParamUINameID") + writer.newline() + for colorPalette in self.colorPalettes.colorPaletteList: + writer.begintag("colorPalette", [("uiNameID", str(colorPalette.uiNameID))]) + writer.newline() + for colorRecord in colorPalette.paletteColors: + colorAttributes = [ + ("red", hex(colorRecord.red)), + ("green", hex(colorRecord.green)), + ("blue", hex(colorRecord.blue)), + ("alpha", hex(colorRecord.alpha)), + ] + writer.begintag("colorRecord", colorAttributes) + writer.endtag("colorRecord") + writer.newline() + writer.endtag("colorPalette") + writer.newline() + + writer.endtag("colorPalettes") + writer.newline() + else: + writer.begintag("colorPalettes") + writer.endtag("colorPalettes") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "svgDoc": + if not hasattr(self, 
"docList"): + self.docList = [] + doc = strjoin(content) + doc = doc.strip() + startGID = int(attrs["startGlyphID"]) + endGID = int(attrs["endGlyphID"]) + self.docList.append( [doc, startGID, endGID] ) + elif name == "colorPalettes": + self.colorPalettes = ColorPalettes() + self.colorPalettes.fromXML(name, attrs, content, ttFont) + if self.colorPalettes.numColorParams == 0: + self.colorPalettes = None + else: + print("Unknown", name, content) + +class DocumentIndexEntry(object): + def __init__(self): + self.startGlyphID = None # USHORT + self.endGlyphID = None # USHORT + self.svgDocOffset = None # ULONG + self.svgDocLength = None # ULONG + + def __repr__(self): + return "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength) + +class ColorPalettes(object): + def __init__(self): + self.numColorParams = None # USHORT + self.colorParamUINameIDs = [] # list of name table name ID values that provide UI description of each color palette. + self.numColorPalettes = None # USHORT + self.colorPaletteList = [] # list of ColorPalette records + + def fromXML(self, name, attrs, content, ttFont): + for element in content: + if isinstance(element, type("")): + continue + name, attrib, content = element + if name == "colorParamUINameID": + uiNameID = int(content[0]) + self.colorParamUINameIDs.append(uiNameID) + elif name == "colorPalette": + colorPalette = ColorPalette() + self.colorPaletteList.append(colorPalette) + colorPalette.fromXML((name, attrib, content), ttFont) + + self.numColorParams = len(self.colorParamUINameIDs) + self.numColorPalettes = len(self.colorPaletteList) + for colorPalette in self.colorPaletteList: + if len(colorPalette.paletteColors) != self.numColorParams: + raise ValueError("Number of color records in a colorPalette ('%s') does not match the number of colorParamUINameIDs elements ('%s')." 
% (len(colorPalette.paletteColors), self.numColorParams)) + +class ColorPalette(object): + def __init__(self): + self.uiNameID = None # USHORT. name table ID that describes user interface strings associated with this color palette. + self.paletteColors = [] # list of ColorRecords + + def fromXML(self, name, attrs, content, ttFont): + self.uiNameID = int(attrs["uiNameID"]) + for element in content: + if isinstance(element, type("")): + continue + name, attrib, content = element + if name == "colorRecord": + colorRecord = ColorRecord() + self.paletteColors.append(colorRecord) + colorRecord.red = eval(attrib["red"]) + colorRecord.green = eval(attrib["green"]) + colorRecord.blue = eval(attrib["blue"]) + colorRecord.alpha = eval(attrib["alpha"]) + +class ColorRecord(object): + def __init__(self): + self.red = 255 # all are one byte values. + self.green = 255 + self.blue = 255 + self.alpha = 255 diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/table_API_readme.txt fonttools-3.0/Snippets/fontTools/ttLib/tables/table_API_readme.txt --- fonttools-2.4/Snippets/fontTools/ttLib/tables/table_API_readme.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/table_API_readme.txt 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,91 @@ +This folder is a subpackage of ttLib. Each module here is a +specialized TT/OT table converter: they can convert raw data +to Python objects and vice versa. Usually you don't need to +use the modules directly: they are imported and used +automatically when needed by ttLib. + +If you are writing you own table converter the following is +important. + +The modules here have pretty strange names: this is due to the +fact that we need to map TT table tags (which are case sensitive) +to filenames (which on Mac and Win aren't case sensitive) as well +as to Python identifiers. The latter means it can only contain +[A-Za-z0-9_] and cannot start with a number. 
+ +ttLib provides functions to expand a tag into the format used here: + +>>> from fontTools import ttLib +>>> ttLib.tagToIdentifier("FOO ") +'F_O_O_' +>>> ttLib.tagToIdentifier("cvt ") +'_c_v_t' +>>> ttLib.tagToIdentifier("OS/2") +'O_S_2f_2' +>>> ttLib.tagToIdentifier("glyf") +'_g_l_y_f' +>>> + +And vice versa: + +>>> ttLib.identifierToTag("F_O_O_") +'FOO ' +>>> ttLib.identifierToTag("_c_v_t") +'cvt ' +>>> ttLib.identifierToTag("O_S_2f_2") +'OS/2' +>>> ttLib.identifierToTag("_g_l_y_f") +'glyf' +>>> + +Eg. the 'glyf' table converter lives in a Python file called: + + _g_l_y_f.py + +The converter itself is a class, named "table_" + expandedtag. Eg: + + class table__g_l_y_f: + etc. + +Note that if you _do_ need to use such modules or classes manually, +there are two convenient API functions that let you find them by tag: + +>>> ttLib.getTableModule('glyf') +<module 'ttLib.tables._g_l_y_f'> +>>> ttLib.getTableClass('glyf') +<class ttLib.tables._g_l_y_f.table__g_l_y_f at 645f400> +>>> + +You must subclass from DefaultTable.DefaultTable. It provides some default +behavior, as well as a constructor method (__init__) that you don't need to +override. + +Your converter should minimally provide two methods: + +class table_F_O_O_(DefaultTable.DefaultTable): # converter for table 'FOO ' + + def decompile(self, data, ttFont): + # 'data' is the raw table data. Unpack it into a + # Python data structure. + # 'ttFont' is a ttLib.TTfile instance, enabling you to + # refer to other tables. Do ***not*** keep a reference to + # it: it will cause a circular reference (ttFont saves + # a reference to us), and that means we'll be leaking + # memory. If you need to use it in other methods, just + # pass it around as a method argument. + + def compile(self, ttFont): + # Return the raw data, as converted from the Python + # data structure. + # Again, 'ttFont' is there so you can access other tables. + # Same warning applies. 
+ +If you want to support TTX import/export as well, you need to provide two +additional methods: + + def toXML(self, writer, ttFont): + # XXX + + def fromXML(self, (name, attrs, content), ttFont): + # XXX + diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I__0.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__0.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I__0.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__0.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,49 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable +import struct + +tsi0Format = '>HHl' + +def fixlongs(glyphID, textLength, textOffset): + return int(glyphID), int(textLength), textOffset + + +class table_T_S_I__0(DefaultTable.DefaultTable): + + dependencies = ["TSI1"] + + def decompile(self, data, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs + indices = [] + size = struct.calcsize(tsi0Format) + for i in range(numGlyphs + 5): + glyphID, textLength, textOffset = fixlongs(*struct.unpack(tsi0Format, data[:size])) + indices.append((glyphID, textLength, textOffset)) + data = data[size:] + assert len(data) == 0 + assert indices[-5] == (0XFFFE, 0, -1409540300), "bad magic number" # 0xABFC1F34 + self.indices = indices[:-5] + self.extra_indices = indices[-4:] + + def compile(self, ttFont): + if not hasattr(self, "indices"): + # We have no corresponding table (TSI1 or TSI3); let's return + # no data, which effectively means "ignore us". 
+ return "" + data = b"" + for index, textLength, textOffset in self.indices: + data = data + struct.pack(tsi0Format, index, textLength, textOffset) + data = data + struct.pack(tsi0Format, 0XFFFE, 0, -1409540300) # 0xABFC1F34 + for index, textLength, textOffset in self.extra_indices: + data = data + struct.pack(tsi0Format, index, textLength, textOffset) + return data + + def set(self, indices, extra_indices): + # gets called by 'TSI1' or 'TSI3' + self.indices = indices + self.extra_indices = extra_indices + + def toXML(self, writer, ttFont): + writer.comment("This table will be calculated by the compiler") + writer.newline() diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I__1.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__1.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I__1.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__1.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,116 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable + +class table_T_S_I__1(DefaultTable.DefaultTable): + + extras = {0xfffa: "ppgm", 0xfffb: "cvt", 0xfffc: "reserved", 0xfffd: "fpgm"} + + indextable = "TSI0" + + def decompile(self, data, ttFont): + indextable = ttFont[self.indextable] + self.glyphPrograms = {} + for i in range(len(indextable.indices)): + glyphID, textLength, textOffset = indextable.indices[i] + if textLength == 0x8000: + # Ugh. Hi Beat! + textLength = indextable.indices[i+1][1] + if textLength > 0x8000: + pass # XXX Hmmm. 
+ text = data[textOffset:textOffset+textLength] + assert len(text) == textLength + if text: + self.glyphPrograms[ttFont.getGlyphName(glyphID)] = text + + self.extraPrograms = {} + for i in range(len(indextable.extra_indices)): + extraCode, textLength, textOffset = indextable.extra_indices[i] + if textLength == 0x8000: + if self.extras[extraCode] == "fpgm": # this is the last one + textLength = len(data) - textOffset + else: + textLength = indextable.extra_indices[i+1][1] + text = data[textOffset:textOffset+textLength] + assert len(text) == textLength + if text: + self.extraPrograms[self.extras[extraCode]] = text + + def compile(self, ttFont): + if not hasattr(self, "glyphPrograms"): + self.glyphPrograms = {} + self.extraPrograms = {} + data = b'' + indextable = ttFont[self.indextable] + glyphNames = ttFont.getGlyphOrder() + + indices = [] + for i in range(len(glyphNames)): + if len(data) % 2: + data = data + b"\015" # align on 2-byte boundaries, fill with return chars. Yum. + name = glyphNames[i] + if name in self.glyphPrograms: + text = tobytes(self.glyphPrograms[name]) + else: + text = b"" + textLength = len(text) + if textLength >= 0x8000: + textLength = 0x8000 # XXX ??? + indices.append((i, textLength, len(data))) + data = data + text + + extra_indices = [] + codes = sorted(self.extras.items()) + for i in range(len(codes)): + if len(data) % 2: + data = data + b"\015" # align on 2-byte boundaries, fill with return chars. + code, name = codes[i] + if name in self.extraPrograms: + text = tobytes(self.extraPrograms[name]) + else: + text = b"" + textLength = len(text) + if textLength >= 0x8000: + textLength = 0x8000 # XXX ??? 
+ extra_indices.append((code, textLength, len(data))) + data = data + text + indextable.set(indices, extra_indices) + return data + + def toXML(self, writer, ttFont): + names = sorted(self.glyphPrograms.keys()) + writer.newline() + for name in names: + text = self.glyphPrograms[name] + if not text: + continue + writer.begintag("glyphProgram", name=name) + writer.newline() + writer.write_noindent(text.replace(b"\r", b"\n")) + writer.newline() + writer.endtag("glyphProgram") + writer.newline() + writer.newline() + extra_names = sorted(self.extraPrograms.keys()) + for name in extra_names: + text = self.extraPrograms[name] + if not text: + continue + writer.begintag("extraProgram", name=name) + writer.newline() + writer.write_noindent(text.replace(b"\r", b"\n")) + writer.newline() + writer.endtag("extraProgram") + writer.newline() + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "glyphPrograms"): + self.glyphPrograms = {} + self.extraPrograms = {} + lines = strjoin(content).replace("\r", "\n").split("\n") + text = '\r'.join(lines[1:-1]) + if name == "glyphProgram": + self.glyphPrograms[attrs["name"]] = text + elif name == "extraProgram": + self.extraPrograms[attrs["name"]] = text diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I__2.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__2.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I__2.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__2.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,9 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib + +superclass = ttLib.getTableClass("TSI0") + +class table_T_S_I__2(superclass): + + dependencies = ["TSI3"] diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I__3.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__3.py --- 
fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I__3.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__3.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,11 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib + +superclass = ttLib.getTableClass("TSI1") + +class table_T_S_I__3(superclass): + + extras = {0xfffa: "reserved0", 0xfffb: "reserved1", 0xfffc: "reserved2", 0xfffd: "reserved3"} + + indextable = "TSI2" diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I__5.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__5.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I__5.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__5.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,42 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import sys +import array + + +class table_T_S_I__5(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs + assert len(data) == 2 * numGlyphs + a = array.array("H") + a.fromstring(data) + if sys.byteorder != "big": + a.byteswap() + self.glyphGrouping = {} + for i in range(numGlyphs): + self.glyphGrouping[ttFont.getGlyphName(i)] = a[i] + + def compile(self, ttFont): + glyphNames = ttFont.getGlyphOrder() + a = array.array("H") + for i in range(len(glyphNames)): + a.append(self.glyphGrouping[glyphNames[i]]) + if sys.byteorder != "big": + a.byteswap() + return a.tostring() + + def toXML(self, writer, ttFont): + names = sorted(self.glyphGrouping.keys()) + for glyphName in names: + writer.simpletag("glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName]) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "glyphGrouping"): + self.glyphGrouping = {} + if name != "glyphgroup": + return + self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"]) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_B_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_B_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_B_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_B_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import asciiTable + +class table_T_S_I_B_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_D_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_D_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_D_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_D_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable + +class table_T_S_I_D_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_J_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_J_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_J_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_J_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable + +class table_T_S_I_J_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_P_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_P_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_P_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_P_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import asciiTable + +class table_T_S_I_P_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_S_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_S_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_S_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_S_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable + +class table_T_S_I_S_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_V_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_V_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/T_S_I_V_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_V_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import asciiTable + +class table_T_S_I_V_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/ttProgram.py fonttools-3.0/Snippets/fontTools/ttLib/tables/ttProgram.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/ttProgram.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/ttProgram.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,498 @@ +"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import num2binary, binary2num, readHex +import array +import re + +# first, the list of instructions that eat bytes or words from the instruction stream + +streamInstructions = [ +# +# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes +# + (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn + (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn + (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn + (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn +] + + +# next, the list of "normal" instructions + +instructions = [ +# +#, opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes +# + (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p - + (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n| + (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2) + (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 - + (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... 
, ploopvalue - + (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b + (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f - + (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n) + (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek + (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack - + (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n - + (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n + (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2 + (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e + (0x59, 'EIF', 0, 'EndIf', 0, 0), # - - + (0x1b, 'ELSE', 0, 'Else', 0, 0), # - - + (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - - + (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b + (0x57, 'EVEN', 0, 'Even', 1, 1), # e b + (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f - + (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - - + (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - - + (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue - + (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l - + (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l - + (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n) + (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c + (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result + (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py + (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py + (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b + (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b + (0x89, 
'IDEF', 0, 'InstructionDefinition', 1, 0), # f - + (0x58, 'IF', 0, 'If', 1, 0), # e - + (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v - + (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue - + (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p - + (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - - + (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset - + (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset - + (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset - + (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count - + (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b + (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b + (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2) + (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d + (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p - + (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p - + (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p - + (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2) + (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek + (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p - + (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem + (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize + (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p - + (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64 + (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n + (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b + (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e ) + (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2 + (0x56, 'ODD', 0, 'Odd', 1, 1), # e b + (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b + (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e - + (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value + (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - - + (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - - + (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c + (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2 + (0x43, 'RS', 0, 'ReadStore', 1, 1), # 
n v + (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - - + (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - - + (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - - + (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - - + (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n - + (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight - + (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n - + (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n - + (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p - + (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n - + (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n - + (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 - + (0x5f, 'SDS', 0, 'SetDeltaShiftInGState',1, 0), # n - + (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x - + (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - - + (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 - + (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - - + (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c - + (0x32, 'SHP', 1, 'ShiftPointByLastPoint',-1, 0), # p1, p2, ..., ploopvalue - + (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue - + (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e - + (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n - + (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance - + (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x - + (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - - + (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 - + (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n - + (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p - + (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p - + (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p - + (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n - + (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n - + (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2) + (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - - + (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2 + (0x13, 
'SZP0', 0, 'SetZonePointer0', 1, 0), # n - + (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n - + (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n - + (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n - + (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p - + (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l - + (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l - + (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l - +] + + +def bitRepr(value, bits): + s = "" + for i in range(bits): + s = "01"[value & 0x1] + s + value = value >> 1 + return s + + +_mnemonicPat = re.compile("[A-Z][A-Z0-9]*$") + +def _makeDict(instructionList): + opcodeDict = {} + mnemonicDict = {} + for op, mnemonic, argBits, name, pops, pushes in instructionList: + assert _mnemonicPat.match(mnemonic) + mnemonicDict[mnemonic] = op, argBits, name + if argBits: + argoffset = op + for i in range(1 << argBits): + opcodeDict[op+i] = mnemonic, argBits, argoffset, name + else: + opcodeDict[op] = mnemonic, 0, 0, name + return opcodeDict, mnemonicDict + +streamOpcodeDict, streamMnemonicDict = _makeDict(streamInstructions) +opcodeDict, mnemonicDict = _makeDict(instructions) + +class tt_instructions_error(Exception): + def __init__(self, error): + self.error = error + def __str__(self): + return "TT instructions error: %s" % repr(self.error) + + +_comment = r"/\*.*?\*/" +_instruction = r"([A-Z][A-Z0-9]*)\s*\[(.*?)\]" +_number = r"-?[0-9]+" +_token = "(%s)|(%s)|(%s)" % (_instruction, _number, _comment) + +_tokenRE = re.compile(_token) +_whiteRE = re.compile(r"\s*") + +_pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/") + + +def _skipWhite(data, pos): + m = _whiteRE.match(data, pos) + newPos = m.regs[0][1] + assert newPos >= pos + return newPos + + +class Program(object): + + def __init__(self): + pass + + def fromBytecode(self, bytecode): + self.bytecode = array.array("B", bytecode) + if hasattr(self, "assembly"): + del self.assembly + + def fromAssembly(self, assembly): + self.assembly = assembly + if 
hasattr(self, "bytecode"): + del self.bytecode + + def getBytecode(self): + if not hasattr(self, "bytecode"): + self._assemble() + return self.bytecode.tostring() + + def getAssembly(self, preserve=False): + if not hasattr(self, "assembly"): + self._disassemble(preserve=preserve) + return self.assembly + + def toXML(self, writer, ttFont): + if not hasattr (ttFont, "disassembleInstructions") or ttFont.disassembleInstructions: + assembly = self.getAssembly() + writer.begintag("assembly") + writer.newline() + i = 0 + nInstr = len(assembly) + while i < nInstr: + instr = assembly[i] + writer.write(instr) + writer.newline() + m = _pushCountPat.match(instr) + i = i + 1 + if m: + nValues = int(m.group(1)) + line = [] + j = 0 + for j in range(nValues): + if j and not (j % 25): + writer.write(' '.join(line)) + writer.newline() + line = [] + line.append(assembly[i+j]) + writer.write(' '.join(line)) + writer.newline() + i = i + j + 1 + writer.endtag("assembly") + else: + writer.begintag("bytecode") + writer.newline() + writer.dumphex(self.getBytecode()) + writer.endtag("bytecode") + + def fromXML(self, name, attrs, content, ttFont): + if name == "assembly": + self.fromAssembly(strjoin(content)) + self._assemble() + del self.assembly + else: + assert name == "bytecode" + self.fromBytecode(readHex(content)) + + def _assemble(self): + assembly = self.assembly + if isinstance(assembly, type([])): + assembly = ' '.join(assembly) + bytecode = [] + push = bytecode.append + lenAssembly = len(assembly) + pos = _skipWhite(assembly, 0) + while pos < lenAssembly: + m = _tokenRE.match(assembly, pos) + if m is None: + raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos-5:pos+15]) + dummy, mnemonic, arg, number, comment = m.groups() + pos = m.regs[0][1] + if comment: + pos = _skipWhite(assembly, pos) + continue + + arg = arg.strip() + if mnemonic.startswith("INSTR"): + # Unknown instruction + op = int(mnemonic[5:]) + push(op) + elif mnemonic not in ("PUSH", "NPUSHB", 
"NPUSHW", "PUSHB", "PUSHW"): + op, argBits, name = mnemonicDict[mnemonic] + if len(arg) != argBits: + raise tt_instructions_error("Incorrect number of argument bits (%s[%s])" % (mnemonic, arg)) + if arg: + arg = binary2num(arg) + push(op + arg) + else: + push(op) + else: + args = [] + pos = _skipWhite(assembly, pos) + while pos < lenAssembly: + m = _tokenRE.match(assembly, pos) + if m is None: + raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos:pos+15]) + dummy, _mnemonic, arg, number, comment = m.groups() + if number is None and comment is None: + break + pos = m.regs[0][1] + pos = _skipWhite(assembly, pos) + if comment is not None: + continue + args.append(int(number)) + nArgs = len(args) + if mnemonic == "PUSH": + # Automatically choose the most compact representation + nWords = 0 + while nArgs: + while nWords < nArgs and nWords < 255 and not (0 <= args[nWords] <= 255): + nWords += 1 + nBytes = 0 + while nWords+nBytes < nArgs and nBytes < 255 and 0 <= args[nWords+nBytes] <= 255: + nBytes += 1 + if nBytes < 2 and nWords + nBytes < 255 and nWords + nBytes != nArgs: + # Will write bytes as words + nWords += nBytes + continue + + # Write words + if nWords: + if nWords <= 8: + op, argBits, name = streamMnemonicDict["PUSHW"] + op = op + nWords - 1 + push(op) + else: + op, argBits, name = streamMnemonicDict["NPUSHW"] + push(op) + push(nWords) + for value in args[:nWords]: + assert -32768 <= value < 32768, "PUSH value out of range %d" % value + push((value >> 8) & 0xff) + push(value & 0xff) + + # Write bytes + if nBytes: + pass + if nBytes <= 8: + op, argBits, name = streamMnemonicDict["PUSHB"] + op = op + nBytes - 1 + push(op) + else: + op, argBits, name = streamMnemonicDict["NPUSHB"] + push(op) + push(nBytes) + for value in args[nWords:nWords+nBytes]: + push(value) + + nTotal = nWords + nBytes + args = args[nTotal:] + nArgs -= nTotal + nWords = 0 + else: + # Write exactly what we've been asked to + words = mnemonic[-1] == "W" + op, argBits, 
name = streamMnemonicDict[mnemonic] + if mnemonic[0] != "N": + assert nArgs <= 8, nArgs + op = op + nArgs - 1 + push(op) + else: + assert nArgs < 256 + push(op) + push(nArgs) + if words: + for value in args: + assert -32768 <= value < 32768, "PUSHW value out of range %d" % value + push((value >> 8) & 0xff) + push(value & 0xff) + else: + for value in args: + assert 0 <= value < 256, "PUSHB value out of range %d" % value + push(value) + + pos = _skipWhite(assembly, pos) + + if bytecode: + assert max(bytecode) < 256 and min(bytecode) >= 0 + self.bytecode = array.array("B", bytecode) + + def _disassemble(self, preserve=False): + assembly = [] + i = 0 + bytecode = self.bytecode + numBytecode = len(bytecode) + while i < numBytecode: + op = bytecode[i] + try: + mnemonic, argBits, argoffset, name = opcodeDict[op] + except KeyError: + if op in streamOpcodeDict: + values = [] + + # Merge consecutive PUSH operations + while bytecode[i] in streamOpcodeDict: + op = bytecode[i] + mnemonic, argBits, argoffset, name = streamOpcodeDict[op] + words = mnemonic[-1] == "W" + if argBits: + nValues = op - argoffset + 1 + else: + i = i + 1 + nValues = bytecode[i] + i = i + 1 + assert nValues > 0 + if not words: + for j in range(nValues): + value = bytecode[i] + values.append(repr(value)) + i = i + 1 + else: + for j in range(nValues): + # cast to signed int16 + value = (bytecode[i] << 8) | bytecode[i+1] + if value >= 0x8000: + value = value - 0x10000 + values.append(repr(value)) + i = i + 2 + if preserve: + break + + if not preserve: + mnemonic = "PUSH" + nValues = len(values) + if nValues == 1: + assembly.append("%s[ ] /* 1 value pushed */" % mnemonic) + else: + assembly.append("%s[ ] /* %s values pushed */" % (mnemonic, nValues)) + assembly.extend(values) + else: + assembly.append("INSTR%d[ ]" % op) + i = i + 1 + else: + if argBits: + assembly.append(mnemonic + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name)) + else: + assembly.append(mnemonic + "[ ] /* %s */" % name) + i = 
i + 1 + self.assembly = assembly + + def __bool__(self): + """ + >>> p = Program() + >>> bool(p) + False + >>> bc = array.array("B", [0]) + >>> p.fromBytecode(bc) + >>> bool(p) + True + >>> p.bytecode.pop() + 0 + >>> bool(p) + False + + >>> p = Program() + >>> asm = ['SVTCA[0]'] + >>> p.fromAssembly(asm) + >>> bool(p) + True + >>> p.assembly.pop() + 'SVTCA[0]' + >>> bool(p) + False + """ + return ((hasattr(self, 'assembly') and len(self.assembly) > 0) or + (hasattr(self, 'bytecode') and len(self.bytecode) > 0)) + + __nonzero__ = __bool__ + + +def _test(): + """ + >>> _test() + True + """ + + bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
\212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 
F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-""" + + p = Program() + p.fromBytecode(bc) + asm = p.getAssembly(preserve=True) + p.fromAssembly(asm) + print(bc == p.getBytecode()) + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/V_D_M_X_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/V_D_M_X_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/V_D_M_X_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/V_D_M_X_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,234 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +import struct + +VDMX_HeaderFmt = """ + > # big endian + version: H # Version number (0 or 1) + numRecs: H # Number of VDMX groups present + numRatios: H # Number of aspect ratio groupings +""" +# the VMDX header is followed by an array of RatRange[numRatios] (i.e. 
aspect +# ratio ranges); +VDMX_RatRangeFmt = """ + > # big endian + bCharSet: B # Character set + xRatio: B # Value to use for x-Ratio + yStartRatio: B # Starting y-Ratio value + yEndRatio: B # Ending y-Ratio value +""" +# followed by an array of offset[numRatios] from start of VDMX table to the +# VDMX Group for this ratio range (offsets will be re-calculated on compile); +# followed by an array of Group[numRecs] records; +VDMX_GroupFmt = """ + > # big endian + recs: H # Number of height records in this group + startsz: B # Starting yPelHeight + endsz: B # Ending yPelHeight +""" +# followed by an array of vTable[recs] records. +VDMX_vTableFmt = """ + > # big endian + yPelHeight: H # yPelHeight to which values apply + yMax: h # Maximum value (in pels) for this yPelHeight + yMin: h # Minimum value (in pels) for this yPelHeight +""" + + +class table_V_D_M_X_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + pos = 0 # track current position from to start of VDMX table + dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self) + pos += sstruct.calcsize(VDMX_HeaderFmt) + self.ratRanges = [] + for i in range(self.numRatios): + ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data) + pos += sstruct.calcsize(VDMX_RatRangeFmt) + # the mapping between a ratio and a group is defined further below + ratio['groupIndex'] = None + self.ratRanges.append(ratio) + lenOffset = struct.calcsize('>H') + _offsets = [] # temporarily store offsets to groups + for i in range(self.numRatios): + offset = struct.unpack('>H', data[0:lenOffset])[0] + data = data[lenOffset:] + pos += lenOffset + _offsets.append(offset) + self.groups = [] + for groupIndex in range(self.numRecs): + # the offset to this group from beginning of the VDMX table + currOffset = pos + group, data = sstruct.unpack2(VDMX_GroupFmt, data) + # the group lenght and bounding sizes are re-calculated on compile + recs = group.pop('recs') + startsz = group.pop('startsz') + endsz = group.pop('endsz') + pos += 
sstruct.calcsize(VDMX_GroupFmt) + for j in range(recs): + vTable, data = sstruct.unpack2(VDMX_vTableFmt, data) + vTableLength = sstruct.calcsize(VDMX_vTableFmt) + pos += vTableLength + # group is a dict of (yMax, yMin) tuples keyed by yPelHeight + group[vTable['yPelHeight']] = (vTable['yMax'], vTable['yMin']) + # make sure startsz and endsz match the calculated values + minSize = min(group.keys()) + maxSize = max(group.keys()) + assert startsz == minSize, \ + "startsz (%s) must equal min yPelHeight (%s): group %d" % \ + (group.startsz, minSize, groupIndex) + assert endsz == maxSize, \ + "endsz (%s) must equal max yPelHeight (%s): group %d" % \ + (group.endsz, maxSize, groupIndex) + self.groups.append(group) + # match the defined offsets with the current group's offset + for offsetIndex, offsetValue in enumerate(_offsets): + # when numRecs < numRatios there can more than one ratio range + # sharing the same VDMX group + if currOffset == offsetValue: + # map the group with the ratio range thas has the same + # index as the offset to that group (it took me a while..) + self.ratRanges[offsetIndex]['groupIndex'] = groupIndex + # check that all ratio ranges have a group + for i in range(self.numRatios): + ratio = self.ratRanges[i] + if ratio['groupIndex'] is None: + from fontTools import ttLib + raise ttLib.TTLibError( + "no group defined for ratRange %d" % i) + + def _getOffsets(self): + """ + Calculate offsets to VDMX_Group records. + For each ratRange return a list of offset values from the beginning of + the VDMX table to a VDMX_Group. 
+ """ + lenHeader = sstruct.calcsize(VDMX_HeaderFmt) + lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt) + lenOffset = struct.calcsize('>H') + lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt) + lenVTable = sstruct.calcsize(VDMX_vTableFmt) + # offset to the first group + pos = lenHeader + self.numRatios*lenRatRange + self.numRatios*lenOffset + groupOffsets = [] + for group in self.groups: + groupOffsets.append(pos) + lenGroup = lenGroupHeader + len(group) * lenVTable + pos += lenGroup # offset to next group + offsets = [] + for ratio in self.ratRanges: + groupIndex = ratio['groupIndex'] + offsets.append(groupOffsets[groupIndex]) + return offsets + + def compile(self, ttFont): + if not(self.version == 0 or self.version == 1): + from fontTools import ttLib + raise ttLib.TTLibError( + "unknown format for VDMX table: version %s" % self.version) + data = sstruct.pack(VDMX_HeaderFmt, self) + for ratio in self.ratRanges: + data += sstruct.pack(VDMX_RatRangeFmt, ratio) + # recalculate offsets to VDMX groups + for offset in self._getOffsets(): + data += struct.pack('>H', offset) + for group in self.groups: + recs = len(group) + startsz = min(group.keys()) + endsz = max(group.keys()) + gHeader = {'recs': recs, 'startsz': startsz, 'endsz': endsz} + data += sstruct.pack(VDMX_GroupFmt, gHeader) + for yPelHeight, (yMax, yMin) in sorted(group.items()): + vTable = {'yPelHeight': yPelHeight, 'yMax': yMax, 'yMin': yMin} + data += sstruct.pack(VDMX_vTableFmt, vTable) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + writer.begintag("ratRanges") + writer.newline() + for ratio in self.ratRanges: + groupIndex = ratio['groupIndex'] + writer.simpletag( + "ratRange", + bCharSet=ratio['bCharSet'], + xRatio=ratio['xRatio'], + yStartRatio=ratio['yStartRatio'], + yEndRatio=ratio['yEndRatio'], + groupIndex=groupIndex + ) + writer.newline() + writer.endtag("ratRanges") + writer.newline() + writer.begintag("groups") + 
writer.newline() + for groupIndex in range(self.numRecs): + group = self.groups[groupIndex] + recs = len(group) + startsz = min(group.keys()) + endsz = max(group.keys()) + writer.begintag("group", index=groupIndex) + writer.newline() + writer.comment("recs=%d, startsz=%d, endsz=%d" % + (recs, startsz, endsz)) + writer.newline() + for yPelHeight in group.keys(): + yMax, yMin = group[yPelHeight] + writer.simpletag( + "record", yPelHeight=yPelHeight, yMax=yMax, yMin=yMin) + writer.newline() + writer.endtag("group") + writer.newline() + writer.endtag("groups") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.version = safeEval(attrs["value"]) + elif name == "ratRanges": + if not hasattr(self, "ratRanges"): + self.ratRanges = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "ratRange": + if not hasattr(self, "numRatios"): + self.numRatios = 1 + else: + self.numRatios += 1 + ratio = { + "bCharSet": safeEval(attrs["bCharSet"]), + "xRatio": safeEval(attrs["xRatio"]), + "yStartRatio": safeEval(attrs["yStartRatio"]), + "yEndRatio": safeEval(attrs["yEndRatio"]), + "groupIndex": safeEval(attrs["groupIndex"]) + } + self.ratRanges.append(ratio) + elif name == "groups": + if not hasattr(self, "groups"): + self.groups = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "group": + if not hasattr(self, "numRecs"): + self.numRecs = 1 + else: + self.numRecs += 1 + group = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "record": + yPelHeight = safeEval(attrs["yPelHeight"]) + yMax = safeEval(attrs["yMax"]) + yMin = safeEval(attrs["yMin"]) + group[yPelHeight] = (yMax, yMin) + self.groups.append(group) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_v_h_e_a.py 
fonttools-3.0/Snippets/fontTools/ttLib/tables/_v_h_e_a.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_v_h_e_a.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_v_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,90 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable + +vheaFormat = """ + > # big endian + tableVersion: 16.16F + ascent: h + descent: h + lineGap: h + advanceHeightMax: H + minTopSideBearing: h + minBottomSideBearing: h + yMaxExtent: h + caretSlopeRise: h + caretSlopeRun: h + reserved0: h + reserved1: h + reserved2: h + reserved3: h + reserved4: h + metricDataFormat: h + numberOfVMetrics: H +""" + +class table__v_h_e_a(DefaultTable.DefaultTable): + + # Note: Keep in sync with table__h_h_e_a + + dependencies = ['vmtx', 'glyf'] + + def decompile(self, data, ttFont): + sstruct.unpack(vheaFormat, data, self) + + def compile(self, ttFont): + if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + self.recalc(ttFont) + return sstruct.pack(vheaFormat, self) + + def recalc(self, ttFont): + vtmxTable = ttFont['vmtx'] + if 'glyf' in ttFont: + glyfTable = ttFont['glyf'] + INFINITY = 100000 + advanceHeightMax = 0 + minTopSideBearing = +INFINITY # arbitrary big number + minBottomSideBearing = +INFINITY # arbitrary big number + yMaxExtent = -INFINITY # arbitrary big negative number + + for name in ttFont.getGlyphOrder(): + height, tsb = vtmxTable[name] + advanceHeightMax = max(advanceHeightMax, height) + g = glyfTable[name] + if g.numberOfContours == 0: + continue + if g.numberOfContours < 0 and not hasattr(g, "yMax"): + # Composite glyph without extents set. + # Calculate those. 
+ g.recalcBounds(glyfTable) + minTopSideBearing = min(minTopSideBearing, tsb) + bsb = height - tsb - (g.yMax - g.yMin) + minBottomSideBearing = min(minBottomSideBearing, bsb) + extent = tsb + (g.yMax - g.yMin) + yMaxExtent = max(yMaxExtent, extent) + + if yMaxExtent == -INFINITY: + # No glyph has outlines. + minTopSideBearing = 0 + minBottomSideBearing = 0 + yMaxExtent = 0 + + self.advanceHeightMax = advanceHeightMax + self.minTopSideBearing = minTopSideBearing + self.minBottomSideBearing = minBottomSideBearing + self.yMaxExtent = yMaxExtent + else: + # XXX CFF recalc... + pass + + def toXML(self, writer, ttFont): + formatstring, names, fixes = sstruct.getformat(vheaFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/_v_m_t_x.py fonttools-3.0/Snippets/fontTools/ttLib/tables/_v_m_t_x.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/_v_m_t_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/_v_m_t_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,12 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib + +superclass = ttLib.getTableClass("hmtx") + +class table__v_m_t_x(superclass): + + headerTag = 'vhea' + advanceName = 'height' + sideBearingName = 'tsb' + numberOfMetricsName = 'numberOfVMetrics' diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/tables/V_O_R_G_.py fonttools-3.0/Snippets/fontTools/ttLib/tables/V_O_R_G_.py --- fonttools-2.4/Snippets/fontTools/ttLib/tables/V_O_R_G_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/tables/V_O_R_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,140 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * 
+from fontTools.misc.textTools import safeEval +from . import DefaultTable +import operator +import struct + + +class table_V_O_R_G_(DefaultTable.DefaultTable): + + """ This table is structured so that you can treat it like a dictionary keyed by glyph name. + ttFont['VORG'][<glyphName>] will return the vertical origin for any glyph + ttFont['VORG'][<glyphName>] = <value> will set the vertical origin for any glyph. + """ + + def decompile(self, data, ttFont): + self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID + self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics = struct.unpack(">HHhH", data[:8]) + assert (self.majorVersion <= 1), "Major version of VORG table is higher than I know how to handle" + data = data[8:] + vids = [] + gids = [] + pos = 0 + for i in range(self.numVertOriginYMetrics): + gid, vOrigin = struct.unpack(">Hh", data[pos:pos+4]) + pos += 4 + gids.append(gid) + vids.append(vOrigin) + + self.VOriginRecords = vOrig = {} + glyphOrder = ttFont.getGlyphOrder() + try: + names = map(operator.getitem, [glyphOrder]*self.numVertOriginYMetrics, gids) + except IndexError: + getGlyphName = self.getGlyphName + names = map(getGlyphName, gids ) + + list(map(operator.setitem, [vOrig]*self.numVertOriginYMetrics, names, vids)) + + def compile(self, ttFont): + vorgs = list(self.VOriginRecords.values()) + names = list(self.VOriginRecords.keys()) + nameMap = ttFont.getReverseGlyphMap() + lenRecords = len(vorgs) + try: + gids = map(operator.getitem, [nameMap]*lenRecords, names) + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + gids = map(operator.getitem, [nameMap]*lenRecords, names) + vOriginTable = list(zip(gids, vorgs)) + self.numVertOriginYMetrics = lenRecords + vOriginTable.sort() # must be in ascending GID order + dataList = [ struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable] + header = struct.pack(">HHhH", self.majorVersion, self.minorVersion, 
self.defaultVertOriginY, self.numVertOriginYMetrics) + dataList.insert(0, header) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("majorVersion", value=self.majorVersion) + writer.newline() + writer.simpletag("minorVersion", value=self.minorVersion) + writer.newline() + writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY) + writer.newline() + writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics) + writer.newline() + vOriginTable = [] + glyphNames = self.VOriginRecords.keys() + for glyphName in glyphNames: + try: + gid = ttFont.getGlyphID(glyphName) + except: + assert 0, "VORG table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) + vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]]) + vOriginTable.sort() + for entry in vOriginTable: + vOriginRec = VOriginRecord(entry[1], entry[2]) + vOriginRec.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "VOriginRecords"): + self.VOriginRecords = {} + self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID + if name == "VOriginRecord": + vOriginRec = VOriginRecord() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + vOriginRec.fromXML(name, attrs, content, ttFont) + self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin + elif "value" in attrs: + setattr(self, name, safeEval(attrs["value"])) + + def __getitem__(self, glyphSelector): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = self.getGlyphName(glyphSelector) + + if glyphSelector not in self.VOriginRecords: + return self.defaultVertOriginY + + return self.VOriginRecords[glyphSelector] + + def __setitem__(self, glyphSelector, value): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = 
self.getGlyphName(glyphSelector) + + if value != self.defaultVertOriginY: + self.VOriginRecords[glyphSelector] = value + elif glyphSelector in self.VOriginRecords: + del self.VOriginRecords[glyphSelector] + + def __delitem__(self, glyphSelector): + del self.VOriginRecords[glyphSelector] + +class VOriginRecord(object): + + def __init__(self, name=None, vOrigin=None): + self.glyphName = name + self.vOrigin = vOrigin + + def toXML(self, writer, ttFont): + writer.begintag("VOriginRecord") + writer.newline() + writer.simpletag("glyphName", value=self.glyphName) + writer.newline() + writer.simpletag("vOrigin", value=self.vOrigin) + writer.newline() + writer.endtag("VOriginRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name == "glyphName": + setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/testdata/TestOTF-Regular.otx fonttools-3.0/Snippets/fontTools/ttLib/testdata/TestOTF-Regular.otx --- fonttools-2.4/Snippets/fontTools/ttLib/testdata/TestOTF-Regular.otx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/testdata/TestOTF-Regular.otx 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,519 @@ +<?xml version="1.0" encoding="UTF-8"?> +<ttFont sfntVersion="OTTO" ttLibVersion="2.5"> + + <GlyphOrder> + <!-- The 'id' attribute is only for humans; it is ignored when parsed. 
--> + <GlyphID id="0" name=".notdef"/> + <GlyphID id="1" name=".null"/> + <GlyphID id="2" name="CR"/> + <GlyphID id="3" name="space"/> + <GlyphID id="4" name="period"/> + <GlyphID id="5" name="ellipsis"/> + </GlyphOrder> + + <head> + <!-- Most of this table will be recalculated by the compiler --> + <tableVersion value="1.0"/> + <fontRevision value="1.0"/> + <checkSumAdjustment value="0x34034793"/> + <magicNumber value="0x5f0f3cf5"/> + <flags value="00000000 00000011"/> + <unitsPerEm value="1000"/> + <created value="Thu Jun 4 14:29:11 2015"/> + <modified value="Sat Aug 1 10:07:17 2015"/> + <xMin value="50"/> + <yMin value="0"/> + <xMax value="668"/> + <yMax value="750"/> + <macStyle value="00000000 00000000"/> + <lowestRecPPEM value="9"/> + <fontDirectionHint value="2"/> + <indexToLocFormat value="0"/> + <glyphDataFormat value="0"/> + </head> + + <hhea> + <tableVersion value="1.0"/> + <ascent value="900"/> + <descent value="-300"/> + <lineGap value="0"/> + <advanceWidthMax value="723"/> + <minLeftSideBearing value="50"/> + <minRightSideBearing value="50"/> + <xMaxExtent value="668"/> + <caretSlopeRise value="1"/> + <caretSlopeRun value="0"/> + <caretOffset value="0"/> + <reserved0 value="0"/> + <reserved1 value="0"/> + <reserved2 value="0"/> + <reserved3 value="0"/> + <metricDataFormat value="0"/> + <numberOfHMetrics value="6"/> + </hhea> + + <maxp> + <tableVersion value="0x5000"/> + <numGlyphs value="6"/> + </maxp> + + <OS_2> + <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex' + will be recalculated by the compiler --> + <version value="4"/> + <xAvgCharWidth value="392"/> + <usWeightClass value="400"/> + <usWidthClass value="5"/> + <fsType value="00000000 00000000"/> + <ySubscriptXSize value="700"/> + <ySubscriptYSize value="650"/> + <ySubscriptXOffset value="0"/> + <ySubscriptYOffset value="140"/> + <ySuperscriptXSize value="700"/> + <ySuperscriptYSize value="650"/> + <ySuperscriptXOffset value="0"/> + <ySuperscriptYOffset value="477"/> + <yStrikeoutSize 
value="50"/> + <yStrikeoutPosition value="250"/> + <sFamilyClass value="2050"/> + <panose> + <bFamilyType value="2"/> + <bSerifStyle value="11"/> + <bWeight value="6"/> + <bProportion value="4"/> + <bContrast value="4"/> + <bStrokeVariation value="2"/> + <bArmStyle value="7"/> + <bLetterForm value="8"/> + <bMidline value="1"/> + <bXHeight value="4"/> + </panose> + <ulUnicodeRange1 value="10000000 00000000 00000000 00000001"/> + <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/> + <achVendID value="NONE"/> + <fsSelection value="00000000 11000000"/> + <usFirstCharIndex value="0"/> + <usLastCharIndex value="8230"/> + <sTypoAscender value="750"/> + <sTypoDescender value="-250"/> + <sTypoLineGap value="200"/> + <usWinAscent value="900"/> + <usWinDescent value="300"/> + <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/> + <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/> + <sxHeight value="500"/> + <sCapHeight value="700"/> + <usDefaultChar value="0"/> + <usBreakChar value="32"/> + <usMaxContext value="0"/> + </OS_2> + + <name> + <namerecord nameID="0" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Copyright (c) 2015 by FontTools. No rights reserved. 
+ </namerecord> + <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test OTF + </namerecord> + <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Regular + </namerecord> + <namerecord nameID="3" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools: Test OTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test OTF + </namerecord> + <namerecord nameID="5" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True"> + TestOTF-Regular + </namerecord> + <namerecord nameID="7" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test OTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + <namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="0" platformID="3" platEncID="1" langID="0x409"> + Copyright (c) 2015 by FontTools. No rights reserved. 
+ </namerecord> + <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409"> + Test OTF + </namerecord> + <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409"> + Regular + </namerecord> + <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409"> + FontTools: Test OTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409"> + Test OTF + </namerecord> + <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409"> + TestOTF-Regular + </namerecord> + <namerecord nameID="7" platformID="3" platEncID="1" langID="0x409"> + Test OTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + </name> + + <cmap> + <tableVersion version="0"/> + <cmap_format_4 platformID="0" platEncID="3" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? 
--> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + <cmap_format_6 platformID="1" platEncID="0" language="0"> + <map code="0x0" name=".null"/> + <map code="0x1" name=".notdef"/> + <map code="0x2" name=".notdef"/> + <map code="0x3" name=".notdef"/> + <map code="0x4" name=".notdef"/> + <map code="0x5" name=".notdef"/> + <map code="0x6" name=".notdef"/> + <map code="0x7" name=".notdef"/> + <map code="0x8" name=".notdef"/> + <map code="0x9" name=".notdef"/> + <map code="0xa" name=".notdef"/> + <map code="0xb" name=".notdef"/> + <map code="0xc" name=".notdef"/> + <map code="0xd" name="CR"/> + <map code="0xe" name=".notdef"/> + <map code="0xf" name=".notdef"/> + <map code="0x10" name=".notdef"/> + <map code="0x11" name=".notdef"/> + <map code="0x12" name=".notdef"/> + <map code="0x13" name=".notdef"/> + <map code="0x14" name=".notdef"/> + <map code="0x15" name=".notdef"/> + <map code="0x16" name=".notdef"/> + <map code="0x17" name=".notdef"/> + <map code="0x18" name=".notdef"/> + <map code="0x19" name=".notdef"/> + <map code="0x1a" name=".notdef"/> + <map code="0x1b" name=".notdef"/> + <map code="0x1c" name=".notdef"/> + <map code="0x1d" name=".notdef"/> + <map code="0x1e" name=".notdef"/> + <map code="0x1f" name=".notdef"/> + <map code="0x20" name="space"/> + <map code="0x21" name=".notdef"/> + <map code="0x22" name=".notdef"/> + <map code="0x23" name=".notdef"/> + <map code="0x24" name=".notdef"/> + <map code="0x25" name=".notdef"/> + <map code="0x26" name=".notdef"/> + <map code="0x27" name=".notdef"/> + <map code="0x28" name=".notdef"/> + <map code="0x29" name=".notdef"/> + <map code="0x2a" name=".notdef"/> + <map code="0x2b" name=".notdef"/> + <map code="0x2c" name=".notdef"/> + <map code="0x2d" name=".notdef"/> + <map code="0x2e" name="period"/> + <map code="0x2f" name=".notdef"/> + <map code="0x30" name=".notdef"/> + <map 
code="0x31" name=".notdef"/> + <map code="0x32" name=".notdef"/> + <map code="0x33" name=".notdef"/> + <map code="0x34" name=".notdef"/> + <map code="0x35" name=".notdef"/> + <map code="0x36" name=".notdef"/> + <map code="0x37" name=".notdef"/> + <map code="0x38" name=".notdef"/> + <map code="0x39" name=".notdef"/> + <map code="0x3a" name=".notdef"/> + <map code="0x3b" name=".notdef"/> + <map code="0x3c" name=".notdef"/> + <map code="0x3d" name=".notdef"/> + <map code="0x3e" name=".notdef"/> + <map code="0x3f" name=".notdef"/> + <map code="0x40" name=".notdef"/> + <map code="0x41" name=".notdef"/> + <map code="0x42" name=".notdef"/> + <map code="0x43" name=".notdef"/> + <map code="0x44" name=".notdef"/> + <map code="0x45" name=".notdef"/> + <map code="0x46" name=".notdef"/> + <map code="0x47" name=".notdef"/> + <map code="0x48" name=".notdef"/> + <map code="0x49" name=".notdef"/> + <map code="0x4a" name=".notdef"/> + <map code="0x4b" name=".notdef"/> + <map code="0x4c" name=".notdef"/> + <map code="0x4d" name=".notdef"/> + <map code="0x4e" name=".notdef"/> + <map code="0x4f" name=".notdef"/> + <map code="0x50" name=".notdef"/> + <map code="0x51" name=".notdef"/> + <map code="0x52" name=".notdef"/> + <map code="0x53" name=".notdef"/> + <map code="0x54" name=".notdef"/> + <map code="0x55" name=".notdef"/> + <map code="0x56" name=".notdef"/> + <map code="0x57" name=".notdef"/> + <map code="0x58" name=".notdef"/> + <map code="0x59" name=".notdef"/> + <map code="0x5a" name=".notdef"/> + <map code="0x5b" name=".notdef"/> + <map code="0x5c" name=".notdef"/> + <map code="0x5d" name=".notdef"/> + <map code="0x5e" name=".notdef"/> + <map code="0x5f" name=".notdef"/> + <map code="0x60" name=".notdef"/> + <map code="0x61" name=".notdef"/> + <map code="0x62" name=".notdef"/> + <map code="0x63" name=".notdef"/> + <map code="0x64" name=".notdef"/> + <map code="0x65" name=".notdef"/> + <map code="0x66" name=".notdef"/> + <map code="0x67" name=".notdef"/> + <map code="0x68" 
name=".notdef"/> + <map code="0x69" name=".notdef"/> + <map code="0x6a" name=".notdef"/> + <map code="0x6b" name=".notdef"/> + <map code="0x6c" name=".notdef"/> + <map code="0x6d" name=".notdef"/> + <map code="0x6e" name=".notdef"/> + <map code="0x6f" name=".notdef"/> + <map code="0x70" name=".notdef"/> + <map code="0x71" name=".notdef"/> + <map code="0x72" name=".notdef"/> + <map code="0x73" name=".notdef"/> + <map code="0x74" name=".notdef"/> + <map code="0x75" name=".notdef"/> + <map code="0x76" name=".notdef"/> + <map code="0x77" name=".notdef"/> + <map code="0x78" name=".notdef"/> + <map code="0x79" name=".notdef"/> + <map code="0x7a" name=".notdef"/> + <map code="0x7b" name=".notdef"/> + <map code="0x7c" name=".notdef"/> + <map code="0x7d" name=".notdef"/> + <map code="0x7e" name=".notdef"/> + <map code="0x7f" name=".notdef"/> + <map code="0x80" name=".notdef"/> + <map code="0x81" name=".notdef"/> + <map code="0x82" name=".notdef"/> + <map code="0x83" name=".notdef"/> + <map code="0x84" name=".notdef"/> + <map code="0x85" name=".notdef"/> + <map code="0x86" name=".notdef"/> + <map code="0x87" name=".notdef"/> + <map code="0x88" name=".notdef"/> + <map code="0x89" name=".notdef"/> + <map code="0x8a" name=".notdef"/> + <map code="0x8b" name=".notdef"/> + <map code="0x8c" name=".notdef"/> + <map code="0x8d" name=".notdef"/> + <map code="0x8e" name=".notdef"/> + <map code="0x8f" name=".notdef"/> + <map code="0x90" name=".notdef"/> + <map code="0x91" name=".notdef"/> + <map code="0x92" name=".notdef"/> + <map code="0x93" name=".notdef"/> + <map code="0x94" name=".notdef"/> + <map code="0x95" name=".notdef"/> + <map code="0x96" name=".notdef"/> + <map code="0x97" name=".notdef"/> + <map code="0x98" name=".notdef"/> + <map code="0x99" name=".notdef"/> + <map code="0x9a" name=".notdef"/> + <map code="0x9b" name=".notdef"/> + <map code="0x9c" name=".notdef"/> + <map code="0x9d" name=".notdef"/> + <map code="0x9e" name=".notdef"/> + <map code="0x9f" name=".notdef"/> + 
<map code="0xa0" name=".notdef"/> + <map code="0xa1" name=".notdef"/> + <map code="0xa2" name=".notdef"/> + <map code="0xa3" name=".notdef"/> + <map code="0xa4" name=".notdef"/> + <map code="0xa5" name=".notdef"/> + <map code="0xa6" name=".notdef"/> + <map code="0xa7" name=".notdef"/> + <map code="0xa8" name=".notdef"/> + <map code="0xa9" name=".notdef"/> + <map code="0xaa" name=".notdef"/> + <map code="0xab" name=".notdef"/> + <map code="0xac" name=".notdef"/> + <map code="0xad" name=".notdef"/> + <map code="0xae" name=".notdef"/> + <map code="0xaf" name=".notdef"/> + <map code="0xb0" name=".notdef"/> + <map code="0xb1" name=".notdef"/> + <map code="0xb2" name=".notdef"/> + <map code="0xb3" name=".notdef"/> + <map code="0xb4" name=".notdef"/> + <map code="0xb5" name=".notdef"/> + <map code="0xb6" name=".notdef"/> + <map code="0xb7" name=".notdef"/> + <map code="0xb8" name=".notdef"/> + <map code="0xb9" name=".notdef"/> + <map code="0xba" name=".notdef"/> + <map code="0xbb" name=".notdef"/> + <map code="0xbc" name=".notdef"/> + <map code="0xbd" name=".notdef"/> + <map code="0xbe" name=".notdef"/> + <map code="0xbf" name=".notdef"/> + <map code="0xc0" name=".notdef"/> + <map code="0xc1" name=".notdef"/> + <map code="0xc2" name=".notdef"/> + <map code="0xc3" name=".notdef"/> + <map code="0xc4" name=".notdef"/> + <map code="0xc5" name=".notdef"/> + <map code="0xc6" name=".notdef"/> + <map code="0xc7" name=".notdef"/> + <map code="0xc8" name=".notdef"/> + <map code="0xc9" name="ellipsis"/> + </cmap_format_6> + <cmap_format_4 platformID="3" platEncID="1" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? 
--> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + </cmap> + + <post> + <formatType value="3.0"/> + <italicAngle value="0.0"/> + <underlinePosition value="-75"/> + <underlineThickness value="50"/> + <isFixedPitch value="0"/> + <minMemType42 value="0"/> + <maxMemType42 value="0"/> + <minMemType1 value="0"/> + <maxMemType1 value="0"/> + </post> + + <CFF> + <CFFFont name="TestOTF-Regular"> + <version value="001.001"/> + <Notice value="Copyright \(c\) 2015 by FontTools. No rights reserved."/> + <FullName value="Test OTF"/> + <FamilyName value="Test OTF"/> + <Weight value="Regular"/> + <isFixedPitch value="0"/> + <ItalicAngle value="0"/> + <UnderlineThickness value="50"/> + <PaintType value="0"/> + <CharstringType value="2"/> + <FontMatrix value="0.001 0 0 0.001 0 0"/> + <FontBBox value="50 0 668 750"/> + <StrokeWidth value="0"/> + <!-- charset is dumped separately as the 'GlyphOrder' element --> + <Encoding name="StandardEncoding"/> + <Private> + <BlueScale value="0.039625"/> + <BlueShift value="7"/> + <BlueFuzz value="1"/> + <ForceBold value="0"/> + <LanguageGroup value="0"/> + <ExpansionFactor value="0.06"/> + <initialRandomSeed value="0"/> + <defaultWidthX value="0"/> + <nominalWidthX value="0"/> + <Subrs> + <!-- The 'index' attribute is only for humans; it is ignored when parsed. 
--> + <CharString index="0"> + 131 122 -131 hlineto + return + </CharString> + </Subrs> + </Private> + <CharStrings> + <CharString name=".notdef"> + 500 450 hmoveto + 750 -400 -750 vlineto + 50 50 rmoveto + 650 300 -650 vlineto + endchar + </CharString> + <CharString name=".null"> + 0 endchar + </CharString> + <CharString name="CR"> + 250 endchar + </CharString> + <CharString name="ellipsis"> + 723 55 hmoveto + -107 callsubr + 241 -122 rmoveto + -107 callsubr + 241 -122 rmoveto + -107 callsubr + endchar + </CharString> + <CharString name="period"> + 241 55 hmoveto + -107 callsubr + endchar + </CharString> + <CharString name="space"> + 250 endchar + </CharString> + </CharStrings> + </CFFFont> + + <GlobalSubrs> + <!-- The 'index' attribute is only for humans; it is ignored when parsed. --> + </GlobalSubrs> + </CFF> + + <hmtx> + <mtx name=".notdef" width="500" lsb="50"/> + <mtx name=".null" width="0" lsb="0"/> + <mtx name="CR" width="250" lsb="0"/> + <mtx name="ellipsis" width="723" lsb="55"/> + <mtx name="period" width="241" lsb="55"/> + <mtx name="space" width="250" lsb="0"/> + </hmtx> + + <DSIG> + <!-- note that the Digital Signature will be invalid after recompilation! --> + <tableHeader flag="0x0" numSigs="0" version="1"/> + </DSIG> + +</ttFont> diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/testdata/TestTTF-Regular.ttx fonttools-3.0/Snippets/fontTools/ttLib/testdata/TestTTF-Regular.ttx --- fonttools-2.4/Snippets/fontTools/ttLib/testdata/TestTTF-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/testdata/TestTTF-Regular.ttx 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,553 @@ +<?xml version="1.0" encoding="UTF-8"?> +<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="2.5"> + + <GlyphOrder> + <!-- The 'id' attribute is only for humans; it is ignored when parsed. 
--> + <GlyphID id="0" name=".notdef"/> + <GlyphID id="1" name=".null"/> + <GlyphID id="2" name="CR"/> + <GlyphID id="3" name="space"/> + <GlyphID id="4" name="period"/> + <GlyphID id="5" name="ellipsis"/> + </GlyphOrder> + + <head> + <!-- Most of this table will be recalculated by the compiler --> + <tableVersion value="1.0"/> + <fontRevision value="1.0"/> + <checkSumAdjustment value="0x2ee689e2"/> + <magicNumber value="0x5f0f3cf5"/> + <flags value="00000000 00000011"/> + <unitsPerEm value="1000"/> + <created value="Thu Jun 4 14:29:11 2015"/> + <modified value="Mon Aug 3 13:04:43 2015"/> + <xMin value="50"/> + <yMin value="0"/> + <xMax value="668"/> + <yMax value="750"/> + <macStyle value="00000000 00000000"/> + <lowestRecPPEM value="9"/> + <fontDirectionHint value="2"/> + <indexToLocFormat value="0"/> + <glyphDataFormat value="0"/> + </head> + + <hhea> + <tableVersion value="1.0"/> + <ascent value="900"/> + <descent value="-300"/> + <lineGap value="0"/> + <advanceWidthMax value="723"/> + <minLeftSideBearing value="50"/> + <minRightSideBearing value="50"/> + <xMaxExtent value="668"/> + <caretSlopeRise value="1"/> + <caretSlopeRun value="0"/> + <caretOffset value="0"/> + <reserved0 value="0"/> + <reserved1 value="0"/> + <reserved2 value="0"/> + <reserved3 value="0"/> + <metricDataFormat value="0"/> + <numberOfHMetrics value="6"/> + </hhea> + + <maxp> + <!-- Most of this table will be recalculated by the compiler --> + <tableVersion value="0x10000"/> + <numGlyphs value="6"/> + <maxPoints value="8"/> + <maxContours value="2"/> + <maxCompositePoints value="12"/> + <maxCompositeContours value="3"/> + <maxZones value="1"/> + <maxTwilightPoints value="0"/> + <maxStorage value="0"/> + <maxFunctionDefs value="0"/> + <maxInstructionDefs value="0"/> + <maxStackElements value="0"/> + <maxSizeOfInstructions value="0"/> + <maxComponentElements value="3"/> + <maxComponentDepth value="1"/> + </maxp> + + <OS_2> + <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex' + will be 
recalculated by the compiler --> + <version value="4"/> + <xAvgCharWidth value="392"/> + <usWeightClass value="400"/> + <usWidthClass value="5"/> + <fsType value="00000000 00000000"/> + <ySubscriptXSize value="700"/> + <ySubscriptYSize value="650"/> + <ySubscriptXOffset value="0"/> + <ySubscriptYOffset value="140"/> + <ySuperscriptXSize value="700"/> + <ySuperscriptYSize value="650"/> + <ySuperscriptXOffset value="0"/> + <ySuperscriptYOffset value="477"/> + <yStrikeoutSize value="50"/> + <yStrikeoutPosition value="250"/> + <sFamilyClass value="2050"/> + <panose> + <bFamilyType value="2"/> + <bSerifStyle value="11"/> + <bWeight value="6"/> + <bProportion value="4"/> + <bContrast value="4"/> + <bStrokeVariation value="2"/> + <bArmStyle value="7"/> + <bLetterForm value="8"/> + <bMidline value="1"/> + <bXHeight value="4"/> + </panose> + <ulUnicodeRange1 value="10000000 00000000 00000000 00000001"/> + <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/> + <achVendID value="NONE"/> + <fsSelection value="00000000 11000000"/> + <usFirstCharIndex value="0"/> + <usLastCharIndex value="8230"/> + <sTypoAscender value="750"/> + <sTypoDescender value="-250"/> + <sTypoLineGap value="200"/> + <usWinAscent value="900"/> + <usWinDescent value="300"/> + <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/> + <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/> + <sxHeight value="500"/> + <sCapHeight value="700"/> + <usDefaultChar value="0"/> + <usBreakChar value="32"/> + <usMaxContext value="0"/> + </OS_2> + + <hmtx> + <mtx name=".notdef" width="500" lsb="50"/> + <mtx name=".null" width="0" lsb="0"/> + <mtx name="CR" width="250" lsb="0"/> + <mtx name="ellipsis" width="723" lsb="55"/> + <mtx name="period" width="241" lsb="55"/> + <mtx name="space" width="250" lsb="0"/> + </hmtx> + + <cmap> + <tableVersion version="0"/> + 
<cmap_format_4 platformID="0" platEncID="3" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? --> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + <cmap_format_6 platformID="1" platEncID="0" language="0"> + <map code="0x0" name=".null"/> + <map code="0x1" name=".notdef"/> + <map code="0x2" name=".notdef"/> + <map code="0x3" name=".notdef"/> + <map code="0x4" name=".notdef"/> + <map code="0x5" name=".notdef"/> + <map code="0x6" name=".notdef"/> + <map code="0x7" name=".notdef"/> + <map code="0x8" name=".notdef"/> + <map code="0x9" name=".notdef"/> + <map code="0xa" name=".notdef"/> + <map code="0xb" name=".notdef"/> + <map code="0xc" name=".notdef"/> + <map code="0xd" name="CR"/> + <map code="0xe" name=".notdef"/> + <map code="0xf" name=".notdef"/> + <map code="0x10" name=".notdef"/> + <map code="0x11" name=".notdef"/> + <map code="0x12" name=".notdef"/> + <map code="0x13" name=".notdef"/> + <map code="0x14" name=".notdef"/> + <map code="0x15" name=".notdef"/> + <map code="0x16" name=".notdef"/> + <map code="0x17" name=".notdef"/> + <map code="0x18" name=".notdef"/> + <map code="0x19" name=".notdef"/> + <map code="0x1a" name=".notdef"/> + <map code="0x1b" name=".notdef"/> + <map code="0x1c" name=".notdef"/> + <map code="0x1d" name=".notdef"/> + <map code="0x1e" name=".notdef"/> + <map code="0x1f" name=".notdef"/> + <map code="0x20" name="space"/> + <map code="0x21" name=".notdef"/> + <map code="0x22" name=".notdef"/> + <map code="0x23" name=".notdef"/> + <map code="0x24" name=".notdef"/> + <map code="0x25" name=".notdef"/> + <map code="0x26" name=".notdef"/> + <map code="0x27" name=".notdef"/> + <map code="0x28" name=".notdef"/> + <map code="0x29" name=".notdef"/> + <map code="0x2a" name=".notdef"/> + <map code="0x2b" name=".notdef"/> + <map code="0x2c" name=".notdef"/> + <map 
code="0x2d" name=".notdef"/> + <map code="0x2e" name="period"/> + <map code="0x2f" name=".notdef"/> + <map code="0x30" name=".notdef"/> + <map code="0x31" name=".notdef"/> + <map code="0x32" name=".notdef"/> + <map code="0x33" name=".notdef"/> + <map code="0x34" name=".notdef"/> + <map code="0x35" name=".notdef"/> + <map code="0x36" name=".notdef"/> + <map code="0x37" name=".notdef"/> + <map code="0x38" name=".notdef"/> + <map code="0x39" name=".notdef"/> + <map code="0x3a" name=".notdef"/> + <map code="0x3b" name=".notdef"/> + <map code="0x3c" name=".notdef"/> + <map code="0x3d" name=".notdef"/> + <map code="0x3e" name=".notdef"/> + <map code="0x3f" name=".notdef"/> + <map code="0x40" name=".notdef"/> + <map code="0x41" name=".notdef"/> + <map code="0x42" name=".notdef"/> + <map code="0x43" name=".notdef"/> + <map code="0x44" name=".notdef"/> + <map code="0x45" name=".notdef"/> + <map code="0x46" name=".notdef"/> + <map code="0x47" name=".notdef"/> + <map code="0x48" name=".notdef"/> + <map code="0x49" name=".notdef"/> + <map code="0x4a" name=".notdef"/> + <map code="0x4b" name=".notdef"/> + <map code="0x4c" name=".notdef"/> + <map code="0x4d" name=".notdef"/> + <map code="0x4e" name=".notdef"/> + <map code="0x4f" name=".notdef"/> + <map code="0x50" name=".notdef"/> + <map code="0x51" name=".notdef"/> + <map code="0x52" name=".notdef"/> + <map code="0x53" name=".notdef"/> + <map code="0x54" name=".notdef"/> + <map code="0x55" name=".notdef"/> + <map code="0x56" name=".notdef"/> + <map code="0x57" name=".notdef"/> + <map code="0x58" name=".notdef"/> + <map code="0x59" name=".notdef"/> + <map code="0x5a" name=".notdef"/> + <map code="0x5b" name=".notdef"/> + <map code="0x5c" name=".notdef"/> + <map code="0x5d" name=".notdef"/> + <map code="0x5e" name=".notdef"/> + <map code="0x5f" name=".notdef"/> + <map code="0x60" name=".notdef"/> + <map code="0x61" name=".notdef"/> + <map code="0x62" name=".notdef"/> + <map code="0x63" name=".notdef"/> + <map code="0x64" 
name=".notdef"/> + <map code="0x65" name=".notdef"/> + <map code="0x66" name=".notdef"/> + <map code="0x67" name=".notdef"/> + <map code="0x68" name=".notdef"/> + <map code="0x69" name=".notdef"/> + <map code="0x6a" name=".notdef"/> + <map code="0x6b" name=".notdef"/> + <map code="0x6c" name=".notdef"/> + <map code="0x6d" name=".notdef"/> + <map code="0x6e" name=".notdef"/> + <map code="0x6f" name=".notdef"/> + <map code="0x70" name=".notdef"/> + <map code="0x71" name=".notdef"/> + <map code="0x72" name=".notdef"/> + <map code="0x73" name=".notdef"/> + <map code="0x74" name=".notdef"/> + <map code="0x75" name=".notdef"/> + <map code="0x76" name=".notdef"/> + <map code="0x77" name=".notdef"/> + <map code="0x78" name=".notdef"/> + <map code="0x79" name=".notdef"/> + <map code="0x7a" name=".notdef"/> + <map code="0x7b" name=".notdef"/> + <map code="0x7c" name=".notdef"/> + <map code="0x7d" name=".notdef"/> + <map code="0x7e" name=".notdef"/> + <map code="0x7f" name=".notdef"/> + <map code="0x80" name=".notdef"/> + <map code="0x81" name=".notdef"/> + <map code="0x82" name=".notdef"/> + <map code="0x83" name=".notdef"/> + <map code="0x84" name=".notdef"/> + <map code="0x85" name=".notdef"/> + <map code="0x86" name=".notdef"/> + <map code="0x87" name=".notdef"/> + <map code="0x88" name=".notdef"/> + <map code="0x89" name=".notdef"/> + <map code="0x8a" name=".notdef"/> + <map code="0x8b" name=".notdef"/> + <map code="0x8c" name=".notdef"/> + <map code="0x8d" name=".notdef"/> + <map code="0x8e" name=".notdef"/> + <map code="0x8f" name=".notdef"/> + <map code="0x90" name=".notdef"/> + <map code="0x91" name=".notdef"/> + <map code="0x92" name=".notdef"/> + <map code="0x93" name=".notdef"/> + <map code="0x94" name=".notdef"/> + <map code="0x95" name=".notdef"/> + <map code="0x96" name=".notdef"/> + <map code="0x97" name=".notdef"/> + <map code="0x98" name=".notdef"/> + <map code="0x99" name=".notdef"/> + <map code="0x9a" name=".notdef"/> + <map code="0x9b" name=".notdef"/> + 
<map code="0x9c" name=".notdef"/> + <map code="0x9d" name=".notdef"/> + <map code="0x9e" name=".notdef"/> + <map code="0x9f" name=".notdef"/> + <map code="0xa0" name=".notdef"/> + <map code="0xa1" name=".notdef"/> + <map code="0xa2" name=".notdef"/> + <map code="0xa3" name=".notdef"/> + <map code="0xa4" name=".notdef"/> + <map code="0xa5" name=".notdef"/> + <map code="0xa6" name=".notdef"/> + <map code="0xa7" name=".notdef"/> + <map code="0xa8" name=".notdef"/> + <map code="0xa9" name=".notdef"/> + <map code="0xaa" name=".notdef"/> + <map code="0xab" name=".notdef"/> + <map code="0xac" name=".notdef"/> + <map code="0xad" name=".notdef"/> + <map code="0xae" name=".notdef"/> + <map code="0xaf" name=".notdef"/> + <map code="0xb0" name=".notdef"/> + <map code="0xb1" name=".notdef"/> + <map code="0xb2" name=".notdef"/> + <map code="0xb3" name=".notdef"/> + <map code="0xb4" name=".notdef"/> + <map code="0xb5" name=".notdef"/> + <map code="0xb6" name=".notdef"/> + <map code="0xb7" name=".notdef"/> + <map code="0xb8" name=".notdef"/> + <map code="0xb9" name=".notdef"/> + <map code="0xba" name=".notdef"/> + <map code="0xbb" name=".notdef"/> + <map code="0xbc" name=".notdef"/> + <map code="0xbd" name=".notdef"/> + <map code="0xbe" name=".notdef"/> + <map code="0xbf" name=".notdef"/> + <map code="0xc0" name=".notdef"/> + <map code="0xc1" name=".notdef"/> + <map code="0xc2" name=".notdef"/> + <map code="0xc3" name=".notdef"/> + <map code="0xc4" name=".notdef"/> + <map code="0xc5" name=".notdef"/> + <map code="0xc6" name=".notdef"/> + <map code="0xc7" name=".notdef"/> + <map code="0xc8" name=".notdef"/> + <map code="0xc9" name="ellipsis"/> + </cmap_format_6> + <cmap_format_4 platformID="3" platEncID="1" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? 
--> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + </cmap> + + <fpgm> + <assembly> + SVTCA[0] /* SetFPVectorToAxis */ + </assembly> + </fpgm> + + <prep> + <assembly> + SVTCA[0] /* SetFPVectorToAxis */ + </assembly> + </prep> + + <cvt> + <cv index="0" value="0"/> + </cvt> + + <loca> + <!-- The 'loca' table will be calculated by the compiler --> + </loca> + + <glyf> + + <!-- The xMin, yMin, xMax and yMax values + will be recalculated by the compiler. --> + + <TTGlyph name=".notdef" xMin="50" yMin="0" xMax="450" yMax="750"> + <contour> + <pt x="50" y="0" on="1"/> + <pt x="50" y="750" on="1"/> + <pt x="450" y="750" on="1"/> + <pt x="450" y="0" on="1"/> + </contour> + <contour> + <pt x="400" y="50" on="1"/> + <pt x="400" y="700" on="1"/> + <pt x="100" y="700" on="1"/> + <pt x="100" y="50" on="1"/> + </contour> + <instructions><assembly> + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + </assembly></instructions> + </TTGlyph> + + <TTGlyph name=".null"/><!-- contains no outline data --> + + <TTGlyph name="CR"/><!-- contains no outline data --> + + <TTGlyph name="ellipsis" xMin="55" yMin="0" xMax="668" yMax="122"> + <component glyphName="period" x="0" y="0" flags="0x4"/> + <component glyphName="period" x="241" y="0" flags="0x4"/> + <component glyphName="period" x="482" y="0" flags="0x4"/> + <instructions><assembly> + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + </assembly></instructions> + </TTGlyph> + + <TTGlyph name="period" xMin="55" yMin="0" xMax="186" yMax="122"> + <contour> + <pt x="55" y="122" on="1"/> + <pt x="186" y="122" on="1"/> + <pt x="186" y="0" on="1"/> + <pt x="55" y="0" on="1"/> + </contour> + <instructions><assembly> + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + </assembly></instructions> + </TTGlyph> + + <TTGlyph name="space"/><!-- contains no 
outline data --> + + </glyf> + + <name> + <namerecord nameID="0" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Copyright (c) 2015 by FontTools. No rights reserved. + </namerecord> + <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Regular + </namerecord> + <namerecord nameID="3" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools: Test TTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="5" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True"> + TestTTF-Regular + </namerecord> + <namerecord nameID="7" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + <namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="0" platformID="3" platEncID="1" langID="0x409"> + Copyright (c) 2015 by FontTools. No rights reserved. 
+ </namerecord> + <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409"> + Test TTF + </namerecord> + <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409"> + Regular + </namerecord> + <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409"> + FontTools: Test TTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409"> + Test TTF + </namerecord> + <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409"> + TestTTF-Regular + </namerecord> + <namerecord nameID="7" platformID="3" platEncID="1" langID="0x409"> + Test TTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + </name> + + <post> + <formatType value="2.0"/> + <italicAngle value="0.0"/> + <underlinePosition value="-75"/> + <underlineThickness value="50"/> + <isFixedPitch value="0"/> + <minMemType42 value="0"/> + <maxMemType42 value="0"/> + <minMemType1 value="0"/> + <maxMemType1 value="0"/> + <psNames> + <!-- This file uses unique glyph names based on the information + found in the 'post' table. Since these names might not be unique, + we have to invent artificial names in case of clashes. In order to + be able to retain the original information, we need a name to + ps name mapping for those cases where they differ. That's what + you see below. 
+ --> + </psNames> + <extraNames> + <!-- following are the name that are not taken from the standard Mac glyph order --> + <psName name=".null"/> + <psName name="CR"/> + </extraNames> + </post> + + <gasp> + <gaspRange rangeMaxPPEM="8" rangeGaspBehavior="10"/> + <gaspRange rangeMaxPPEM="65535" rangeGaspBehavior="15"/> + </gasp> + + <DSIG> + <!-- note that the Digital Signature will be invalid after recompilation! --> + <tableHeader flag="0x0" numSigs="0" version="1"/> + </DSIG> + +</ttFont> diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/testdata/test_woff2_metadata.xml fonttools-3.0/Snippets/fontTools/ttLib/testdata/test_woff2_metadata.xml --- fonttools-2.4/Snippets/fontTools/ttLib/testdata/test_woff2_metadata.xml 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/testdata/test_woff2_metadata.xml 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,103 @@ +<?xml version="1.0" encoding="UTF-8"?> +<metadata version="1.0"> + <uniqueid id="org.w3.webfonts.wofftest" /> + <vendor name="Test Vendor" url="http://w3c.org/Fonts" /> + <credits> + <credit name="Credit 1" role="Role 1" url="http://w3c.org/Fonts" /> + <credit name="Credit 2" role="Role 2" url="http://w3c.org/Fonts" /> + </credits> + <description url="http://w3c.org/Fonts"> + <text> + Description without language. + </text> + <text lang="en"> + Description with "en" language. + </text> + <text lang="fr"> + Description with "fr" language. + </text> + </description> + <license url="http://w3c.org/Fonts" id="License ID"> + <text> + License without language. + </text> + <text lang="en"> + License with "en" language. + </text> + <text lang="fr"> + License with "fr" language. + </text> + </license> + <copyright> + <text> + Copyright without language. + </text> + <text lang="en"> + Copyright with "en" language. + </text> + <text lang="fr"> + Copyright with "fr" language. + </text> + </copyright> + <trademark> + <text> + Trademark without language. 
+ </text> + <text lang="en"> + Trademark with "en" language. + </text> + <text lang="fr"> + Trademark with "fr" language. + </text> + </trademark> + <licensee name="Licensee Name" /> + <extension id="Extension 1"> + <name>Extension 1 - Name Without Language</name> + <name lang="en">Extension 1 - Name With "en" Language</name> + <name lang="fr">Extension 1 - Name With "fr" Language</name> + <item id="Extension 1 - Item 1 ID"> + <name>Extension 1 - Item 1 - Name Without Language</name> + <name lang="en">Extension 1 - Item 1 - Name With "en" Language</name> + <name lang="fr">Extension 1 - Item 1 - Name With "fr" Language</name> + <value>Extension 1 - Item 1 - Value Without Language</value> + <value lang="en">Extension 1 - Item 1 - Value With "en" Language</value> + <value lang="fr">Extension 1 - Item 1 - Value With "fr" Language</value> + </item> + <item id="Extension 1 - Item 2 ID"> + <name>Extension 1 - Item 2 - Name Without Language</name> + <name lang="en">Extension 1 - Item 2 - Name With "en" Language</name> + <name lang="fr">Extension 1 - Item 2 - Name With "fr" Language</name> + <value>Extension 1 - Item 2 - Value Without Language</value> + <value lang="en">Extension 1 - Item 2 - Value With "en" Language</value> + <value lang="fr">Extension 1 - Item 2 - Value With "fr" Language</value> + </item> + </extension> + <extension id="Extension 2"> + <name>Extension 2 - Name Without Language</name> + <name lang="en">Extension 2 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Name With "fr" Language</name> + <item id="Extension 2 - Item 1 ID"> + <name>Extension 2 - Item 1 - Name Without Language</name> + <name lang="en">Extension 2 - Item 1 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Item 1 - Name With "fr" Language</name> + <value>Extension 2 - Item 1 - Value Without Language</value> + <value lang="en">Extension 2 - Item 1 - Value With "en" Language</value> + <value lang="fr">Extension 2 - Item 1 - Value With "fr" Language</value> 
+ </item> + <item id="Extension 2 - Item 2 ID"> + <name>Extension 2 - Item 2 - Name Without Language</name> + <name lang="en">Extension 2 - Item 2 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Item 2 - Name With "fr" Language</name> + <value>Extension 2 - Item 2 - Value Without Language</value> + <value lang="en">Extension 2 - Item 2 - Value With "en" Language</value> + <value lang="fr">Extension 2 - Item 2 - Value With "fr" Language</value> + </item> + <item id="Extension 2 - Item 3 ID"> + <name>Extension 2 - Item 3 - Name Without Language</name> + <name lang="en">Extension 2 - Item 3 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Item 3 - Name With "fr" Language</name> + <value>Extension 2 - Item 3 - Value Without Language</value> + <value lang="en">Extension 2 - Item 3 - Value With "en" Language</value> + </item> + </extension> +</metadata> diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/woff2.py fonttools-3.0/Snippets/fontTools/ttLib/woff2.py --- fonttools-2.4/Snippets/fontTools/ttLib/woff2.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/woff2.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1084 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +import array +import struct +from collections import OrderedDict +from fontTools.misc import sstruct +from fontTools.misc.arrayTools import calcIntBounds +from fontTools.misc.textTools import pad +from fontTools.ttLib import (TTFont, TTLibError, getTableModule, getTableClass, + getSearchRange) +from fontTools.ttLib.sfnt import (SFNTReader, SFNTWriter, DirectoryEntry, + WOFFFlavorData, sfntDirectoryFormat, sfntDirectorySize, SFNTDirectoryEntry, + sfntDirectoryEntrySize, calcChecksum) +from fontTools.ttLib.tables import ttProgram + +haveBrotli = False +try: + import brotli + haveBrotli = True +except ImportError: + pass + + +class WOFF2Reader(SFNTReader): + + flavor = "woff2" + + def 
__init__(self, file, checkChecksums=1, fontNumber=-1): + if not haveBrotli: + print('The WOFF2 decoder requires the Brotli Python extension, available at:\n' + 'https://github.com/google/brotli', file=sys.stderr) + raise ImportError("No module named brotli") + + self.file = file + + signature = Tag(self.file.read(4)) + if signature != b"wOF2": + raise TTLibError("Not a WOFF2 font (bad signature)") + + self.file.seek(0) + self.DirectoryEntry = WOFF2DirectoryEntry + data = self.file.read(woff2DirectorySize) + if len(data) != woff2DirectorySize: + raise TTLibError('Not a WOFF2 font (not enough data)') + sstruct.unpack(woff2DirectoryFormat, data, self) + + self.tables = OrderedDict() + offset = 0 + for i in range(self.numTables): + entry = self.DirectoryEntry() + entry.fromFile(self.file) + tag = Tag(entry.tag) + self.tables[tag] = entry + entry.offset = offset + offset += entry.length + + totalUncompressedSize = offset + compressedData = self.file.read(self.totalCompressedSize) + decompressedData = brotli.decompress(compressedData) + if len(decompressedData) != totalUncompressedSize: + raise TTLibError( + 'unexpected size for decompressed font data: expected %d, found %d' + % (totalUncompressedSize, len(decompressedData))) + self.transformBuffer = BytesIO(decompressedData) + + self.file.seek(0, 2) + if self.length != self.file.tell(): + raise TTLibError("reported 'length' doesn't match the actual file size") + + self.flavorData = WOFF2FlavorData(self) + + # make empty TTFont to store data while reconstructing tables + self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) + + def __getitem__(self, tag): + """Fetch the raw table data. 
Reconstruct transformed tables.""" + entry = self.tables[Tag(tag)] + if not hasattr(entry, 'data'): + if tag in woff2TransformedTableTags: + entry.data = self.reconstructTable(tag) + else: + entry.data = entry.loadData(self.transformBuffer) + return entry.data + + def reconstructTable(self, tag): + """Reconstruct table named 'tag' from transformed data.""" + if tag not in woff2TransformedTableTags: + raise TTLibError("transform for table '%s' is unknown" % tag) + entry = self.tables[Tag(tag)] + rawData = entry.loadData(self.transformBuffer) + if tag == 'glyf': + # no need to pad glyph data when reconstructing + padding = self.padding if hasattr(self, 'padding') else None + data = self._reconstructGlyf(rawData, padding) + elif tag == 'loca': + data = self._reconstructLoca() + else: + raise NotImplementedError + return data + + def _reconstructGlyf(self, data, padding=None): + """ Return recostructed glyf table data, and set the corresponding loca's + locations. Optionally pad glyph offsets to the specified number of bytes. + """ + self.ttFont['loca'] = WOFF2LocaTable() + glyfTable = self.ttFont['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(data, self.ttFont) + glyfTable.padding = padding + data = glyfTable.compile(self.ttFont) + return data + + def _reconstructLoca(self): + """ Return reconstructed loca table data. 
""" + if 'loca' not in self.ttFont: + # make sure glyf is reconstructed first + self.tables['glyf'].data = self.reconstructTable('glyf') + locaTable = self.ttFont['loca'] + data = locaTable.compile(self.ttFont) + if len(data) != self.tables['loca'].origLength: + raise TTLibError( + "reconstructed 'loca' table doesn't match original size: " + "expected %d, found %d" + % (self.tables['loca'].origLength, len(data))) + return data + + +class WOFF2Writer(SFNTWriter): + + flavor = "woff2" + + def __init__(self, file, numTables, sfntVersion="\000\001\000\000", + flavor=None, flavorData=None): + if not haveBrotli: + print('The WOFF2 encoder requires the Brotli Python extension, available at:\n' + 'https://github.com/google/brotli', file=sys.stderr) + raise ImportError("No module named brotli") + + self.file = file + self.numTables = numTables + self.sfntVersion = Tag(sfntVersion) + self.flavorData = flavorData or WOFF2FlavorData() + + self.directoryFormat = woff2DirectoryFormat + self.directorySize = woff2DirectorySize + self.DirectoryEntry = WOFF2DirectoryEntry + + self.signature = Tag("wOF2") + + self.nextTableOffset = 0 + self.transformBuffer = BytesIO() + + self.tables = OrderedDict() + + # make empty TTFont to store data while normalising and transforming tables + self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) + + def __setitem__(self, tag, data): + """Associate new entry named 'tag' with raw table data.""" + if tag in self.tables: + raise TTLibError("cannot rewrite '%s' table" % tag) + if tag == 'DSIG': + # always drop DSIG table, since the encoding process can invalidate it + self.numTables -= 1 + return + + entry = self.DirectoryEntry() + entry.tag = Tag(tag) + entry.flags = getKnownTagIndex(entry.tag) + # WOFF2 table data are written to disk only on close(), after all tags + # have been specified + entry.data = data + + self.tables[tag] = entry + + def close(self): + """ All tags must have been specified. Now write the table data and directory. 
+ """ + if len(self.tables) != self.numTables: + raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables))) + + if self.sfntVersion in ("\x00\x01\x00\x00", "true"): + isTrueType = True + elif self.sfntVersion == "OTTO": + isTrueType = False + else: + raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") + + # The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned. + # However, the reference WOFF2 implementation still fails to reconstruct + # 'unpadded' glyf tables, therefore we need to 'normalise' them. + # See: + # https://github.com/khaledhosny/ots/issues/60 + # https://github.com/google/woff2/issues/15 + if isTrueType: + self._normaliseGlyfAndLoca(padding=4) + self._setHeadTransformFlag() + + # To pass the legacy OpenType Sanitiser currently included in browsers, + # we must sort the table directory and data alphabetically by tag. + # See: + # https://github.com/google/woff2/pull/3 + # https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html + # TODO(user): remove to match spec once browsers are on newer OTS + self.tables = OrderedDict(sorted(self.tables.items())) + + self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets() + + fontData = self._transformTables() + compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT) + + self.totalCompressedSize = len(compressedFont) + self.length = self._calcTotalSize() + self.majorVersion, self.minorVersion = self._getVersion() + self.reserved = 0 + + directory = self._packTableDirectory() + self.file.seek(0) + self.file.write(pad(directory + compressedFont, size=4)) + self._writeFlavorData() + + def _normaliseGlyfAndLoca(self, padding=4): + """ Recompile glyf and loca tables, aligning glyph offsets to multiples of + 'padding' size. Update the head table's 'indexToLocFormat' accordingly while + compiling loca. 
+ """ + if self.sfntVersion == "OTTO": + return + for tag in ('maxp', 'head', 'loca', 'glyf'): + self._decompileTable(tag) + self.ttFont['glyf'].padding = padding + for tag in ('glyf', 'loca'): + self._compileTable(tag) + + def _setHeadTransformFlag(self): + """ Set bit 11 of 'head' table flags to indicate that the font has undergone + a lossless modifying transform. Re-compile head table data.""" + self._decompileTable('head') + self.ttFont['head'].flags |= (1 << 11) + self._compileTable('head') + + def _decompileTable(self, tag): + """ Fetch table data, decompile it, and store it inside self.ttFont. """ + tag = Tag(tag) + if tag not in self.tables: + raise TTLibError("missing required table: %s" % tag) + if self.ttFont.isLoaded(tag): + return + data = self.tables[tag].data + if tag == 'loca': + tableClass = WOFF2LocaTable + elif tag == 'glyf': + tableClass = WOFF2GlyfTable + else: + tableClass = getTableClass(tag) + table = tableClass(tag) + self.ttFont.tables[tag] = table + table.decompile(data, self.ttFont) + + def _compileTable(self, tag): + """ Compile table and store it in its 'data' attribute. """ + self.tables[tag].data = self.ttFont[tag].compile(self.ttFont) + + def _calcSFNTChecksumsLengthsAndOffsets(self): + """ Compute the 'original' SFNT checksums, lengths and offsets for checksum + adjustment calculation. Return the total size of the uncompressed font. 
+ """ + offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables) + for tag, entry in self.tables.items(): + data = entry.data + entry.origOffset = offset + entry.origLength = len(data) + if tag == 'head': + entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) + else: + entry.checkSum = calcChecksum(data) + offset += (entry.origLength + 3) & ~3 + return offset + + def _transformTables(self): + """Return transformed font data.""" + for tag, entry in self.tables.items(): + if tag in woff2TransformedTableTags: + data = self.transformTable(tag) + else: + data = entry.data + entry.offset = self.nextTableOffset + entry.saveData(self.transformBuffer, data) + self.nextTableOffset += entry.length + self.writeMasterChecksum() + fontData = self.transformBuffer.getvalue() + return fontData + + def transformTable(self, tag): + """Return transformed table data.""" + if tag not in woff2TransformedTableTags: + raise TTLibError("Transform for table '%s' is unknown" % tag) + if tag == "loca": + data = b"" + elif tag == "glyf": + for tag in ('maxp', 'head', 'loca', 'glyf'): + self._decompileTable(tag) + glyfTable = self.ttFont['glyf'] + data = glyfTable.transform(self.ttFont) + else: + raise NotImplementedError + return data + + def _calcMasterChecksum(self): + """Calculate checkSumAdjustment.""" + tags = list(self.tables.keys()) + checksums = [] + for i in range(len(tags)): + checksums.append(self.tables[tags[i]].checkSum) + + # Create a SFNT directory for checksum calculation purposes + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16) + directory = sstruct.pack(sfntDirectoryFormat, self) + tables = sorted(self.tables.items()) + for tag, entry in tables: + sfntEntry = SFNTDirectoryEntry() + sfntEntry.tag = entry.tag + sfntEntry.checkSum = entry.checkSum + sfntEntry.offset = entry.origOffset + sfntEntry.length = entry.origLength + directory = directory + sfntEntry.toString() + + directory_end = sfntDirectorySize + 
len(self.tables) * sfntDirectoryEntrySize + assert directory_end == len(directory) + + checksums.append(calcChecksum(directory)) + checksum = sum(checksums) & 0xffffffff + # BiboAfba! + checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff + return checksumadjustment + + def writeMasterChecksum(self): + """Write checkSumAdjustment to the transformBuffer.""" + checksumadjustment = self._calcMasterChecksum() + self.transformBuffer.seek(self.tables['head'].offset + 8) + self.transformBuffer.write(struct.pack(">L", checksumadjustment)) + + def _calcTotalSize(self): + """Calculate total size of WOFF2 font, including any meta- and/or private data.""" + offset = self.directorySize + for entry in self.tables.values(): + offset += len(entry.toString()) + offset += self.totalCompressedSize + offset = (offset + 3) & ~3 + offset = self._calcFlavorDataOffsetsAndSize(offset) + return offset + + def _calcFlavorDataOffsetsAndSize(self, start): + """Calculate offsets and lengths for any meta- and/or private data.""" + offset = start + data = self.flavorData + if data.metaData: + self.metaOrigLength = len(data.metaData) + self.metaOffset = offset + self.compressedMetaData = brotli.compress( + data.metaData, mode=brotli.MODE_TEXT) + self.metaLength = len(self.compressedMetaData) + offset += self.metaLength + else: + self.metaOffset = self.metaLength = self.metaOrigLength = 0 + self.compressedMetaData = b"" + if data.privData: + # make sure private data is padded to 4-byte boundary + offset = (offset + 3) & ~3 + self.privOffset = offset + self.privLength = len(data.privData) + offset += self.privLength + else: + self.privOffset = self.privLength = 0 + return offset + + def _getVersion(self): + """Return the WOFF2 font's (majorVersion, minorVersion) tuple.""" + data = self.flavorData + if data.majorVersion is not None and data.minorVersion is not None: + return data.majorVersion, data.minorVersion + else: + # if None, return 'fontRevision' from 'head' table + if 'head' in 
self.tables: + return struct.unpack(">HH", self.tables['head'].data[4:8]) + else: + return 0, 0 + + def _packTableDirectory(self): + """Return WOFF2 table directory data.""" + directory = sstruct.pack(self.directoryFormat, self) + for entry in self.tables.values(): + directory = directory + entry.toString() + return directory + + def _writeFlavorData(self): + """Write metadata and/or private data using appropiate padding.""" + compressedMetaData = self.compressedMetaData + privData = self.flavorData.privData + if compressedMetaData and privData: + compressedMetaData = pad(compressedMetaData, size=4) + if compressedMetaData: + self.file.seek(self.metaOffset) + assert self.file.tell() == self.metaOffset + self.file.write(compressedMetaData) + if privData: + self.file.seek(self.privOffset) + assert self.file.tell() == self.privOffset + self.file.write(privData) + + def reordersTables(self): + return True + + +# -- woff2 directory helpers and cruft + +woff2DirectoryFormat = """ + > # big endian + signature: 4s # "wOF2" + sfntVersion: 4s + length: L # total woff2 file size + numTables: H # number of tables + reserved: H # set to 0 + totalSfntSize: L # uncompressed size + totalCompressedSize: L # compressed size + majorVersion: H # major version of WOFF file + minorVersion: H # minor version of WOFF file + metaOffset: L # offset to metadata block + metaLength: L # length of compressed metadata + metaOrigLength: L # length of uncompressed metadata + privOffset: L # offset to private data block + privLength: L # length of private data block +""" + +woff2DirectorySize = sstruct.calcsize(woff2DirectoryFormat) + +woff2KnownTags = ( + "cmap", "head", "hhea", "hmtx", "maxp", "name", "OS/2", "post", "cvt ", + "fpgm", "glyf", "loca", "prep", "CFF ", "VORG", "EBDT", "EBLC", "gasp", + "hdmx", "kern", "LTSH", "PCLT", "VDMX", "vhea", "vmtx", "BASE", "GDEF", + "GPOS", "GSUB", "EBSC", "JSTF", "MATH", "CBDT", "CBLC", "COLR", "CPAL", + "SVG ", "sbix", "acnt", "avar", "bdat", "bloc", 
"bsln", "cvar", "fdsc", + "feat", "fmtx", "fvar", "gvar", "hsty", "just", "lcar", "mort", "morx", + "opbd", "prop", "trak", "Zapf", "Silf", "Glat", "Gloc", "Feat", "Sill") + +woff2FlagsFormat = """ + > # big endian + flags: B # table type and flags +""" + +woff2FlagsSize = sstruct.calcsize(woff2FlagsFormat) + +woff2UnknownTagFormat = """ + > # big endian + tag: 4s # 4-byte tag (optional) +""" + +woff2UnknownTagSize = sstruct.calcsize(woff2UnknownTagFormat) + +woff2UnknownTagIndex = 0x3F + +woff2Base128MaxSize = 5 +woff2DirectoryEntryMaxSize = woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize + +woff2TransformedTableTags = ('glyf', 'loca') + +woff2GlyfTableFormat = """ + > # big endian + version: L # = 0x00000000 + numGlyphs: H # Number of glyphs + indexFormat: H # Offset format for loca table + nContourStreamSize: L # Size of nContour stream + nPointsStreamSize: L # Size of nPoints stream + flagStreamSize: L # Size of flag stream + glyphStreamSize: L # Size of glyph stream + compositeStreamSize: L # Size of composite stream + bboxStreamSize: L # Comnined size of bboxBitmap and bboxStream + instructionStreamSize: L # Size of instruction stream +""" + +woff2GlyfTableFormatSize = sstruct.calcsize(woff2GlyfTableFormat) + +bboxFormat = """ + > # big endian + xMin: h + yMin: h + xMax: h + yMax: h +""" + + +def getKnownTagIndex(tag): + """Return index of 'tag' in woff2KnownTags list. 
Return 63 if not found.""" + for i in range(len(woff2KnownTags)): + if tag == woff2KnownTags[i]: + return i + return woff2UnknownTagIndex + + +class WOFF2DirectoryEntry(DirectoryEntry): + + def fromFile(self, file): + pos = file.tell() + data = file.read(woff2DirectoryEntryMaxSize) + left = self.fromString(data) + consumed = len(data) - len(left) + file.seek(pos + consumed) + + def fromString(self, data): + if len(data) < 1: + raise TTLibError("can't read table 'flags': not enough data") + dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self) + if self.flags & 0x3F == 0x3F: + # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value + if len(data) < woff2UnknownTagSize: + raise TTLibError("can't read table 'tag': not enough data") + dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self) + else: + # otherwise, tag is derived from a fixed 'Known Tags' table + self.tag = woff2KnownTags[self.flags & 0x3F] + self.tag = Tag(self.tag) + if self.flags & 0xC0 != 0: + raise TTLibError('bits 6-7 are reserved and must be 0') + self.origLength, data = unpackBase128(data) + self.length = self.origLength + if self.tag in woff2TransformedTableTags: + self.length, data = unpackBase128(data) + if self.tag == 'loca' and self.length != 0: + raise TTLibError( + "the transformLength of the 'loca' table must be 0") + # return left over data + return data + + def toString(self): + data = bytechr(self.flags) + if (self.flags & 0x3F) == 0x3F: + data += struct.pack('>4s', self.tag.tobytes()) + data += packBase128(self.origLength) + if self.tag in woff2TransformedTableTags: + data += packBase128(self.length) + return data + + +class WOFF2LocaTable(getTableClass('loca')): + """Same as parent class. The only difference is that it attempts to preserve + the 'indexFormat' as encoded in the WOFF2 glyf table. 
+ """ + + def __init__(self, tag=None): + self.tableTag = Tag(tag or 'loca') + + def compile(self, ttFont): + try: + max_location = max(self.locations) + except AttributeError: + self.set([]) + max_location = 0 + if 'glyf' in ttFont and hasattr(ttFont['glyf'], 'indexFormat'): + # copile loca using the indexFormat specified in the WOFF2 glyf table + indexFormat = ttFont['glyf'].indexFormat + if indexFormat == 0: + if max_location >= 0x20000: + raise TTLibError("indexFormat is 0 but local offsets > 0x20000") + if not all(l % 2 == 0 for l in self.locations): + raise TTLibError("indexFormat is 0 but local offsets not multiples of 2") + locations = array.array("H") + for i in range(len(self.locations)): + locations.append(self.locations[i] // 2) + else: + locations = array.array("I", self.locations) + if sys.byteorder != "big": + locations.byteswap() + data = locations.tostring() + else: + # use the most compact indexFormat given the current glyph offsets + data = super(WOFF2LocaTable, self).compile(ttFont) + return data + + +class WOFF2GlyfTable(getTableClass('glyf')): + """Decoder/Encoder for WOFF2 'glyf' table transform.""" + + subStreams = ( + 'nContourStream', 'nPointsStream', 'flagStream', 'glyphStream', + 'compositeStream', 'bboxStream', 'instructionStream') + + def __init__(self, tag=None): + self.tableTag = Tag(tag or 'glyf') + + def reconstruct(self, data, ttFont): + """ Decompile transformed 'glyf' data. 
""" + inputDataSize = len(data) + + if inputDataSize < woff2GlyfTableFormatSize: + raise TTLibError("not enough 'glyf' data") + dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self) + offset = woff2GlyfTableFormatSize + + for stream in self.subStreams: + size = getattr(self, stream + 'Size') + setattr(self, stream, data[:size]) + data = data[size:] + offset += size + + if offset != inputDataSize: + raise TTLibError( + "incorrect size of transformed 'glyf' table: expected %d, received %d bytes" + % (offset, inputDataSize)) + + bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 + bboxBitmap = self.bboxStream[:bboxBitmapSize] + self.bboxBitmap = array.array('B', bboxBitmap) + self.bboxStream = self.bboxStream[bboxBitmapSize:] + + self.nContourStream = array.array("h", self.nContourStream) + if sys.byteorder != "big": + self.nContourStream.byteswap() + assert len(self.nContourStream) == self.numGlyphs + + if 'head' in ttFont: + ttFont['head'].indexToLocFormat = self.indexFormat + try: + self.glyphOrder = ttFont.getGlyphOrder() + except: + self.glyphOrder = None + if self.glyphOrder is None: + self.glyphOrder = [".notdef"] + self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)]) + else: + if len(self.glyphOrder) != self.numGlyphs: + raise TTLibError( + "incorrect glyphOrder: expected %d glyphs, found %d" % + (len(self.glyphOrder), self.numGlyphs)) + + glyphs = self.glyphs = {} + for glyphID, glyphName in enumerate(self.glyphOrder): + glyph = self._decodeGlyph(glyphID) + glyphs[glyphName] = glyph + + def transform(self, ttFont): + """ Return transformed 'glyf' data """ + self.numGlyphs = len(self.glyphs) + if not hasattr(self, "glyphOrder"): + try: + self.glyphOrder = ttFont.getGlyphOrder() + except: + self.glyphOrder = None + if self.glyphOrder is None: + self.glyphOrder = [".notdef"] + self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)]) + if len(self.glyphOrder) != self.numGlyphs: + raise TTLibError( + "incorrect 
glyphOrder: expected %d glyphs, found %d" % + (len(self.glyphOrder), self.numGlyphs)) + + if 'maxp' in ttFont: + ttFont['maxp'].numGlyphs = self.numGlyphs + self.indexFormat = ttFont['head'].indexToLocFormat + + for stream in self.subStreams: + setattr(self, stream, b"") + bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 + self.bboxBitmap = array.array('B', [0]*bboxBitmapSize) + + for glyphID in range(self.numGlyphs): + self._encodeGlyph(glyphID) + + self.bboxStream = self.bboxBitmap.tostring() + self.bboxStream + for stream in self.subStreams: + setattr(self, stream + 'Size', len(getattr(self, stream))) + self.version = 0 + data = sstruct.pack(woff2GlyfTableFormat, self) + data += bytesjoin([getattr(self, s) for s in self.subStreams]) + return data + + def _decodeGlyph(self, glyphID): + glyph = getTableModule('glyf').Glyph() + glyph.numberOfContours = self.nContourStream[glyphID] + if glyph.numberOfContours == 0: + return glyph + elif glyph.isComposite(): + self._decodeComponents(glyph) + else: + self._decodeCoordinates(glyph) + self._decodeBBox(glyphID, glyph) + return glyph + + def _decodeComponents(self, glyph): + data = self.compositeStream + glyph.components = [] + more = 1 + haveInstructions = 0 + while more: + component = getTableModule('glyf').GlyphComponent() + more, haveInstr, data = component.decompile(data, self) + haveInstructions = haveInstructions | haveInstr + glyph.components.append(component) + self.compositeStream = data + if haveInstructions: + self._decodeInstructions(glyph) + + def _decodeCoordinates(self, glyph): + data = self.nPointsStream + endPtsOfContours = [] + endPoint = -1 + for i in range(glyph.numberOfContours): + ptsOfContour, data = unpack255UShort(data) + endPoint += ptsOfContour + endPtsOfContours.append(endPoint) + glyph.endPtsOfContours = endPtsOfContours + self.nPointsStream = data + self._decodeTriplets(glyph) + self._decodeInstructions(glyph) + + def _decodeInstructions(self, glyph): + glyphStream = self.glyphStream + 
instructionStream = self.instructionStream + instructionLength, glyphStream = unpack255UShort(glyphStream) + glyph.program = ttProgram.Program() + glyph.program.fromBytecode(instructionStream[:instructionLength]) + self.glyphStream = glyphStream + self.instructionStream = instructionStream[instructionLength:] + + def _decodeBBox(self, glyphID, glyph): + haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7))) + if glyph.isComposite() and not haveBBox: + raise TTLibError('no bbox values for composite glyph %d' % glyphID) + if haveBBox: + dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph) + else: + glyph.recalcBounds(self) + + def _decodeTriplets(self, glyph): + + def withSign(flag, baseval): + assert 0 <= baseval and baseval < 65536, 'integer overflow' + return baseval if flag & 1 else -baseval + + nPoints = glyph.endPtsOfContours[-1] + 1 + flagSize = nPoints + if flagSize > len(self.flagStream): + raise TTLibError("not enough 'flagStream' data") + flagsData = self.flagStream[:flagSize] + self.flagStream = self.flagStream[flagSize:] + flags = array.array('B', flagsData) + + triplets = array.array('B', self.glyphStream) + nTriplets = len(triplets) + assert nPoints <= nTriplets + + x = 0 + y = 0 + glyph.coordinates = getTableModule('glyf').GlyphCoordinates.zeros(nPoints) + glyph.flags = array.array("B") + tripletIndex = 0 + for i in range(nPoints): + flag = flags[i] + onCurve = not bool(flag >> 7) + flag &= 0x7f + if flag < 84: + nBytes = 1 + elif flag < 120: + nBytes = 2 + elif flag < 124: + nBytes = 3 + else: + nBytes = 4 + assert ((tripletIndex + nBytes) <= nTriplets) + if flag < 10: + dx = 0 + dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex]) + elif flag < 20: + dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex]) + dy = 0 + elif flag < 84: + b0 = flag - 20 + b1 = triplets[tripletIndex] + dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4)) + dy = withSign(flag >> 1, 1 + ((b0 & 0x0c) << 2) + 
(b1 & 0x0f)) + elif flag < 120: + b0 = flag - 84 + dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex]) + dy = withSign(flag >> 1, + 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1]) + elif flag < 124: + b2 = triplets[tripletIndex + 1] + dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4)) + dy = withSign(flag >> 1, + ((b2 & 0x0f) << 8) + triplets[tripletIndex + 2]) + else: + dx = withSign(flag, + (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1]) + dy = withSign(flag >> 1, + (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3]) + tripletIndex += nBytes + x += dx + y += dy + glyph.coordinates[i] = (x, y) + glyph.flags.append(int(onCurve)) + bytesConsumed = tripletIndex + self.glyphStream = self.glyphStream[bytesConsumed:] + + def _encodeGlyph(self, glyphID): + glyphName = self.getGlyphName(glyphID) + glyph = self[glyphName] + self.nContourStream += struct.pack(">h", glyph.numberOfContours) + if glyph.numberOfContours == 0: + return + elif glyph.isComposite(): + self._encodeComponents(glyph) + else: + self._encodeCoordinates(glyph) + self._encodeBBox(glyphID, glyph) + + def _encodeComponents(self, glyph): + lastcomponent = len(glyph.components) - 1 + more = 1 + haveInstructions = 0 + for i in range(len(glyph.components)): + if i == lastcomponent: + haveInstructions = hasattr(glyph, "program") + more = 0 + component = glyph.components[i] + self.compositeStream += component.compile(more, haveInstructions, self) + if haveInstructions: + self._encodeInstructions(glyph) + + def _encodeCoordinates(self, glyph): + lastEndPoint = -1 + for endPoint in glyph.endPtsOfContours: + ptsOfContour = endPoint - lastEndPoint + self.nPointsStream += pack255UShort(ptsOfContour) + lastEndPoint = endPoint + self._encodeTriplets(glyph) + self._encodeInstructions(glyph) + + def _encodeInstructions(self, glyph): + instructions = glyph.program.getBytecode() + self.glyphStream += pack255UShort(len(instructions)) + self.instructionStream += 
instructions + + def _encodeBBox(self, glyphID, glyph): + assert glyph.numberOfContours != 0, "empty glyph has no bbox" + if not glyph.isComposite(): + # for simple glyphs, compare the encoded bounding box info with the calculated + # values, and if they match omit the bounding box info + currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax + calculatedBBox = calcIntBounds(glyph.coordinates) + if currentBBox == calculatedBBox: + return + self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7) + self.bboxStream += sstruct.pack(bboxFormat, glyph) + + def _encodeTriplets(self, glyph): + assert len(glyph.coordinates) == len(glyph.flags) + coordinates = glyph.coordinates.copy() + coordinates.absoluteToRelative() + + flags = array.array('B') + triplets = array.array('B') + for i in range(len(coordinates)): + onCurve = glyph.flags[i] + x, y = coordinates[i] + absX = abs(x) + absY = abs(y) + onCurveBit = 0 if onCurve else 128 + xSignBit = 0 if (x < 0) else 1 + ySignBit = 0 if (y < 0) else 1 + xySignBits = xSignBit + 2 * ySignBit + + if x == 0 and absY < 1280: + flags.append(onCurveBit + ((absY & 0xf00) >> 7) + ySignBit) + triplets.append(absY & 0xff) + elif y == 0 and absX < 1280: + flags.append(onCurveBit + 10 + ((absX & 0xf00) >> 7) + xSignBit) + triplets.append(absX & 0xff) + elif absX < 65 and absY < 65: + flags.append(onCurveBit + 20 + ((absX - 1) & 0x30) + (((absY - 1) & 0x30) >> 2) + xySignBits) + triplets.append((((absX - 1) & 0xf) << 4) | ((absY - 1) & 0xf)) + elif absX < 769 and absY < 769: + flags.append(onCurveBit + 84 + 12 * (((absX - 1) & 0x300) >> 8) + (((absY - 1) & 0x300) >> 6) + xySignBits) + triplets.append((absX - 1) & 0xff) + triplets.append((absY - 1) & 0xff) + elif absX < 4096 and absY < 4096: + flags.append(onCurveBit + 120 + xySignBits) + triplets.append(absX >> 4) + triplets.append(((absX & 0xf) << 4) | (absY >> 8)) + triplets.append(absY & 0xff) + else: + flags.append(onCurveBit + 124 + xySignBits) + triplets.append(absX >> 8) + 
triplets.append(absX & 0xff) + triplets.append(absY >> 8) + triplets.append(absY & 0xff) + + self.flagStream += flags.tostring() + self.glyphStream += triplets.tostring() + + +class WOFF2FlavorData(WOFFFlavorData): + + Flavor = 'woff2' + + def __init__(self, reader=None): + if not haveBrotli: + raise ImportError("No module named brotli") + self.majorVersion = None + self.minorVersion = None + self.metaData = None + self.privData = None + if reader: + self.majorVersion = reader.majorVersion + self.minorVersion = reader.minorVersion + if reader.metaLength: + reader.file.seek(reader.metaOffset) + rawData = reader.file.read(reader.metaLength) + assert len(rawData) == reader.metaLength + data = brotli.decompress(rawData) + assert len(data) == reader.metaOrigLength + self.metaData = data + if reader.privLength: + reader.file.seek(reader.privOffset) + data = reader.file.read(reader.privLength) + assert len(data) == reader.privLength + self.privData = data + + +def unpackBase128(data): + r""" Read one to five bytes from UIntBase128-encoded input string, and return + a tuple containing the decoded integer plus any leftover data. + + >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00") + True + >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295 + True + >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + File "<stdin>", line 1, in ? + TTLibError: UIntBase128 value must not start with leading zeros + >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + File "<stdin>", line 1, in ? + TTLibError: UIntBase128-encoded sequence is longer than 5 bytes + >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + File "<stdin>", line 1, in ? 
+ TTLibError: UIntBase128 value exceeds 2**32-1 + """ + if len(data) == 0: + raise TTLibError('not enough data to unpack UIntBase128') + result = 0 + if byteord(data[0]) == 0x80: + # font must be rejected if UIntBase128 value starts with 0x80 + raise TTLibError('UIntBase128 value must not start with leading zeros') + for i in range(woff2Base128MaxSize): + if len(data) == 0: + raise TTLibError('not enough data to unpack UIntBase128') + code = byteord(data[0]) + data = data[1:] + # if any of the top seven bits are set then we're about to overflow + if result & 0xFE000000: + raise TTLibError('UIntBase128 value exceeds 2**32-1') + # set current value = old value times 128 bitwise-or (byte bitwise-and 127) + result = (result << 7) | (code & 0x7f) + # repeat until the most significant bit of byte is false + if (code & 0x80) == 0: + # return result plus left over data + return result, data + # make sure not to exceed the size bound + raise TTLibError('UIntBase128-encoded sequence is longer than 5 bytes') + + +def base128Size(n): + """ Return the length in bytes of a UIntBase128-encoded sequence with value n. + + >>> base128Size(0) + 1 + >>> base128Size(24567) + 3 + >>> base128Size(2**32-1) + 5 + """ + assert n >= 0 + size = 1 + while n >= 128: + size += 1 + n >>= 7 + return size + + +def packBase128(n): + r""" Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of + bytes using UIntBase128 variable-length encoding. Produce the shortest possible + encoding. 
+ + >>> packBase128(63) == b"\x3f" + True + >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f' + True + """ + if n < 0 or n >= 2**32: + raise TTLibError( + "UIntBase128 format requires 0 <= integer <= 2**32-1") + data = b'' + size = base128Size(n) + for i in range(size): + b = (n >> (7 * (size - i - 1))) & 0x7f + if i < size - 1: + b |= 0x80 + data += struct.pack('B', b) + return data + + +def unpack255UShort(data): + """ Read one to three bytes from 255UInt16-encoded input string, and return a + tuple containing the decoded integer plus any leftover data. + + >>> unpack255UShort(bytechr(252))[0] + 252 + + Note that some numbers (e.g. 506) can have multiple encodings: + >>> unpack255UShort(struct.pack("BB", 254, 0))[0] + 506 + >>> unpack255UShort(struct.pack("BB", 255, 253))[0] + 506 + >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0] + 506 + """ + code = byteord(data[:1]) + data = data[1:] + if code == 253: + # read two more bytes as an unsigned short + if len(data) < 2: + raise TTLibError('not enough data to unpack 255UInt16') + result, = struct.unpack(">H", data[:2]) + data = data[2:] + elif code == 254: + # read another byte, plus 253 * 2 + if len(data) == 0: + raise TTLibError('not enough data to unpack 255UInt16') + result = byteord(data[:1]) + result += 506 + data = data[1:] + elif code == 255: + # read another byte, plus 253 + if len(data) == 0: + raise TTLibError('not enough data to unpack 255UInt16') + result = byteord(data[:1]) + result += 253 + data = data[1:] + else: + # leave as is if lower than 253 + result = code + # return result plus left over data + return result, data + + +def pack255UShort(value): + r""" Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring + using 255UInt16 variable-length encoding. 
+ + >>> pack255UShort(252) == b'\xfc' + True + >>> pack255UShort(506) == b'\xfe\x00' + True + >>> pack255UShort(762) == b'\xfd\x02\xfa' + True + """ + if value < 0 or value > 0xFFFF: + raise TTLibError( + "255UInt16 format requires 0 <= integer <= 65535") + if value < 253: + return struct.pack(">B", value) + elif value < 506: + return struct.pack(">BB", 255, value - 253) + elif value < 762: + return struct.pack(">BB", 254, value - 506) + else: + return struct.pack(">BH", 253, value) + + +if __name__ == "__main__": + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Snippets/fontTools/ttLib/woff2_test.py fonttools-3.0/Snippets/fontTools/ttLib/woff2_test.py --- fonttools-2.4/Snippets/fontTools/ttLib/woff2_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttLib/woff2_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,747 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools import ttLib +from .woff2 import (WOFF2Reader, woff2DirectorySize, woff2DirectoryFormat, + woff2FlagsSize, woff2UnknownTagSize, woff2Base128MaxSize, WOFF2DirectoryEntry, + getKnownTagIndex, packBase128, base128Size, woff2UnknownTagIndex, + WOFF2FlavorData, woff2TransformedTableTags, WOFF2GlyfTable, WOFF2LocaTable, + WOFF2Writer) +import unittest +import sstruct +import os +import random +import copy +from collections import OrderedDict + +haveBrotli = False +try: + import brotli + haveBrotli = True +except ImportError: + pass + + +# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires +# deprecation warnings if a program uses the old name. 
+if not hasattr(unittest.TestCase, 'assertRaisesRegex'): + unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp + + +current_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +data_dir = os.path.join(current_dir, 'testdata') +TTX = os.path.join(data_dir, 'TestTTF-Regular.ttx') +OTX = os.path.join(data_dir, 'TestOTF-Regular.otx') +METADATA = os.path.join(data_dir, 'test_woff2_metadata.xml') + +TT_WOFF2 = BytesIO() +CFF_WOFF2 = BytesIO() + + +def setUpModule(): + if not haveBrotli: + raise unittest.SkipTest("No module named brotli") + assert os.path.exists(TTX) + assert os.path.exists(OTX) + # import TT-flavoured test font and save it as WOFF2 + ttf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + ttf.importXML(TTX, quiet=True) + ttf.flavor = "woff2" + ttf.save(TT_WOFF2, reorderTables=None) + # import CFF-flavoured test font and save it as WOFF2 + otf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + otf.importXML(OTX, quiet=True) + otf.flavor = "woff2" + otf.save(CFF_WOFF2, reorderTables=None) + + +class WOFF2ReaderTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.file = BytesIO(CFF_WOFF2.getvalue()) + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + cls.font.importXML(OTX, quiet=True) + + def setUp(self): + self.file.seek(0) + + def test_bad_signature(self): + with self.assertRaisesRegex(ttLib.TTLibError, 'bad signature'): + WOFF2Reader(BytesIO(b"wOFF")) + + def test_not_enough_data_header(self): + incomplete_header = self.file.read(woff2DirectorySize - 1) + with self.assertRaisesRegex(ttLib.TTLibError, 'not enough data'): + WOFF2Reader(BytesIO(incomplete_header)) + + def test_incorrect_compressed_size(self): + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + header['totalCompressedSize'] = 0 + data = sstruct.pack(woff2DirectoryFormat, header) + with self.assertRaises(brotli.error): + WOFF2Reader(BytesIO(data + 
self.file.read())) + + def test_incorrect_uncompressed_size(self): + decompress_backup = brotli.decompress + brotli.decompress = lambda data: b"" # return empty byte string + with self.assertRaisesRegex(ttLib.TTLibError, 'unexpected size for decompressed'): + WOFF2Reader(self.file) + brotli.decompress = decompress_backup + + def test_incorrect_file_size(self): + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + header['length'] -= 1 + data = sstruct.pack(woff2DirectoryFormat, header) + with self.assertRaisesRegex( + ttLib.TTLibError, "doesn't match the actual file size"): + WOFF2Reader(BytesIO(data + self.file.read())) + + def test_num_tables(self): + tags = [t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')] + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + self.assertEqual(header['numTables'], len(tags)) + + def test_table_tags(self): + tags = set([t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')]) + reader = WOFF2Reader(self.file) + self.assertEqual(set(reader.keys()), tags) + + def test_get_normal_tables(self): + woff2Reader = WOFF2Reader(self.file) + specialTags = woff2TransformedTableTags + ('head', 'GlyphOrder', 'DSIG') + for tag in [t for t in self.font.keys() if t not in specialTags]: + origData = self.font.getTableData(tag) + decompressedData = woff2Reader[tag] + self.assertEqual(origData, decompressedData) + + def test_reconstruct_unknown(self): + reader = WOFF2Reader(self.file) + with self.assertRaisesRegex(ttLib.TTLibError, 'transform for table .* unknown'): + reader.reconstructTable('ZZZZ') + + +class WOFF2ReaderTTFTest(WOFF2ReaderTest): + """ Tests specific to TT-flavored fonts. 
""" + + @classmethod + def setUpClass(cls): + cls.file = BytesIO(TT_WOFF2.getvalue()) + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + cls.font.importXML(TTX, quiet=True) + + def setUp(self): + self.file.seek(0) + + def test_reconstruct_glyf(self): + woff2Reader = WOFF2Reader(self.file) + reconstructedData = woff2Reader['glyf'] + self.assertEqual(self.font.getTableData('glyf'), reconstructedData) + + def test_reconstruct_loca(self): + woff2Reader = WOFF2Reader(self.file) + reconstructedData = woff2Reader['loca'] + self.assertEqual(self.font.getTableData('loca'), reconstructedData) + self.assertTrue(hasattr(woff2Reader.tables['glyf'], 'data')) + + def test_reconstruct_loca_not_match_orig_size(self): + reader = WOFF2Reader(self.file) + reader.tables['loca'].origLength -= 1 + with self.assertRaisesRegex( + ttLib.TTLibError, "'loca' table doesn't match original size"): + reader.reconstructTable('loca') + + +def normalise_table(font, tag, padding=4): + """ Return normalised table data. Keep 'font' instance unmodified. 
""" + assert tag in ('glyf', 'loca', 'head') + assert tag in font + if tag == 'head': + origHeadFlags = font['head'].flags + font['head'].flags |= (1 << 11) + tableData = font['head'].compile(font) + if font.sfntVersion in ("\x00\x01\x00\x00", "true"): + assert {'glyf', 'loca', 'head'}.issubset(font.keys()) + origIndexFormat = font['head'].indexToLocFormat + if hasattr(font['loca'], 'locations'): + origLocations = font['loca'].locations[:] + else: + origLocations = [] + glyfTable = ttLib.getTableClass('glyf')() + glyfTable.decompile(font.getTableData('glyf'), font) + glyfTable.padding = padding + if tag == 'glyf': + tableData = glyfTable.compile(font) + elif tag == 'loca': + glyfTable.compile(font) + tableData = font['loca'].compile(font) + if tag == 'head': + glyfTable.compile(font) + font['loca'].compile(font) + tableData = font['head'].compile(font) + font['head'].indexToLocFormat = origIndexFormat + font['loca'].set(origLocations) + if tag == 'head': + font['head'].flags = origHeadFlags + return tableData + + +def normalise_font(font, padding=4): + """ Return normalised font data. Keep 'font' instance unmodified. 
""" + # drop DSIG but keep a copy + DSIG_copy = copy.deepcopy(font['DSIG']) + del font['DSIG'] + # ovverride TTFont attributes + origFlavor = font.flavor + origRecalcBBoxes = font.recalcBBoxes + origRecalcTimestamp = font.recalcTimestamp + origLazy = font.lazy + font.flavor = None + font.recalcBBoxes = False + font.recalcTimestamp = False + font.lazy = True + # save font to temporary stream + infile = BytesIO() + font.save(infile) + infile.seek(0) + # reorder tables alphabetically + outfile = BytesIO() + reader = ttLib.sfnt.SFNTReader(infile) + writer = ttLib.sfnt.SFNTWriter( + outfile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) + for tag in sorted(reader.keys()): + if tag in woff2TransformedTableTags + ('head',): + writer[tag] = normalise_table(font, tag, padding) + else: + writer[tag] = reader[tag] + writer.close() + # restore font attributes + font['DSIG'] = DSIG_copy + font.flavor = origFlavor + font.recalcBBoxes = origRecalcBBoxes + font.recalcTimestamp = origRecalcTimestamp + font.lazy = origLazy + return outfile.getvalue() + + +class WOFF2DirectoryEntryTest(unittest.TestCase): + + def setUp(self): + self.entry = WOFF2DirectoryEntry() + + def test_not_enough_data_table_flags(self): + with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'flags'"): + self.entry.fromString(b"") + + def test_not_enough_data_table_tag(self): + incompleteData = bytearray([0x3F, 0, 0, 0]) + with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'tag'"): + self.entry.fromString(bytes(incompleteData)) + + def test_table_reserved_flags(self): + with self.assertRaisesRegex(ttLib.TTLibError, "bits 6-7 are reserved"): + self.entry.fromString(bytechr(0xC0)) + + def test_loca_zero_transformLength(self): + data = bytechr(getKnownTagIndex('loca')) # flags + data += packBase128(random.randint(1, 100)) # origLength + data += packBase128(1) # non-zero transformLength + with self.assertRaisesRegex( + ttLib.TTLibError, "transformLength of the 
'loca' table must be 0"): + self.entry.fromString(data) + + def test_fromFile(self): + unknownTag = Tag('ZZZZ') + data = bytechr(getKnownTagIndex(unknownTag)) + data += unknownTag.tobytes() + data += packBase128(random.randint(1, 100)) + expectedPos = len(data) + f = BytesIO(data + b'\0'*100) + self.entry.fromFile(f) + self.assertEqual(f.tell(), expectedPos) + + def test_transformed_toString(self): + self.entry.tag = Tag('glyf') + self.entry.flags = getKnownTagIndex(self.entry.tag) + self.entry.origLength = random.randint(101, 200) + self.entry.length = random.randint(1, 100) + expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength) + + base128Size(self.entry.length)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + def test_known_toString(self): + self.entry.tag = Tag('head') + self.entry.flags = getKnownTagIndex(self.entry.tag) + self.entry.origLength = 54 + expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + def test_unknown_toString(self): + self.entry.tag = Tag('ZZZZ') + self.entry.flags = woff2UnknownTagIndex + self.entry.origLength = random.randint(1, 100) + expectedSize = (woff2FlagsSize + woff2UnknownTagSize + + base128Size(self.entry.origLength)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + +class DummyReader(WOFF2Reader): + + def __init__(self, file, checkChecksums=1, fontNumber=-1): + self.file = file + for attr in ('majorVersion', 'minorVersion', 'metaOffset', 'metaLength', + 'metaOrigLength', 'privLength', 'privOffset'): + setattr(self, attr, 0) + + +class WOFF2FlavorDataTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + assert os.path.exists(METADATA) + with open(METADATA, 'rb') as f: + cls.xml_metadata = f.read() + cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) + # make random byte strings; font data must be 4-byte aligned + 
cls.fontdata = bytes(bytearray(random.sample(range(0, 256), 80))) + cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) + + def setUp(self): + self.file = BytesIO(self.fontdata) + self.file.seek(0, 2) + + def test_get_metaData_no_privData(self): + self.file.write(self.compressed_metadata) + reader = DummyReader(self.file) + reader.metaOffset = len(self.fontdata) + reader.metaLength = len(self.compressed_metadata) + reader.metaOrigLength = len(self.xml_metadata) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.xml_metadata, flavorData.metaData) + + def test_get_privData_no_metaData(self): + self.file.write(self.privData) + reader = DummyReader(self.file) + reader.privOffset = len(self.fontdata) + reader.privLength = len(self.privData) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.privData, flavorData.privData) + + def test_get_metaData_and_privData(self): + self.file.write(self.compressed_metadata + self.privData) + reader = DummyReader(self.file) + reader.metaOffset = len(self.fontdata) + reader.metaLength = len(self.compressed_metadata) + reader.metaOrigLength = len(self.xml_metadata) + reader.privOffset = reader.metaOffset + reader.metaLength + reader.privLength = len(self.privData) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.xml_metadata, flavorData.metaData) + self.assertEqual(self.privData, flavorData.privData) + + def test_get_major_minorVersion(self): + reader = DummyReader(self.file) + reader.majorVersion = reader.minorVersion = 1 + flavorData = WOFF2FlavorData(reader) + self.assertEqual(flavorData.majorVersion, 1) + self.assertEqual(flavorData.minorVersion, 1) + + +class WOFF2WriterTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") + cls.font.importXML(OTX, quiet=True) + cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] + cls.numTables = len(cls.tags) + cls.file = 
BytesIO(CFF_WOFF2.getvalue()) + cls.file.seek(0, 2) + cls.length = (cls.file.tell() + 3) & ~3 + cls.setUpFlavorData() + + @classmethod + def setUpFlavorData(cls): + assert os.path.exists(METADATA) + with open(METADATA, 'rb') as f: + cls.xml_metadata = f.read() + cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) + cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) + + def setUp(self): + self.file.seek(0) + self.writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion) + + def test_DSIG_dropped(self): + self.writer['DSIG'] = b"\0" + self.assertEqual(len(self.writer.tables), 0) + self.assertEqual(self.writer.numTables, self.numTables-1) + + def test_no_rewrite_table(self): + self.writer['ZZZZ'] = b"\0" + with self.assertRaisesRegex(ttLib.TTLibError, "cannot rewrite"): + self.writer['ZZZZ'] = b"\0" + + def test_num_tables(self): + self.writer['ABCD'] = b"\0" + with self.assertRaisesRegex(ttLib.TTLibError, "wrong number of tables"): + self.writer.close() + + def test_required_tables(self): + font = ttLib.TTFont(flavor="woff2") + with self.assertRaisesRegex(ttLib.TTLibError, "missing required table"): + font.save(BytesIO()) + + def test_head_transform_flag(self): + headData = self.font.getTableData('head') + origFlags = byteord(headData[16]) + woff2font = ttLib.TTFont(self.file) + newHeadData = woff2font.getTableData('head') + modifiedFlags = byteord(newHeadData[16]) + self.assertNotEqual(origFlags, modifiedFlags) + restoredFlags = modifiedFlags & ~0x08 # turn off bit 11 + self.assertEqual(origFlags, restoredFlags) + + def test_tables_sorted_alphabetically(self): + expected = sorted([t for t in self.tags if t != 'DSIG']) + woff2font = ttLib.TTFont(self.file) + self.assertEqual(expected, list(woff2font.reader.keys())) + + def test_checksums(self): + normFile = BytesIO(normalise_font(self.font, padding=4)) + normFile.seek(0) + normFont = ttLib.TTFont(normFile, checkChecksums=2) + w2font = ttLib.TTFont(self.file) 
+ # force reconstructing glyf table using 4-byte padding + w2font.reader.padding = 4 + for tag in [t for t in self.tags if t != 'DSIG']: + w2data = w2font.reader[tag] + normData = normFont.reader[tag] + if tag == "head": + w2data = w2data[:8] + b'\0\0\0\0' + w2data[12:] + normData = normData[:8] + b'\0\0\0\0' + normData[12:] + w2CheckSum = ttLib.sfnt.calcChecksum(w2data) + normCheckSum = ttLib.sfnt.calcChecksum(normData) + self.assertEqual(w2CheckSum, normCheckSum) + normCheckSumAdjustment = normFont['head'].checkSumAdjustment + self.assertEqual(normCheckSumAdjustment, w2font['head'].checkSumAdjustment) + + def test_calcSFNTChecksumsLengthsAndOffsets(self): + normFont = ttLib.TTFont(BytesIO(normalise_font(self.font, padding=4))) + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer._normaliseGlyfAndLoca(padding=4) + self.writer._setHeadTransformFlag() + self.writer.tables = OrderedDict(sorted(self.writer.tables.items())) + self.writer._calcSFNTChecksumsLengthsAndOffsets() + for tag, entry in normFont.reader.tables.items(): + self.assertEqual(entry.offset, self.writer.tables[tag].origOffset) + self.assertEqual(entry.length, self.writer.tables[tag].origLength) + self.assertEqual(entry.checkSum, self.writer.tables[tag].checkSum) + + def test_bad_sfntVersion(self): + for i in range(self.numTables): + self.writer[bytechr(65 + i)*4] = b"\0" + self.writer.sfntVersion = 'ZZZZ' + with self.assertRaisesRegex(ttLib.TTLibError, "bad sfntVersion"): + self.writer.close() + + def test_calcTotalSize_no_flavorData(self): + expected = self.length + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_metaData(self): + expected = self.length + len(self.compressed_metadata) + flavorData = self.writer.flavorData = WOFF2FlavorData() + 
flavorData.metaData = self.xml_metadata + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_privData(self): + expected = self.length + len(self.privData) + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.privData = self.privData + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_metaData_and_privData(self): + metaDataLength = (len(self.compressed_metadata) + 3) & ~3 + expected = self.length + metaDataLength + len(self.privData) + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.metaData = self.xml_metadata + flavorData.privData = self.privData + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_getVersion(self): + # no version + self.assertEqual((0, 0), self.writer._getVersion()) + # version from head.fontRevision + fontRevision = self.font['head'].fontRevision + versionTuple = tuple(int(i) for i in str(fontRevision).split(".")) + entry = self.writer.tables['head'] = ttLib.getTableClass('head')() + entry.data = self.font.getTableData('head') + self.assertEqual(versionTuple, self.writer._getVersion()) + # version from writer.flavorData + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.majorVersion, flavorData.minorVersion = (10, 11) + self.assertEqual((10, 11), self.writer._getVersion()) + + +class WOFF2WriterTTFTest(WOFF2WriterTest): + + @classmethod + def setUpClass(cls): + cls.font = 
ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") + cls.font.importXML(TTX, quiet=True) + cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] + cls.numTables = len(cls.tags) + cls.file = BytesIO(TT_WOFF2.getvalue()) + cls.file.seek(0, 2) + cls.length = (cls.file.tell() + 3) & ~3 + cls.setUpFlavorData() + + def test_normaliseGlyfAndLoca(self): + normTables = {} + for tag in ('head', 'loca', 'glyf'): + normTables[tag] = normalise_table(self.font, tag, padding=4) + for tag in self.tags: + tableData = self.font.getTableData(tag) + self.writer[tag] = tableData + if tag in normTables: + self.assertNotEqual(tableData, normTables[tag]) + self.writer._normaliseGlyfAndLoca(padding=4) + self.writer._setHeadTransformFlag() + for tag in normTables: + self.assertEqual(self.writer.tables[tag].data, normTables[tag]) + + +class WOFF2LocaTableTest(unittest.TestCase): + + def setUp(self): + self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font['head'] = ttLib.getTableClass('head') + font['loca'] = WOFF2LocaTable() + font['glyf'] = WOFF2GlyfTable() + + def test_compile_short_loca(self): + locaTable = self.font['loca'] + locaTable.set(list(range(0, 0x20000, 2))) + self.font['glyf'].indexFormat = 0 + locaData = locaTable.compile(self.font) + self.assertEqual(len(locaData), 0x20000) + + def test_compile_short_loca_overflow(self): + locaTable = self.font['loca'] + locaTable.set(list(range(0x20000 + 1))) + self.font['glyf'].indexFormat = 0 + with self.assertRaisesRegex( + ttLib.TTLibError, "indexFormat is 0 but local offsets > 0x20000"): + locaTable.compile(self.font) + + def test_compile_short_loca_not_multiples_of_2(self): + locaTable = self.font['loca'] + locaTable.set([1, 3, 5, 7]) + self.font['glyf'].indexFormat = 0 + with self.assertRaisesRegex(ttLib.TTLibError, "offsets not multiples of 2"): + locaTable.compile(self.font) + + def test_compile_long_loca(self): + locaTable = self.font['loca'] + 
locaTable.set(list(range(0x20001))) + self.font['glyf'].indexFormat = 1 + locaData = locaTable.compile(self.font) + self.assertEqual(len(locaData), 0x20001 * 4) + + def test_compile_set_indexToLocFormat_0(self): + locaTable = self.font['loca'] + # offsets are all multiples of 2 and max length is < 0x10000 + locaTable.set(list(range(0, 0x20000, 2))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(0, newIndexFormat) + + def test_compile_set_indexToLocFormat_1(self): + locaTable = self.font['loca'] + # offsets are not multiples of 2 + locaTable.set(list(range(10))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(1, newIndexFormat) + # max length is >= 0x10000 + locaTable.set(list(range(0, 0x20000 + 1, 2))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(1, newIndexFormat) + + +class WOFF2GlyfTableTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font.importXML(TTX, quiet=True) + cls.tables = {} + cls.transformedTags = ('maxp', 'head', 'loca', 'glyf') + for tag in reversed(cls.transformedTags): # compile in inverse order + cls.tables[tag] = font.getTableData(tag) + infile = BytesIO(TT_WOFF2.getvalue()) + reader = WOFF2Reader(infile) + cls.transformedGlyfData = reader.tables['glyf'].loadData( + reader.transformBuffer) + + def setUp(self): + self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font['head'] = ttLib.getTableClass('head')() + font['maxp'] = ttLib.getTableClass('maxp')() + font['loca'] = WOFF2LocaTable() + font['glyf'] = WOFF2GlyfTable() + for tag in self.transformedTags: + font[tag].decompile(self.tables[tag], font) + + def test_reconstruct_glyf_padded_4(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 4 + data = 
glyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) + self.assertEqual(normGlyfData, data) + + def test_reconstruct_glyf_padded_2(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 2 + data = glyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) + self.assertEqual(normGlyfData, data) + + def test_reconstruct_glyf_unpadded(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + data = glyfTable.compile(self.font) + self.assertEqual(self.tables['glyf'], data) + + def test_reconstruct_glyf_incorrect_glyphOrder(self): + glyfTable = WOFF2GlyfTable() + badGlyphOrder = self.font.getGlyphOrder()[:-1] + self.font.setGlyphOrder(badGlyphOrder) + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.reconstruct(self.transformedGlyfData, self.font) + + def test_reconstruct_glyf_missing_glyphOrder(self): + glyfTable = WOFF2GlyfTable() + del self.font.glyphOrder + numGlyphs = self.font['maxp'].numGlyphs + del self.font['maxp'] + glyfTable.reconstruct(self.transformedGlyfData, self.font) + expected = [".notdef"] + expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)]) + self.assertEqual(expected, glyfTable.glyphOrder) + + def test_reconstruct_loca_padded_4(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 4 + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) + self.assertEqual(normLocaData, data) + + def test_reconstruct_loca_padded_2(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + 
glyfTable.padding = 2 + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) + self.assertEqual(normLocaData, data) + + def test_reconstruct_loca_unpadded(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + self.assertEqual(self.tables['loca'], data) + + def test_reconstruct_glyf_header_not_enough_data(self): + with self.assertRaisesRegex(ttLib.TTLibError, "not enough 'glyf' data"): + WOFF2GlyfTable().reconstruct(b"", self.font) + + def test_reconstruct_glyf_table_incorrect_size(self): + msg = "incorrect size of transformed 'glyf'" + with self.assertRaisesRegex(ttLib.TTLibError, msg): + WOFF2GlyfTable().reconstruct(self.transformedGlyfData + b"\x00", self.font) + with self.assertRaisesRegex(ttLib.TTLibError, msg): + WOFF2GlyfTable().reconstruct(self.transformedGlyfData[:-1], self.font) + + def test_transform_glyf(self): + glyfTable = self.font['glyf'] + data = glyfTable.transform(self.font) + self.assertEqual(self.transformedGlyfData, data) + + def test_transform_glyf_incorrect_glyphOrder(self): + glyfTable = self.font['glyf'] + badGlyphOrder = self.font.getGlyphOrder()[:-1] + del glyfTable.glyphOrder + self.font.setGlyphOrder(badGlyphOrder) + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.transform(self.font) + glyfTable.glyphOrder = badGlyphOrder + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.transform(self.font) + + def test_transform_glyf_missing_glyphOrder(self): + glyfTable = self.font['glyf'] + del glyfTable.glyphOrder + del self.font.glyphOrder + numGlyphs = self.font['maxp'].numGlyphs + del self.font['maxp'] + glyfTable.transform(self.font) + expected = [".notdef"] + expected.extend(["glyph%.5d" % i for i in 
range(1, numGlyphs)]) + self.assertEqual(expected, glyfTable.glyphOrder) + + def test_roundtrip_glyf_reconstruct_and_transform(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + data = glyfTable.transform(self.font) + self.assertEqual(self.transformedGlyfData, data) + + def test_roundtrip_glyf_transform_and_reconstruct(self): + glyfTable = self.font['glyf'] + transformedData = glyfTable.transform(self.font) + newGlyfTable = WOFF2GlyfTable() + newGlyfTable.reconstruct(transformedData, self.font) + newGlyfTable.padding = 4 + reconstructedData = newGlyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', newGlyfTable.padding) + self.assertEqual(normGlyfData, reconstructedData) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Snippets/fontTools/ttx.py fonttools-3.0/Snippets/fontTools/ttx.py --- fonttools-2.4/Snippets/fontTools/ttx.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/ttx.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,370 @@ +"""\ +usage: ttx [options] inputfile1 [... inputfileN] + + TTX %s -- From OpenType To XML And Back + + If an input file is a TrueType or OpenType font file, it will be + dumped to an TTX file (an XML-based text format). + If an input file is a TTX file, it will be compiled to a TrueType + or OpenType font file. + + Output files are created so they are unique: an existing file is + never overwritten. + + General options: + -h Help: print this message + -d <outputfolder> Specify a directory where the output files are + to be created. + -o <outputfile> Specify a file to write the output to. A special + value of of - would use the standard output. + -f Overwrite existing output file(s), ie. don't append numbers. + -v Verbose: more messages will be written to stdout about what + is being done. + -q Quiet: No messages will be written to stdout about what + is being done. 
+ -a allow virtual glyphs ID's on compile or decompile. + + Dump options: + -l List table info: instead of dumping to a TTX file, list some + minimal info about each table. + -t <table> Specify a table to dump. Multiple -t options + are allowed. When no -t option is specified, all tables + will be dumped. + -x <table> Specify a table to exclude from the dump. Multiple + -x options are allowed. -t and -x are mutually exclusive. + -s Split tables: save the TTX data into separate TTX files per + table and write one small TTX file that contains references + to the individual table dumps. This file can be used as + input to ttx, as long as the table files are in the + same directory. + -i Do NOT disassemble TT instructions: when this option is given, + all TrueType programs (glyph programs, the font program and the + pre-program) will be written to the TTX file as hex data + instead of assembly. This saves some time and makes the TTX + file smaller. + -z <format> Specify a bitmap data export option for EBDT: + {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT: + {'raw', 'extfile'} Each option does one of the following: + -z raw + * export the bitmap data as a hex dump + -z row + * export each row as hex data + -z bitwise + * export each row as binary in an ASCII art style + -z extfile + * export the data as external files with XML references + If no export format is specified 'raw' format is used. + -e Don't ignore decompilation errors, but show a full traceback + and abort. + -y <number> Select font number for TrueType Collection, + starting from 0. + --unicodedata <UnicodeData.txt> Use custom database file to write + character names in the comments of the cmap TTX output. + + Compile options: + -m Merge with TrueType-input-file: specify a TrueType or OpenType + font file to be merged with the TTX file. This option is only + valid when at most one TTX file is specified. + -b Don't recalc glyph bounding boxes: use the values in the TTX + file as-is. 
+ --recalc-timestamp Set font 'modified' timestamp to current time. + By default, the modification time of the TTX file will be used. +""" + + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont, TTLibError +from fontTools.misc.macCreatorType import getMacCreatorAndType +from fontTools.unicode import setUnicodeData +from fontTools.misc.timeTools import timestampSinceEpoch +import os +import sys +import getopt +import re + +def usage(): + from fontTools import version + print(__doc__ % version) + sys.exit(2) + + +numberAddedRE = re.compile("#\d+$") +opentypeheaderRE = re.compile('''sfntVersion=['"]OTTO["']''') + +def makeOutputFileName(input, outputDir, extension, overWrite=False): + dirName, fileName = os.path.split(input) + fileName, ext = os.path.splitext(fileName) + if outputDir: + dirName = outputDir + fileName = numberAddedRE.split(fileName)[0] + output = os.path.join(dirName, fileName + extension) + n = 1 + if not overWrite: + while os.path.exists(output): + output = os.path.join(dirName, fileName + "#" + repr(n) + extension) + n = n + 1 + return output + + +class Options(object): + + listTables = False + outputDir = None + outputFile = None + overWrite = False + verbose = False + quiet = False + splitTables = False + disassembleInstructions = True + mergeFile = None + recalcBBoxes = True + allowVID = False + ignoreDecompileErrors = True + bitmapGlyphDataFormat = 'raw' + unicodedata = None + recalcTimestamp = False + + def __init__(self, rawOptions, numFiles): + self.onlyTables = [] + self.skipTables = [] + self.fontNumber = -1 + for option, value in rawOptions: + # general options + if option == "-h": + from fontTools import version + print(__doc__ % version) + sys.exit(0) + elif option == "-d": + if not os.path.isdir(value): + print("The -d option value must be an existing directory") + sys.exit(2) + self.outputDir = value + elif option == "-o": + self.outputFile = value + 
elif option == "-f": + self.overWrite = True + elif option == "-v": + self.verbose = True + elif option == "-q": + self.quiet = True + # dump options + elif option == "-l": + self.listTables = True + elif option == "-t": + self.onlyTables.append(value) + elif option == "-x": + self.skipTables.append(value) + elif option == "-s": + self.splitTables = True + elif option == "-i": + self.disassembleInstructions = False + elif option == "-z": + validOptions = ('raw', 'row', 'bitwise', 'extfile') + if value not in validOptions: + print("-z does not allow %s as a format. Use %s" % (option, validOptions)) + sys.exit(2) + self.bitmapGlyphDataFormat = value + elif option == "-y": + self.fontNumber = int(value) + # compile options + elif option == "-m": + self.mergeFile = value + elif option == "-b": + self.recalcBBoxes = False + elif option == "-a": + self.allowVID = True + elif option == "-e": + self.ignoreDecompileErrors = False + elif option == "--unicodedata": + self.unicodedata = value + elif option == "--recalc-timestamp": + self.recalcTimestamp = True + if self.onlyTables and self.skipTables: + print("-t and -x options are mutually exclusive") + sys.exit(2) + if self.mergeFile and numFiles > 1: + print("Must specify exactly one TTX source file when using -m") + sys.exit(2) + + +def ttList(input, output, options): + ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True) + reader = ttf.reader + tags = sorted(reader.keys()) + print('Listing table info for "%s":' % input) + format = " %4s %10s %7s %7s" + print(format % ("tag ", " checksum", " length", " offset")) + print(format % ("----", "----------", "-------", "-------")) + for tag in tags: + entry = reader.tables[tag] + if ttf.flavor == "woff2": + # WOFF2 doesn't store table checksums, so they must be calculated + from fontTools.ttLib.sfnt import calcChecksum + data = entry.loadData(reader.transformBuffer) + checkSum = calcChecksum(data) + else: + checkSum = int(entry.checkSum) + if checkSum < 0: + checkSum = 
checkSum + 0x100000000 + checksum = "0x%08X" % checkSum + print(format % (tag, checksum, entry.length, entry.offset)) + print() + ttf.close() + + +def ttDump(input, output, options): + if not options.quiet: + print('Dumping "%s" to "%s"...' % (input, output)) + if options.unicodedata: + setUnicodeData(options.unicodedata) + ttf = TTFont(input, 0, verbose=options.verbose, allowVID=options.allowVID, + quiet=options.quiet, + ignoreDecompileErrors=options.ignoreDecompileErrors, + fontNumber=options.fontNumber) + ttf.saveXML(output, + quiet=options.quiet, + tables=options.onlyTables, + skipTables=options.skipTables, + splitTables=options.splitTables, + disassembleInstructions=options.disassembleInstructions, + bitmapGlyphDataFormat=options.bitmapGlyphDataFormat) + ttf.close() + + +def ttCompile(input, output, options): + if not options.quiet: + print('Compiling "%s" to "%s"...' % (input, output)) + ttf = TTFont(options.mergeFile, + recalcBBoxes=options.recalcBBoxes, + recalcTimestamp=options.recalcTimestamp, + verbose=options.verbose, allowVID=options.allowVID) + ttf.importXML(input, quiet=options.quiet) + + if not options.recalcTimestamp: + # use TTX file modification time for head "modified" timestamp + mtime = os.path.getmtime(input) + ttf['head'].modified = timestampSinceEpoch(mtime) + + ttf.save(output) + + if options.verbose: + import time + print("finished at", time.strftime("%H:%M:%S", time.localtime(time.time()))) + + +def guessFileType(fileName): + base, ext = os.path.splitext(fileName) + try: + f = open(fileName, "rb") + except IOError: + return None + cr, tp = getMacCreatorAndType(fileName) + if tp in ("sfnt", "FFIL"): + return "TTF" + if ext == ".dfont": + return "TTF" + header = f.read(256) + head = Tag(header[:4]) + if head == "OTTO": + return "OTF" + elif head == "ttcf": + return "TTC" + elif head in ("\0\1\0\0", "true"): + return "TTF" + elif head == "wOFF": + return "WOFF" + elif head == "wOF2": + return "WOFF2" + elif head.lower() == "<?xm": + # Use 
'latin1' because that can't fail. + header = tostr(header, 'latin1') + if opentypeheaderRE.search(header): + return "OTX" + else: + return "TTX" + return None + + +def parseOptions(args): + try: + rawOptions, files = getopt.getopt(args, "ld:o:fvqht:x:sim:z:baey:", + ['unicodedata=', "recalc-timestamp"]) + except getopt.GetoptError: + usage() + + if not files: + usage() + + options = Options(rawOptions, len(files)) + jobs = [] + + for input in files: + tp = guessFileType(input) + if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"): + extension = ".ttx" + if options.listTables: + action = ttList + else: + action = ttDump + elif tp == "TTX": + extension = ".ttf" + action = ttCompile + elif tp == "OTX": + extension = ".otf" + action = ttCompile + else: + print('Unknown file type: "%s"' % input) + continue + + if options.outputFile: + output = options.outputFile + else: + output = makeOutputFileName(input, options.outputDir, extension, options.overWrite) + # 'touch' output file to avoid race condition in choosing file names + if action != ttList: + open(output, 'a').close() + jobs.append((action, input, output)) + return jobs, options + + +def process(jobs, options): + for action, input, output in jobs: + action(input, output, options) + + +def waitForKeyPress(): + """Force the DOS Prompt window to stay open so the user gets + a chance to see what's wrong.""" + import msvcrt + print('(Hit any key to exit)') + while not msvcrt.kbhit(): + pass + + +def main(args=None): + if args is None: + args = sys.argv[1:] + jobs, options = parseOptions(args) + try: + process(jobs, options) + except KeyboardInterrupt: + print("(Cancelled.)") + except SystemExit: + if sys.platform == "win32": + waitForKeyPress() + else: + raise + except TTLibError as e: + print("Error:",e) + except: + if sys.platform == "win32": + import traceback + traceback.print_exc() + waitForKeyPress() + else: + raise + + +if __name__ == "__main__": + main() diff -Nru fonttools-2.4/Snippets/fontTools/unicode.py 
fonttools-3.0/Snippets/fontTools/unicode.py --- fonttools-2.4/Snippets/fontTools/unicode.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/fontTools/unicode.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,43 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +def _makeunicodes(f): + import re + lines = iter(f.readlines()) + unicodes = {} + for line in lines: + if not line: continue + num, name = line.split(';')[:2] + if name[0] == '<': continue # "<control>", etc. + num = int(num, 16) + unicodes[num] = name + return unicodes + + +class _UnicodeCustom(object): + + def __init__(self, f): + if isinstance(f, basestring): + f = open(f) + self.codes = _makeunicodes(f) + + def __getitem__(self, charCode): + try: + return self.codes[charCode] + except KeyError: + return "????" + +class _UnicodeBuiltin(object): + + def __getitem__(self, charCode): + import unicodedata + try: + return unicodedata.name(unichr(charCode)) + except ValueError: + return "????" + +Unicode = _UnicodeBuiltin() + +def setUnicodeData(f): + global Unicode + Unicode = _UnicodeCustom(f) diff -Nru fonttools-2.4/Snippets/interpolate.py fonttools-3.0/Snippets/interpolate.py --- fonttools-2.4/Snippets/interpolate.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/interpolate.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,142 @@ +#! /usr/bin/env python + +# Illustrates how a fonttools script can construct variable fonts. +# +# This script reads Roboto-Thin.ttf, Roboto-Regular.ttf, and +# Roboto-Black.ttf from /tmp/Roboto, and writes a Multiple Master GX +# font named "Roboto.ttf" into the current working directory. +# This output font supports interpolation along the Weight axis, +# and it contains named instances for "Thin", "Light", "Regular", +# "Bold", and "Black". +# +# All input fonts must contain the same set of glyphs, and these glyphs +# need to have the same control points in the same order. 
Note that this +# is *not* the case for the normal Roboto fonts that can be downloaded +# from Google. This demo script prints a warning for any problematic +# glyphs; in the resulting font, these glyphs will not be interpolated +# and get rendered in the "Regular" weight. +# +# Usage: +# $ mkdir /tmp/Roboto && cp Roboto-*.ttf /tmp/Roboto +# $ ./interpolate.py && open Roboto.ttf + + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables._n_a_m_e import NameRecord +from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis, NamedInstance +from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r, GlyphVariation +import warnings + + +def AddFontVariations(font): + assert "fvar" not in font + fvar = font["fvar"] = table__f_v_a_r() + + weight = Axis() + weight.axisTag = "wght" + weight.nameID = AddName(font, "Weight").nameID + weight.minValue, weight.defaultValue, weight.maxValue = (100, 400, 900) + fvar.axes.append(weight) + + # https://www.microsoft.com/typography/otspec/os2.htm#wtc + for name, wght in ( + ("Thin", 100), + ("Light", 300), + ("Regular", 400), + ("Bold", 700), + ("Black", 900)): + inst = NamedInstance() + inst.nameID = AddName(font, name).nameID + inst.coordinates = {"wght": wght} + fvar.instances.append(inst) + + +def AddName(font, name): + """(font, "Bold") --> NameRecord""" + nameTable = font.get("name") + namerec = NameRecord() + namerec.nameID = 1 + max([n.nameID for n in nameTable.names] + [256]) + namerec.string = name.encode("mac_roman") + namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0) + nameTable.names.append(namerec) + return namerec + + +def AddGlyphVariations(font, thin, regular, black): + assert "gvar" not in font + gvar = font["gvar"] = table__g_v_a_r() + gvar.version = 1 + gvar.reserved = 0 + gvar.variations = {} + for glyphName in regular.getGlyphOrder(): + regularCoord = GetCoordinates(regular, 
glyphName) + thinCoord = GetCoordinates(thin, glyphName) + blackCoord = GetCoordinates(black, glyphName) + if not regularCoord or not blackCoord or not thinCoord: + warnings.warn("glyph %s not present in all input fonts" % + glyphName) + continue + if (len(regularCoord) != len(blackCoord) or + len(regularCoord) != len(thinCoord)): + warnings.warn("glyph %s has not the same number of " + "control points in all input fonts" % glyphName) + continue + thinDelta = [] + blackDelta = [] + for ((regX, regY), (blackX, blackY), (thinX, thinY)) in \ + zip(regularCoord, blackCoord, thinCoord): + thinDelta.append(((thinX - regX, thinY - regY))) + blackDelta.append((blackX - regX, blackY - regY)) + thinVar = GlyphVariation({"wght": (-1.0, -1.0, 0.0)}, thinDelta) + blackVar = GlyphVariation({"wght": (0.0, 1.0, 1.0)}, blackDelta) + gvar.variations[glyphName] = [thinVar, blackVar] + + +def GetCoordinates(font, glyphName): + """font, glyphName --> glyph coordinates as expected by "gvar" table + + The result includes four "phantom points" for the glyph metrics, + as mandated by the "gvar" spec. + """ + glyphTable = font["glyf"] + glyph = glyphTable.glyphs.get(glyphName) + if glyph is None: + return None + glyph.expand(glyphTable) + glyph.recalcBounds(glyphTable) + if glyph.isComposite(): + coord = [c.getComponentInfo()[1][-2:] for c in glyph.components] + else: + coord = [c for c in glyph.getCoordinates(glyphTable)[0]] + # Add phantom points for (left, right, top, bottom) positions. + horizontalAdvanceWidth, leftSideBearing = font["hmtx"].metrics[glyphName] + + + leftSideX = glyph.xMin - leftSideBearing + rightSideX = leftSideX + horizontalAdvanceWidth + + # XXX these are incorrect. Load vmtx and fix. 
+ topSideY = glyph.yMax + bottomSideY = -glyph.yMin + + coord.extend([(leftSideX, 0), + (rightSideX, 0), + (0, topSideY), + (0, bottomSideY)]) + return coord + + +def main(): + thin = TTFont("/tmp/Roboto/Roboto-Thin.ttf") + regular = TTFont("/tmp/Roboto/Roboto-Regular.ttf") + black = TTFont("/tmp/Roboto/Roboto-Black.ttf") + out = regular + AddFontVariations(out) + AddGlyphVariations(out, thin, regular, black) + out.save("./Roboto.ttf") + + +if __name__ == "__main__": + main() diff -Nru fonttools-2.4/Snippets/layout-features.py fonttools-3.0/Snippets/layout-features.py --- fonttools-2.4/Snippets/layout-features.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/layout-features.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,47 @@ +#! /usr/bin/env python + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables import otTables +import sys + +if len(sys.argv) != 2: + print("usage: layout-features.py fontfile.ttf") + sys.exit(1) +fontfile = sys.argv[1] +font = TTFont(fontfile) + +for tag in ('GSUB', 'GPOS'): + if not tag in font: continue + print("Table:", tag) + table = font[tag].table + if not table.ScriptList or not table.FeatureList: continue + featureRecords = table.FeatureList.FeatureRecord + for script in table.ScriptList.ScriptRecord: + print(" Script:", script.ScriptTag) + if not script.Script: + print (" Null script.") + continue + languages = list(script.Script.LangSysRecord) + if script.Script.DefaultLangSys: + defaultlangsys = otTables.LangSysRecord() + defaultlangsys.LangSysTag = "default" + defaultlangsys.LangSys = script.Script.DefaultLangSys + languages.insert(0, defaultlangsys) + for langsys in languages: + print(" Language:", langsys.LangSysTag) + if not langsys.LangSys: + print (" Null language.") + continue + features = [featureRecords[index] for index in langsys.LangSys.FeatureIndex] + if langsys.LangSys.ReqFeatureIndex != 
0xFFFF: + record = featureRecords[langsys.LangSys.ReqFeatureIndex] + requiredfeature = otTables.FeatureRecord() + requiredfeature.FeatureTag = 'required(%s)' % record.FeatureTag + requiredfeature.Feature = record.Feature + features.insert(0, requiredfeature) + for feature in features: + print(" Feature:", feature.FeatureTag) + lookups = feature.Feature.LookupListIndex + print(" Lookups:", ','.join(str(l) for l in lookups)) diff -Nru fonttools-2.4/Snippets/README fonttools-3.0/Snippets/README --- fonttools-2.4/Snippets/README 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/README 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +This directory includes snippets that people might useful to get ideas +from. The contents will come and go, don't rely on them being there or +having a certain API. If you need it, copy it and modify it. diff -Nru fonttools-2.4/Snippets/subset-fpgm.py fonttools-3.0/Snippets/subset-fpgm.py --- fonttools-2.4/Snippets/subset-fpgm.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/subset-fpgm.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,60 @@ +#! 
/usr/bin/env python + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +import sys + +if len(sys.argv) < 2: + print("usage: subset-fpgm.py fontfile.ttf func-number...") + sys.exit(1) +fontfile = sys.argv[1] +func_nums = [int(x) for x in sys.argv[2:]] + +font = TTFont(fontfile) +fpgm = font['fpgm'] + +# Parse fpgm +asm = fpgm.program.getAssembly() +funcs = {} +stack = [] +tokens = iter(asm) +for token in tokens: + if token.startswith("PUSH") or token.startswith("NPUSH"): + for token in tokens: + try: + num = int(token) + stack.append(num) + except ValueError: + break + if token.startswith("FDEF"): + num = stack.pop() + body = [] + for token in tokens: + if token.startswith("ENDF"): + break + body.append(token) + funcs[num] = body + continue + assert 0, "Unexpected token in fpgm: %s" % token + +# Subset! +funcs = {i:funcs[i] for i in func_nums} + +# Put it back together: +asm = [] +if funcs: + asm.append("PUSH[ ]") +nums = sorted(funcs.keys()) +asm.extend(str(i) for i in nums) +for i in nums: + asm.append("FDEF[ ]") + asm.extend(funcs[i]) + asm.append("ENDF[ ]") + +import pprint +pprint.pprint(asm) + +fpgm.program.fromAssembly(asm) +# Make sure it compiles +fpgm.program.getBytecode() diff -Nru fonttools-2.4/Snippets/woff2_compress.py fonttools-3.0/Snippets/woff2_compress.py --- fonttools-2.4/Snippets/woff2_compress.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/woff2_compress.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.ttx import makeOutputFileName +import sys +import os + + +def main(args=None): + if args is None: + args = sys.argv[1:] + if len(args) < 1: + print("One argument, the input filename, must be provided.", file=sys.stderr) + sys.exit(1) + + filename = args[0] + 
outfilename = makeOutputFileName(filename, outputDir=None, extension='.woff2') + + print("Processing %s => %s" % (filename, outfilename)) + + font = TTFont(filename, recalcBBoxes=False, recalcTimestamp=False) + font.flavor = "woff2" + font.save(outfilename, reorderTables=False) + + +if __name__ == '__main__': + main() diff -Nru fonttools-2.4/Snippets/woff2_decompress.py fonttools-3.0/Snippets/woff2_decompress.py --- fonttools-2.4/Snippets/woff2_decompress.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Snippets/woff2_decompress.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.ttx import makeOutputFileName +import sys +import os + + +def make_output_name(filename): + with open(filename, "rb") as f: + f.seek(4) + sfntVersion = f.read(4) + assert len(sfntVersion) == 4, "not enough data" + ext = '.ttf' if sfntVersion == b"\x00\x01\x00\x00" else ".otf" + outfilename = makeOutputFileName(filename, outputDir=None, extension=ext) + return outfilename + + +def main(args=None): + if args is None: + args = sys.argv[1:] + if len(args) < 1: + print("One argument, the input filename, must be provided.", file=sys.stderr) + sys.exit(1) + + filename = args[0] + outfilename = make_output_name(filename) + + print("Processing %s => %s" % (filename, outfilename)) + + font = TTFont(filename, recalcBBoxes=False, recalcTimestamp=False) + font.flavor = None + font.save(outfilename, reorderTables=True) + + +if __name__ == '__main__': + main() diff -Nru fonttools-2.4/Src/eexecOp/eexecOpmodule.c fonttools-3.0/Src/eexecOp/eexecOpmodule.c --- fonttools-2.4/Src/eexecOp/eexecOpmodule.c 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Src/eexecOp/eexecOpmodule.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,203 +0,0 @@ -/* -** Copyright 1996-2001 by Letterror: Just van Rossum, The Netherlands. 
-** -** Open source. -** -** Module implementing the eexec and charstring encryption algorithm as -** used by PostScript Type 1 fonts. -** -*/ - -#include "Python.h" -#include <ctype.h> - -static PyObject *ErrorObject; - -/* ----------------------------------------------------- */ - -static char eexec_decrypt__doc__[] = -"" -; - -static PyObject * -eexec_decrypt(PyObject *self, PyObject *args) -{ - PyObject *_res = NULL; - unsigned short R; - int tempR; /* can't portably use unsigned shorts between Python versions */ - unsigned short c1 = 52845; - unsigned short c2 = 22719; - unsigned char * inbuf; - unsigned char * outbuf; - unsigned long counter, insize; - - if (!PyArg_ParseTuple(args, "s#i", &inbuf, &insize, &tempR)) - return NULL; - - R = (unsigned short)tempR; - - if ((outbuf = malloc(insize)) == NULL) - { - PyErr_NoMemory(); - return NULL; - } - for(counter = 0;counter < insize; counter++) { - outbuf[counter] = (inbuf[counter] ^ (R>>8)); - R = (inbuf[counter] + R) * c1 + c2; - } - - _res = Py_BuildValue("s#l", outbuf, insize, (unsigned long)R); - free(outbuf); - return _res; -} - -static char eexec_encrypt__doc__[] = -"" -; - -static PyObject * -eexec_encrypt(PyObject *self, PyObject *args) -{ - PyObject *_res = NULL; - unsigned short R; - int tempR; /* can't portably use unsigned shorts between Python versions */ - unsigned short c1 = 52845; - unsigned short c2 = 22719; - unsigned char * inbuf; - unsigned char * outbuf; - unsigned long counter, insize; - - if (!PyArg_ParseTuple(args, "s#i", &inbuf, &insize, &tempR)) - return NULL; - - R = (unsigned short)tempR; - - if ((outbuf = malloc(insize)) == NULL) - { - PyErr_NoMemory(); - return NULL; - } - for(counter = 0;counter < insize; counter++) { - outbuf[counter] = (inbuf[counter] ^ (R>>8)); - R = (outbuf[counter] + R) * c1 + c2; - } - - _res = Py_BuildValue("s#l", outbuf, insize, (unsigned long)R); - free(outbuf); - return _res; -} - -static char eexec_hexString__doc__[] = -"" -; - -static PyObject * 
-eexec_hexString(PyObject *self, PyObject *args) -{ - PyObject *_res = NULL; - unsigned char * inbuf; - unsigned char * outbuf; - static const unsigned char hexchars[] = "0123456789ABCDEF"; - unsigned long i, insize; - - if (!PyArg_ParseTuple(args, "s#", &inbuf, &insize)) - return NULL; - - outbuf = malloc(2 * insize); - if (outbuf == NULL) { - PyErr_NoMemory(); - return NULL; - } - - for (i = 0; i < insize; i++) { - outbuf[2 * i] = hexchars[(inbuf[i] >> 4) & 0xF]; - outbuf[2 * i + 1] = hexchars[inbuf[i] & 0xF]; - } - _res = Py_BuildValue("s#", outbuf, 2 * insize); - free(outbuf); - return _res; -} - - -#define HEX2DEC(c) ((c) >= 'A' ? ((c) - 'A' + 10) : ((c) - '0')) - -static char eexec_deHexString__doc__[] = -"" -; - -static PyObject * -eexec_deHexString(PyObject *self, PyObject *args) -{ - PyObject *_res = NULL; - unsigned char * inbuf; - unsigned char * outbuf; - unsigned char c1, c2; - unsigned long insize, i; - - if (!PyArg_ParseTuple(args, "s#", &inbuf, &insize)) - return NULL; - - if (insize % 2) { - PyErr_SetString(ErrorObject, "hex string must have even length"); - return NULL; - } - - outbuf = malloc(insize / 2); - if (outbuf == NULL) { - PyErr_NoMemory(); - return NULL; - } - - for ( i = 0; i < insize; i += 2) { - c1 = toupper(inbuf[i]); - c2 = toupper(inbuf[i+1]); - if (!isxdigit(c1) || !isxdigit(c1)) { - PyErr_SetString(ErrorObject, "non-hex character found"); - goto error; - } - outbuf[i/2] = (HEX2DEC(c2)) | (HEX2DEC(c1) << 4); - } - _res = Py_BuildValue("s#", outbuf, insize / 2); -error: - free(outbuf); - return _res; -} - -/* List of methods defined in the module */ - -static struct PyMethodDef eexec_methods[] = { - {"decrypt", (PyCFunction)eexec_decrypt, METH_VARARGS, eexec_decrypt__doc__}, - {"encrypt", (PyCFunction)eexec_encrypt, METH_VARARGS, eexec_encrypt__doc__}, - {"hexString", (PyCFunction)eexec_hexString, METH_VARARGS, eexec_hexString__doc__}, - {"deHexString", (PyCFunction)eexec_deHexString, METH_VARARGS, eexec_deHexString__doc__}, - 
{NULL, (PyCFunction)NULL, 0, NULL} /* sentinel */ -}; - - -/* Initialization function for the module (*must* be called initeexec) */ - -static char eexec_module_documentation[] = -"" -; - -void initeexecOp(void); /* prototype to shut up the compiler */ - -void initeexecOp(void) -{ - PyObject *m, *d; - - /* Create the module and add the functions */ - m = Py_InitModule4("eexecOp", eexec_methods, - eexec_module_documentation, - (PyObject*)NULL,PYTHON_API_VERSION); - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - ErrorObject = PyString_FromString("eexec.error"); - PyDict_SetItemString(d, "error", ErrorObject); - - /* Check for errors */ - if (PyErr_Occurred()) - Py_FatalError("can't initialize module eexec"); -} - diff -Nru fonttools-2.4/Src/eexecOp/README.txt fonttools-3.0/Src/eexecOp/README.txt --- fonttools-2.4/Src/eexecOp/README.txt 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Src/eexecOp/README.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -eexecOp is imported by the fontTools.misc.eexec module, and the latter -provides a (slow) Python implementation in case eexecOp isn't there. -It is designed to be a shared library, to be placed in - FontTools/Lib/fontTools/misc/ -but it should also work as a (possibly statically linked) top level module. - -It is built automatically when you run - - python setup.py build -or - python setup.py install - -in the top level FontTools directory. - -Just diff -Nru fonttools-2.4/Tools/fontTools/afmLib.py fonttools-3.0/Tools/fontTools/afmLib.py --- fonttools-2.4/Tools/fontTools/afmLib.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/afmLib.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,376 @@ +"""Module for reading and writing AFM files.""" + +# XXX reads AFM's generated by Fog, not tested with much else. +# It does not implement the full spec (Adobe Technote 5004, Adobe Font Metrics +# File Format Specification). 
Still, it should read most "common" AFM files. + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import re + +# every single line starts with a "word" +identifierRE = re.compile("^([A-Za-z]+).*") + +# regular expression to parse char lines +charRE = re.compile( + "(-?\d+)" # charnum + "\s*;\s*WX\s+" # ; WX + "(-?\d+)" # width + "\s*;\s*N\s+" # ; N + "([.A-Za-z0-9_]+)" # charname + "\s*;\s*B\s+" # ; B + "(-?\d+)" # left + "\s+" + "(-?\d+)" # bottom + "\s+" + "(-?\d+)" # right + "\s+" + "(-?\d+)" # top + "\s*;\s*" # ; + ) + +# regular expression to parse kerning lines +kernRE = re.compile( + "([.A-Za-z0-9_]+)" # leftchar + "\s+" + "([.A-Za-z0-9_]+)" # rightchar + "\s+" + "(-?\d+)" # value + "\s*" + ) + +# regular expressions to parse composite info lines of the form: +# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ; +compositeRE = re.compile( + "([.A-Za-z0-9_]+)" # char name + "\s+" + "(\d+)" # number of parts + "\s*;\s*" + ) +componentRE = re.compile( + "PCC\s+" # PPC + "([.A-Za-z0-9_]+)" # base char name + "\s+" + "(-?\d+)" # x offset + "\s+" + "(-?\d+)" # y offset + "\s*;\s*" + ) + +preferredAttributeOrder = [ + "FontName", + "FullName", + "FamilyName", + "Weight", + "ItalicAngle", + "IsFixedPitch", + "FontBBox", + "UnderlinePosition", + "UnderlineThickness", + "Version", + "Notice", + "EncodingScheme", + "CapHeight", + "XHeight", + "Ascender", + "Descender", +] + + +class error(Exception): + pass + + +class AFM(object): + + _attrs = None + + _keywords = ['StartFontMetrics', + 'EndFontMetrics', + 'StartCharMetrics', + 'EndCharMetrics', + 'StartKernData', + 'StartKernPairs', + 'EndKernPairs', + 'EndKernData', + 'StartComposites', + 'EndComposites', + ] + + def __init__(self, path=None): + self._attrs = {} + self._chars = {} + self._kerning = {} + self._index = {} + self._comments = [] + self._composites = {} + if path is not None: + self.read(path) + + def read(self, path): + lines = readlines(path) + for line in 
lines: + if not line.strip(): + continue + m = identifierRE.match(line) + if m is None: + raise error("syntax error in AFM file: " + repr(line)) + + pos = m.regs[1][1] + word = line[:pos] + rest = line[pos:].strip() + if word in self._keywords: + continue + if word == "C": + self.parsechar(rest) + elif word == "KPX": + self.parsekernpair(rest) + elif word == "CC": + self.parsecomposite(rest) + else: + self.parseattr(word, rest) + + def parsechar(self, rest): + m = charRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + things = [] + for fr, to in m.regs[1:]: + things.append(rest[fr:to]) + charname = things[2] + del things[2] + charnum, width, l, b, r, t = (int(thing) for thing in things) + self._chars[charname] = charnum, width, (l, b, r, t) + + def parsekernpair(self, rest): + m = kernRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + things = [] + for fr, to in m.regs[1:]: + things.append(rest[fr:to]) + leftchar, rightchar, value = things + value = int(value) + self._kerning[(leftchar, rightchar)] = value + + def parseattr(self, word, rest): + if word == "FontBBox": + l, b, r, t = [int(thing) for thing in rest.split()] + self._attrs[word] = l, b, r, t + elif word == "Comment": + self._comments.append(rest) + else: + try: + value = int(rest) + except (ValueError, OverflowError): + self._attrs[word] = rest + else: + self._attrs[word] = value + + def parsecomposite(self, rest): + m = compositeRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + charname = m.group(1) + ncomponents = int(m.group(2)) + rest = rest[m.regs[0][1]:] + components = [] + while True: + m = componentRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + basechar = m.group(1) + xoffset = int(m.group(2)) + yoffset = int(m.group(3)) + components.append((basechar, xoffset, yoffset)) + rest = rest[m.regs[0][1]:] + if not rest: + break + assert 
len(components) == ncomponents + self._composites[charname] = components + + def write(self, path, sep='\r'): + import time + lines = [ "StartFontMetrics 2.0", + "Comment Generated by afmLib; at %s" % ( + time.strftime("%m/%d/%Y %H:%M:%S", + time.localtime(time.time())))] + + # write comments, assuming (possibly wrongly!) they should + # all appear at the top + for comment in self._comments: + lines.append("Comment " + comment) + + # write attributes, first the ones we know about, in + # a preferred order + attrs = self._attrs + for attr in preferredAttributeOrder: + if attr in attrs: + value = attrs[attr] + if attr == "FontBBox": + value = "%s %s %s %s" % value + lines.append(attr + " " + str(value)) + # then write the attributes we don't know about, + # in alphabetical order + items = sorted(attrs.items()) + for attr, value in items: + if attr in preferredAttributeOrder: + continue + lines.append(attr + " " + str(value)) + + # write char metrics + lines.append("StartCharMetrics " + repr(len(self._chars))) + items = [(charnum, (charname, width, box)) for charname, (charnum, width, box) in self._chars.items()] + + def myKey(a): + """Custom key function to make sure unencoded chars (-1) + end up at the end of the list after sorting.""" + if a[0] == -1: + a = (0xffff,) + a[1:] # 0xffff is an arbitrary large number + return a + items.sort(key=myKey) + + for charnum, (charname, width, (l, b, r, t)) in items: + lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" % + (charnum, width, charname, l, b, r, t)) + lines.append("EndCharMetrics") + + # write kerning info + lines.append("StartKernData") + lines.append("StartKernPairs " + repr(len(self._kerning))) + items = sorted(self._kerning.items()) + for (leftchar, rightchar), value in items: + lines.append("KPX %s %s %d" % (leftchar, rightchar, value)) + lines.append("EndKernPairs") + lines.append("EndKernData") + + if self._composites: + composites = sorted(self._composites.items()) + lines.append("StartComposites %s" % 
len(self._composites)) + for charname, components in composites: + line = "CC %s %s ;" % (charname, len(components)) + for basechar, xoffset, yoffset in components: + line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset) + lines.append(line) + lines.append("EndComposites") + + lines.append("EndFontMetrics") + + writelines(path, lines, sep) + + def has_kernpair(self, pair): + return pair in self._kerning + + def kernpairs(self): + return list(self._kerning.keys()) + + def has_char(self, char): + return char in self._chars + + def chars(self): + return list(self._chars.keys()) + + def comments(self): + return self._comments + + def addComment(self, comment): + self._comments.append(comment) + + def addComposite(self, glyphName, components): + self._composites[glyphName] = components + + def __getattr__(self, attr): + if attr in self._attrs: + return self._attrs[attr] + else: + raise AttributeError(attr) + + def __setattr__(self, attr, value): + # all attrs *not* starting with "_" are consider to be AFM keywords + if attr[:1] == "_": + self.__dict__[attr] = value + else: + self._attrs[attr] = value + + def __delattr__(self, attr): + # all attrs *not* starting with "_" are consider to be AFM keywords + if attr[:1] == "_": + try: + del self.__dict__[attr] + except KeyError: + raise AttributeError(attr) + else: + try: + del self._attrs[attr] + except KeyError: + raise AttributeError(attr) + + def __getitem__(self, key): + if isinstance(key, tuple): + # key is a tuple, return the kernpair + return self._kerning[key] + else: + # return the metrics instead + return self._chars[key] + + def __setitem__(self, key, value): + if isinstance(key, tuple): + # key is a tuple, set kernpair + self._kerning[key] = value + else: + # set char metrics + self._chars[key] = value + + def __delitem__(self, key): + if isinstance(key, tuple): + # key is a tuple, del kernpair + del self._kerning[key] + else: + # del char metrics + del self._chars[key] + + def __repr__(self): + if 
hasattr(self, "FullName"): + return '<AFM object for %s>' % self.FullName + else: + return '<AFM object at %x>' % id(self) + + +def readlines(path): + f = open(path, 'rb') + data = f.read() + f.close() + # read any text file, regardless whether it's formatted for Mac, Unix or Dos + sep = "" + if '\r' in data: + sep = sep + '\r' # mac or dos + if '\n' in data: + sep = sep + '\n' # unix or dos + return data.split(sep) + +def writelines(path, lines, sep='\r'): + f = open(path, 'wb') + for line in lines: + f.write(line + sep) + f.close() + + +if __name__ == "__main__": + import EasyDialogs + path = EasyDialogs.AskFileForOpen() + if path: + afm = AFM(path) + char = 'A' + if afm.has_char(char): + print(afm[char]) # print charnum, width and boundingbox + pair = ('A', 'V') + if afm.has_kernpair(pair): + print(afm[pair]) # print kerning value for pair + print(afm.Version) # various other afm entries have become attributes + print(afm.Weight) + # afm.comments() returns a list of all Comment lines found in the AFM + print(afm.comments()) + #print afm.chars() + #print afm.kernpairs() + print(afm) + afm.write(path + ".muck") diff -Nru fonttools-2.4/Tools/fontTools/agl.py fonttools-3.0/Tools/fontTools/agl.py --- fonttools-2.4/Tools/fontTools/agl.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/agl.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,738 @@ +# The table below is taken from +# http://www.adobe.com/devnet/opentype/archives/aglfn.txt + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +_aglText = """\ +# ----------------------------------------------------------- +# Copyright 2003, 2005-2008, 2010 Adobe Systems Incorporated. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or +# without modification, are permitted provided that the +# following conditions are met: +# +# Redistributions of source code must retain the above +# copyright notice, this list of conditions and the following +# disclaimer. +# +# Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials +# provided with the distribution. +# +# Neither the name of Adobe Systems Incorporated nor the names +# of its contributors may be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ----------------------------------------------------------- +# Name: Adobe Glyph List For New Fonts +# Table version: 1.7 +# Date: November 6, 2008 +# URL: http://sourceforge.net/adobe/aglfn/ +# +# Description: +# +# AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph +# names that are recommended for new fonts, which are compatible with +# the AGL (Adobe Glyph List) Specification, and which should be used +# as described in Section 6 of that document. 
AGLFN comprises the set +# of glyph names from AGL that map via the AGL Specification rules to +# the semantically correct UV (Unicode Value). For example, "Asmall" +# is omitted because AGL maps this glyph name to the PUA (Private Use +# Area) value U+F761, rather than to the UV that maps from the glyph +# name "A." Also omitted is "ffi," because AGL maps this to the +# Alphabetic Presentation Forms value U+FB03, rather than decomposing +# it into the following sequence of three UVs: U+0066, U+0066, and +# U+0069. The name "arrowvertex" has been omitted because this glyph +# now has a real UV, and AGL is now incorrect in mapping it to the PUA +# value U+F8E6. If you do not find an appropriate name for your glyph +# in this list, then please refer to Section 6 of the AGL +# Specification. +# +# Format: three semicolon-delimited fields: +# (1) Standard UV or CUS UV--four uppercase hexadecimal digits +# (2) Glyph name--upper/lowercase letters and digits +# (3) Character names: Unicode character names for standard UVs, and +# descriptive names for CUS UVs--uppercase letters, hyphen, and +# space +# +# The records are sorted by glyph name in increasing ASCII order, +# entries with the same glyph name are sorted in decreasing priority +# order, the UVs and Unicode character names are provided for +# convenience, lines starting with "#" are comments, and blank lines +# should be ignored. +# +# Revision History: +# +# 1.7 [6 November 2008] +# - Reverted to the original 1.4 and earlier mappings for Delta, +# Omega, and mu. +# - Removed mappings for "afii" names. These should now be assigned +# "uni" names. +# - Removed mappings for "commaaccent" names. These should now be +# assigned "uni" names. +# +# 1.6 [30 January 2006] +# - Completed work intended in 1.5. +# +# 1.5 [23 November 2005] +# - Removed duplicated block at end of file. 
+# - Changed mappings: +# 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA +# 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA +# 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU +# - Corrected statement above about why "ffi" is omitted. +# +# 1.4 [24 September 2003] +# - Changed version to 1.4, to avoid confusion with the AGL 1.3. +# - Fixed spelling errors in the header. +# - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode +# value in some fonts. +# +# 1.1 [17 April 2003] +# - Renamed [Tt]cedilla back to [Tt]commaaccent. +# +# 1.0 [31 January 2003] +# - Original version. +# - Derived from the AGLv1.2 by: +# removing the PUA area codes; +# removing duplicate Unicode mappings; and +# renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla" +# +0041;A;LATIN CAPITAL LETTER A +00C6;AE;LATIN CAPITAL LETTER AE +01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE +00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE +0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE +00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX +00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS +00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE +0391;Alpha;GREEK CAPITAL LETTER ALPHA +0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS +0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON +0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK +00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE +01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE +00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE +0042;B;LATIN CAPITAL LETTER B +0392;Beta;GREEK CAPITAL LETTER BETA +0043;C;LATIN CAPITAL LETTER C +0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE +010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON +00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA +0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX +010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE +03A7;Chi;GREEK CAPITAL LETTER CHI +0044;D;LATIN CAPITAL LETTER D +010E;Dcaron;LATIN 
CAPITAL LETTER D WITH CARON +0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE +2206;Delta;INCREMENT +0045;E;LATIN CAPITAL LETTER E +00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE +0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE +011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON +00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX +00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS +0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE +00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE +0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON +014A;Eng;LATIN CAPITAL LETTER ENG +0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK +0395;Epsilon;GREEK CAPITAL LETTER EPSILON +0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS +0397;Eta;GREEK CAPITAL LETTER ETA +0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS +00D0;Eth;LATIN CAPITAL LETTER ETH +20AC;Euro;EURO SIGN +0046;F;LATIN CAPITAL LETTER F +0047;G;LATIN CAPITAL LETTER G +0393;Gamma;GREEK CAPITAL LETTER GAMMA +011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE +01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON +011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX +0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE +0048;H;LATIN CAPITAL LETTER H +25CF;H18533;BLACK CIRCLE +25AA;H18543;BLACK SMALL SQUARE +25AB;H18551;WHITE SMALL SQUARE +25A1;H22073;WHITE SQUARE +0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE +0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX +0049;I;LATIN CAPITAL LETTER I +0132;IJ;LATIN CAPITAL LIGATURE IJ +00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE +012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE +00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX +00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS +0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE +2111;Ifraktur;BLACK-LETTER CAPITAL I +00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE +012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON +012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK +0399;Iota;GREEK CAPITAL LETTER IOTA +03AA;Iotadieresis;GREEK CAPITAL 
LETTER IOTA WITH DIALYTIKA +038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS +0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE +004A;J;LATIN CAPITAL LETTER J +0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX +004B;K;LATIN CAPITAL LETTER K +039A;Kappa;GREEK CAPITAL LETTER KAPPA +004C;L;LATIN CAPITAL LETTER L +0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE +039B;Lambda;GREEK CAPITAL LETTER LAMDA +013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON +013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT +0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE +004D;M;LATIN CAPITAL LETTER M +039C;Mu;GREEK CAPITAL LETTER MU +004E;N;LATIN CAPITAL LETTER N +0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE +0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON +00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE +039D;Nu;GREEK CAPITAL LETTER NU +004F;O;LATIN CAPITAL LETTER O +0152;OE;LATIN CAPITAL LIGATURE OE +00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE +014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE +00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX +00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS +00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE +01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN +0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE +014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON +2126;Omega;OHM SIGN +038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS +039F;Omicron;GREEK CAPITAL LETTER OMICRON +038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS +00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE +01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE +00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE +0050;P;LATIN CAPITAL LETTER P +03A6;Phi;GREEK CAPITAL LETTER PHI +03A0;Pi;GREEK CAPITAL LETTER PI +03A8;Psi;GREEK CAPITAL LETTER PSI +0051;Q;LATIN CAPITAL LETTER Q +0052;R;LATIN CAPITAL LETTER R +0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE +0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON +211C;Rfraktur;BLACK-LETTER CAPITAL R +03A1;Rho;GREEK CAPITAL LETTER RHO 
+0053;S;LATIN CAPITAL LETTER S +250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT +2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT +2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT +2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT +253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL +252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL +2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL +251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT +2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT +2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL +2502;SF110000;BOX DRAWINGS LIGHT VERTICAL +2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE +2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE +2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE +2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE +2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT +2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL +2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT +255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT +255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE +255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE +255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE +255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE +255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT +2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT +2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL +2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL +2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT +2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL +256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL +2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE +2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE +2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE +2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE +2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE +2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE +2552;SF510000;BOX DRAWINGS DOWN SINGLE AND 
RIGHT DOUBLE +2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE +256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE +256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE +015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE +0160;Scaron;LATIN CAPITAL LETTER S WITH CARON +015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA +015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX +03A3;Sigma;GREEK CAPITAL LETTER SIGMA +0054;T;LATIN CAPITAL LETTER T +03A4;Tau;GREEK CAPITAL LETTER TAU +0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE +0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON +0398;Theta;GREEK CAPITAL LETTER THETA +00DE;Thorn;LATIN CAPITAL LETTER THORN +0055;U;LATIN CAPITAL LETTER U +00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE +016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE +00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX +00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS +00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE +01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN +0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE +016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON +0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK +03A5;Upsilon;GREEK CAPITAL LETTER UPSILON +03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL +03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA +038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS +016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE +0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE +0056;V;LATIN CAPITAL LETTER V +0057;W;LATIN CAPITAL LETTER W +1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE +0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX +1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS +1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE +0058;X;LATIN CAPITAL LETTER X +039E;Xi;GREEK CAPITAL LETTER XI +0059;Y;LATIN CAPITAL LETTER Y +00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE +0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX +0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS 
+1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE +005A;Z;LATIN CAPITAL LETTER Z +0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE +017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON +017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE +0396;Zeta;GREEK CAPITAL LETTER ZETA +0061;a;LATIN SMALL LETTER A +00E1;aacute;LATIN SMALL LETTER A WITH ACUTE +0103;abreve;LATIN SMALL LETTER A WITH BREVE +00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX +00B4;acute;ACUTE ACCENT +0301;acutecomb;COMBINING ACUTE ACCENT +00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS +00E6;ae;LATIN SMALL LETTER AE +01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE +00E0;agrave;LATIN SMALL LETTER A WITH GRAVE +2135;aleph;ALEF SYMBOL +03B1;alpha;GREEK SMALL LETTER ALPHA +03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS +0101;amacron;LATIN SMALL LETTER A WITH MACRON +0026;ampersand;AMPERSAND +2220;angle;ANGLE +2329;angleleft;LEFT-POINTING ANGLE BRACKET +232A;angleright;RIGHT-POINTING ANGLE BRACKET +0387;anoteleia;GREEK ANO TELEIA +0105;aogonek;LATIN SMALL LETTER A WITH OGONEK +2248;approxequal;ALMOST EQUAL TO +00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE +01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE +2194;arrowboth;LEFT RIGHT ARROW +21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW +21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW +21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW +21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW +21D1;arrowdblup;UPWARDS DOUBLE ARROW +2193;arrowdown;DOWNWARDS ARROW +2190;arrowleft;LEFTWARDS ARROW +2192;arrowright;RIGHTWARDS ARROW +2191;arrowup;UPWARDS ARROW +2195;arrowupdn;UP DOWN ARROW +21A8;arrowupdnbse;UP DOWN ARROW WITH BASE +005E;asciicircum;CIRCUMFLEX ACCENT +007E;asciitilde;TILDE +002A;asterisk;ASTERISK +2217;asteriskmath;ASTERISK OPERATOR +0040;at;COMMERCIAL AT +00E3;atilde;LATIN SMALL LETTER A WITH TILDE +0062;b;LATIN SMALL LETTER B +005C;backslash;REVERSE SOLIDUS +007C;bar;VERTICAL LINE +03B2;beta;GREEK SMALL LETTER BETA +2588;block;FULL BLOCK +007B;braceleft;LEFT CURLY 
BRACKET +007D;braceright;RIGHT CURLY BRACKET +005B;bracketleft;LEFT SQUARE BRACKET +005D;bracketright;RIGHT SQUARE BRACKET +02D8;breve;BREVE +00A6;brokenbar;BROKEN BAR +2022;bullet;BULLET +0063;c;LATIN SMALL LETTER C +0107;cacute;LATIN SMALL LETTER C WITH ACUTE +02C7;caron;CARON +21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS +010D;ccaron;LATIN SMALL LETTER C WITH CARON +00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA +0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX +010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE +00B8;cedilla;CEDILLA +00A2;cent;CENT SIGN +03C7;chi;GREEK SMALL LETTER CHI +25CB;circle;WHITE CIRCLE +2297;circlemultiply;CIRCLED TIMES +2295;circleplus;CIRCLED PLUS +02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT +2663;club;BLACK CLUB SUIT +003A;colon;COLON +20A1;colonmonetary;COLON SIGN +002C;comma;COMMA +2245;congruent;APPROXIMATELY EQUAL TO +00A9;copyright;COPYRIGHT SIGN +00A4;currency;CURRENCY SIGN +0064;d;LATIN SMALL LETTER D +2020;dagger;DAGGER +2021;daggerdbl;DOUBLE DAGGER +010F;dcaron;LATIN SMALL LETTER D WITH CARON +0111;dcroat;LATIN SMALL LETTER D WITH STROKE +00B0;degree;DEGREE SIGN +03B4;delta;GREEK SMALL LETTER DELTA +2666;diamond;BLACK DIAMOND SUIT +00A8;dieresis;DIAERESIS +0385;dieresistonos;GREEK DIALYTIKA TONOS +00F7;divide;DIVISION SIGN +2593;dkshade;DARK SHADE +2584;dnblock;LOWER HALF BLOCK +0024;dollar;DOLLAR SIGN +20AB;dong;DONG SIGN +02D9;dotaccent;DOT ABOVE +0323;dotbelowcomb;COMBINING DOT BELOW +0131;dotlessi;LATIN SMALL LETTER DOTLESS I +22C5;dotmath;DOT OPERATOR +0065;e;LATIN SMALL LETTER E +00E9;eacute;LATIN SMALL LETTER E WITH ACUTE +0115;ebreve;LATIN SMALL LETTER E WITH BREVE +011B;ecaron;LATIN SMALL LETTER E WITH CARON +00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX +00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS +0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE +00E8;egrave;LATIN SMALL LETTER E WITH GRAVE +0038;eight;DIGIT EIGHT +2208;element;ELEMENT OF +2026;ellipsis;HORIZONTAL 
ELLIPSIS +0113;emacron;LATIN SMALL LETTER E WITH MACRON +2014;emdash;EM DASH +2205;emptyset;EMPTY SET +2013;endash;EN DASH +014B;eng;LATIN SMALL LETTER ENG +0119;eogonek;LATIN SMALL LETTER E WITH OGONEK +03B5;epsilon;GREEK SMALL LETTER EPSILON +03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS +003D;equal;EQUALS SIGN +2261;equivalence;IDENTICAL TO +212E;estimated;ESTIMATED SYMBOL +03B7;eta;GREEK SMALL LETTER ETA +03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS +00F0;eth;LATIN SMALL LETTER ETH +0021;exclam;EXCLAMATION MARK +203C;exclamdbl;DOUBLE EXCLAMATION MARK +00A1;exclamdown;INVERTED EXCLAMATION MARK +2203;existential;THERE EXISTS +0066;f;LATIN SMALL LETTER F +2640;female;FEMALE SIGN +2012;figuredash;FIGURE DASH +25A0;filledbox;BLACK SQUARE +25AC;filledrect;BLACK RECTANGLE +0035;five;DIGIT FIVE +215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS +0192;florin;LATIN SMALL LETTER F WITH HOOK +0034;four;DIGIT FOUR +2044;fraction;FRACTION SLASH +20A3;franc;FRENCH FRANC SIGN +0067;g;LATIN SMALL LETTER G +03B3;gamma;GREEK SMALL LETTER GAMMA +011F;gbreve;LATIN SMALL LETTER G WITH BREVE +01E7;gcaron;LATIN SMALL LETTER G WITH CARON +011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX +0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE +00DF;germandbls;LATIN SMALL LETTER SHARP S +2207;gradient;NABLA +0060;grave;GRAVE ACCENT +0300;gravecomb;COMBINING GRAVE ACCENT +003E;greater;GREATER-THAN SIGN +2265;greaterequal;GREATER-THAN OR EQUAL TO +00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK +00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK +2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK +203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK +0068;h;LATIN SMALL LETTER H +0127;hbar;LATIN SMALL LETTER H WITH STROKE +0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX +2665;heart;BLACK HEART SUIT +0309;hookabovecomb;COMBINING HOOK ABOVE +2302;house;HOUSE +02DD;hungarumlaut;DOUBLE ACUTE ACCENT +002D;hyphen;HYPHEN-MINUS 
+0069;i;LATIN SMALL LETTER I +00ED;iacute;LATIN SMALL LETTER I WITH ACUTE +012D;ibreve;LATIN SMALL LETTER I WITH BREVE +00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX +00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS +00EC;igrave;LATIN SMALL LETTER I WITH GRAVE +0133;ij;LATIN SMALL LIGATURE IJ +012B;imacron;LATIN SMALL LETTER I WITH MACRON +221E;infinity;INFINITY +222B;integral;INTEGRAL +2321;integralbt;BOTTOM HALF INTEGRAL +2320;integraltp;TOP HALF INTEGRAL +2229;intersection;INTERSECTION +25D8;invbullet;INVERSE BULLET +25D9;invcircle;INVERSE WHITE CIRCLE +263B;invsmileface;BLACK SMILING FACE +012F;iogonek;LATIN SMALL LETTER I WITH OGONEK +03B9;iota;GREEK SMALL LETTER IOTA +03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA +0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS +03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS +0129;itilde;LATIN SMALL LETTER I WITH TILDE +006A;j;LATIN SMALL LETTER J +0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX +006B;k;LATIN SMALL LETTER K +03BA;kappa;GREEK SMALL LETTER KAPPA +0138;kgreenlandic;LATIN SMALL LETTER KRA +006C;l;LATIN SMALL LETTER L +013A;lacute;LATIN SMALL LETTER L WITH ACUTE +03BB;lambda;GREEK SMALL LETTER LAMDA +013E;lcaron;LATIN SMALL LETTER L WITH CARON +0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT +003C;less;LESS-THAN SIGN +2264;lessequal;LESS-THAN OR EQUAL TO +258C;lfblock;LEFT HALF BLOCK +20A4;lira;LIRA SIGN +2227;logicaland;LOGICAL AND +00AC;logicalnot;NOT SIGN +2228;logicalor;LOGICAL OR +017F;longs;LATIN SMALL LETTER LONG S +25CA;lozenge;LOZENGE +0142;lslash;LATIN SMALL LETTER L WITH STROKE +2591;ltshade;LIGHT SHADE +006D;m;LATIN SMALL LETTER M +00AF;macron;MACRON +2642;male;MALE SIGN +2212;minus;MINUS SIGN +2032;minute;PRIME +00B5;mu;MICRO SIGN +00D7;multiply;MULTIPLICATION SIGN +266A;musicalnote;EIGHTH NOTE +266B;musicalnotedbl;BEAMED EIGHTH NOTES +006E;n;LATIN SMALL LETTER N +0144;nacute;LATIN SMALL LETTER N WITH ACUTE +0149;napostrophe;LATIN SMALL LETTER N 
PRECEDED BY APOSTROPHE +0148;ncaron;LATIN SMALL LETTER N WITH CARON +0039;nine;DIGIT NINE +2209;notelement;NOT AN ELEMENT OF +2260;notequal;NOT EQUAL TO +2284;notsubset;NOT A SUBSET OF +00F1;ntilde;LATIN SMALL LETTER N WITH TILDE +03BD;nu;GREEK SMALL LETTER NU +0023;numbersign;NUMBER SIGN +006F;o;LATIN SMALL LETTER O +00F3;oacute;LATIN SMALL LETTER O WITH ACUTE +014F;obreve;LATIN SMALL LETTER O WITH BREVE +00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX +00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS +0153;oe;LATIN SMALL LIGATURE OE +02DB;ogonek;OGONEK +00F2;ograve;LATIN SMALL LETTER O WITH GRAVE +01A1;ohorn;LATIN SMALL LETTER O WITH HORN +0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE +014D;omacron;LATIN SMALL LETTER O WITH MACRON +03C9;omega;GREEK SMALL LETTER OMEGA +03D6;omega1;GREEK PI SYMBOL +03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS +03BF;omicron;GREEK SMALL LETTER OMICRON +03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS +0031;one;DIGIT ONE +2024;onedotenleader;ONE DOT LEADER +215B;oneeighth;VULGAR FRACTION ONE EIGHTH +00BD;onehalf;VULGAR FRACTION ONE HALF +00BC;onequarter;VULGAR FRACTION ONE QUARTER +2153;onethird;VULGAR FRACTION ONE THIRD +25E6;openbullet;WHITE BULLET +00AA;ordfeminine;FEMININE ORDINAL INDICATOR +00BA;ordmasculine;MASCULINE ORDINAL INDICATOR +221F;orthogonal;RIGHT ANGLE +00F8;oslash;LATIN SMALL LETTER O WITH STROKE +01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE +00F5;otilde;LATIN SMALL LETTER O WITH TILDE +0070;p;LATIN SMALL LETTER P +00B6;paragraph;PILCROW SIGN +0028;parenleft;LEFT PARENTHESIS +0029;parenright;RIGHT PARENTHESIS +2202;partialdiff;PARTIAL DIFFERENTIAL +0025;percent;PERCENT SIGN +002E;period;FULL STOP +00B7;periodcentered;MIDDLE DOT +22A5;perpendicular;UP TACK +2030;perthousand;PER MILLE SIGN +20A7;peseta;PESETA SIGN +03C6;phi;GREEK SMALL LETTER PHI +03D5;phi1;GREEK PHI SYMBOL +03C0;pi;GREEK SMALL LETTER PI +002B;plus;PLUS SIGN +00B1;plusminus;PLUS-MINUS SIGN 
+211E;prescription;PRESCRIPTION TAKE +220F;product;N-ARY PRODUCT +2282;propersubset;SUBSET OF +2283;propersuperset;SUPERSET OF +221D;proportional;PROPORTIONAL TO +03C8;psi;GREEK SMALL LETTER PSI +0071;q;LATIN SMALL LETTER Q +003F;question;QUESTION MARK +00BF;questiondown;INVERTED QUESTION MARK +0022;quotedbl;QUOTATION MARK +201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK +201C;quotedblleft;LEFT DOUBLE QUOTATION MARK +201D;quotedblright;RIGHT DOUBLE QUOTATION MARK +2018;quoteleft;LEFT SINGLE QUOTATION MARK +201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK +2019;quoteright;RIGHT SINGLE QUOTATION MARK +201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK +0027;quotesingle;APOSTROPHE +0072;r;LATIN SMALL LETTER R +0155;racute;LATIN SMALL LETTER R WITH ACUTE +221A;radical;SQUARE ROOT +0159;rcaron;LATIN SMALL LETTER R WITH CARON +2286;reflexsubset;SUBSET OF OR EQUAL TO +2287;reflexsuperset;SUPERSET OF OR EQUAL TO +00AE;registered;REGISTERED SIGN +2310;revlogicalnot;REVERSED NOT SIGN +03C1;rho;GREEK SMALL LETTER RHO +02DA;ring;RING ABOVE +2590;rtblock;RIGHT HALF BLOCK +0073;s;LATIN SMALL LETTER S +015B;sacute;LATIN SMALL LETTER S WITH ACUTE +0161;scaron;LATIN SMALL LETTER S WITH CARON +015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA +015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX +2033;second;DOUBLE PRIME +00A7;section;SECTION SIGN +003B;semicolon;SEMICOLON +0037;seven;DIGIT SEVEN +215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS +2592;shade;MEDIUM SHADE +03C3;sigma;GREEK SMALL LETTER SIGMA +03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA +223C;similar;TILDE OPERATOR +0036;six;DIGIT SIX +002F;slash;SOLIDUS +263A;smileface;WHITE SMILING FACE +0020;space;SPACE +2660;spade;BLACK SPADE SUIT +00A3;sterling;POUND SIGN +220B;suchthat;CONTAINS AS MEMBER +2211;summation;N-ARY SUMMATION +263C;sun;WHITE SUN WITH RAYS +0074;t;LATIN SMALL LETTER T +03C4;tau;GREEK SMALL LETTER TAU +0167;tbar;LATIN SMALL LETTER T WITH STROKE +0165;tcaron;LATIN SMALL LETTER T WITH CARON 
+2234;therefore;THEREFORE +03B8;theta;GREEK SMALL LETTER THETA +03D1;theta1;GREEK THETA SYMBOL +00FE;thorn;LATIN SMALL LETTER THORN +0033;three;DIGIT THREE +215C;threeeighths;VULGAR FRACTION THREE EIGHTHS +00BE;threequarters;VULGAR FRACTION THREE QUARTERS +02DC;tilde;SMALL TILDE +0303;tildecomb;COMBINING TILDE +0384;tonos;GREEK TONOS +2122;trademark;TRADE MARK SIGN +25BC;triagdn;BLACK DOWN-POINTING TRIANGLE +25C4;triaglf;BLACK LEFT-POINTING POINTER +25BA;triagrt;BLACK RIGHT-POINTING POINTER +25B2;triagup;BLACK UP-POINTING TRIANGLE +0032;two;DIGIT TWO +2025;twodotenleader;TWO DOT LEADER +2154;twothirds;VULGAR FRACTION TWO THIRDS +0075;u;LATIN SMALL LETTER U +00FA;uacute;LATIN SMALL LETTER U WITH ACUTE +016D;ubreve;LATIN SMALL LETTER U WITH BREVE +00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX +00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS +00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE +01B0;uhorn;LATIN SMALL LETTER U WITH HORN +0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE +016B;umacron;LATIN SMALL LETTER U WITH MACRON +005F;underscore;LOW LINE +2017;underscoredbl;DOUBLE LOW LINE +222A;union;UNION +2200;universal;FOR ALL +0173;uogonek;LATIN SMALL LETTER U WITH OGONEK +2580;upblock;UPPER HALF BLOCK +03C5;upsilon;GREEK SMALL LETTER UPSILON +03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA +03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS +03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS +016F;uring;LATIN SMALL LETTER U WITH RING ABOVE +0169;utilde;LATIN SMALL LETTER U WITH TILDE +0076;v;LATIN SMALL LETTER V +0077;w;LATIN SMALL LETTER W +1E83;wacute;LATIN SMALL LETTER W WITH ACUTE +0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX +1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS +2118;weierstrass;SCRIPT CAPITAL P +1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE +0078;x;LATIN SMALL LETTER X +03BE;xi;GREEK SMALL LETTER XI +0079;y;LATIN SMALL LETTER Y +00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE 
+0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX +00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS +00A5;yen;YEN SIGN +1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE +007A;z;LATIN SMALL LETTER Z +017A;zacute;LATIN SMALL LETTER Z WITH ACUTE +017E;zcaron;LATIN SMALL LETTER Z WITH CARON +017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE +0030;zero;DIGIT ZERO +03B6;zeta;GREEK SMALL LETTER ZETA +#END +""" + + +class AGLError(Exception): + pass + +AGL2UV = {} +UV2AGL = {} + +def _builddicts(): + import re + + lines = _aglText.splitlines() + + parseAGL_RE = re.compile("([0-9A-F]{4});([A-Za-z_0-9.]+);.*?$") + + for line in lines: + if not line or line[:1] == '#': + continue + m = parseAGL_RE.match(line) + if not m: + raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20])) + unicode = m.group(1) + assert len(unicode) == 4 + unicode = int(unicode, 16) + glyphName = m.group(2) + if glyphName in AGL2UV: + # the above table contains identical duplicates + assert AGL2UV[glyphName] == unicode + else: + AGL2UV[glyphName] = unicode + UV2AGL[unicode] = glyphName + +_builddicts() diff -Nru fonttools-2.4/Tools/fontTools/cffLib.py fonttools-3.0/Tools/fontTools/cffLib.py --- fonttools-2.4/Tools/fontTools/cffLib.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/cffLib.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1810 @@ +"""cffLib.py -- read/write tools for Adobe CFF fonts.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc import psCharStrings +from fontTools.misc.textTools import safeEval +import struct + +DEBUG = 0 + + +cffHeaderFormat = """ + major: B + minor: B + hdrSize: B + offSize: B +""" + +class CFFFontSet(object): + + def __init__(self): + pass + + def decompile(self, file, otFont): + sstruct.unpack(cffHeaderFormat, file.read(4), self) + assert self.major == 1 and self.minor == 0, \ + "unknown CFF format: %d.%d" % 
(self.major, self.minor) + + file.seek(self.hdrSize) + self.fontNames = list(Index(file)) + self.topDictIndex = TopDictIndex(file) + self.strings = IndexedStrings(file) + self.GlobalSubrs = GlobalSubrsIndex(file) + self.topDictIndex.strings = self.strings + self.topDictIndex.GlobalSubrs = self.GlobalSubrs + + def __len__(self): + return len(self.fontNames) + + def keys(self): + return list(self.fontNames) + + def values(self): + return self.topDictIndex + + def __getitem__(self, name): + try: + index = self.fontNames.index(name) + except ValueError: + raise KeyError(name) + return self.topDictIndex[index] + + def compile(self, file, otFont): + strings = IndexedStrings() + writer = CFFWriter() + writer.add(sstruct.pack(cffHeaderFormat, self)) + fontNames = Index() + for name in self.fontNames: + fontNames.append(name) + writer.add(fontNames.getCompiler(strings, None)) + topCompiler = self.topDictIndex.getCompiler(strings, None) + writer.add(topCompiler) + writer.add(strings.getCompiler()) + writer.add(self.GlobalSubrs.getCompiler(strings, None)) + + for topDict in self.topDictIndex: + if not hasattr(topDict, "charset") or topDict.charset is None: + charset = otFont.getGlyphOrder() + topDict.charset = charset + + for child in topCompiler.getChildren(strings): + writer.add(child) + + writer.toFile(file) + + def toXML(self, xmlWriter, progress=None): + for fontName in self.fontNames: + xmlWriter.begintag("CFFFont", name=tostr(fontName)) + xmlWriter.newline() + font = self[fontName] + font.toXML(xmlWriter, progress) + xmlWriter.endtag("CFFFont") + xmlWriter.newline() + xmlWriter.newline() + xmlWriter.begintag("GlobalSubrs") + xmlWriter.newline() + self.GlobalSubrs.toXML(xmlWriter, progress) + xmlWriter.endtag("GlobalSubrs") + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + self.major = 1 + self.minor = 0 + self.hdrSize = 4 + self.offSize = 4 # XXX ?? 
+ if name == "CFFFont": + if not hasattr(self, "fontNames"): + self.fontNames = [] + self.topDictIndex = TopDictIndex() + fontName = attrs["name"] + topDict = TopDict(GlobalSubrs=self.GlobalSubrs) + topDict.charset = None # gets filled in later + self.fontNames.append(fontName) + self.topDictIndex.append(topDict) + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + topDict.fromXML(name, attrs, content) + elif name == "GlobalSubrs": + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + subr = psCharStrings.T2CharString() + subr.fromXML(name, attrs, content) + self.GlobalSubrs.append(subr) + + +class CFFWriter(object): + + def __init__(self): + self.data = [] + + def add(self, table): + self.data.append(table) + + def toFile(self, file): + lastPosList = None + count = 1 + while True: + if DEBUG: + print("CFFWriter.toFile() iteration:", count) + count = count + 1 + pos = 0 + posList = [pos] + for item in self.data: + if hasattr(item, "getDataLength"): + endPos = pos + item.getDataLength() + else: + endPos = pos + len(item) + if hasattr(item, "setPos"): + item.setPos(pos, endPos) + pos = endPos + posList.append(pos) + if posList == lastPosList: + break + lastPosList = posList + if DEBUG: + print("CFFWriter.toFile() writing to file.") + begin = file.tell() + posList = [0] + for item in self.data: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + posList.append(file.tell() - begin) + assert posList == lastPosList + + +def calcOffSize(largestOffset): + if largestOffset < 0x100: + offSize = 1 + elif largestOffset < 0x10000: + offSize = 2 + elif largestOffset < 0x1000000: + offSize = 3 + else: + offSize = 4 + return offSize + + +class IndexCompiler(object): + + def __init__(self, items, strings, parent): + self.items = self.getItems(items, strings) + self.parent = parent + + def getItems(self, items, strings): + return items + + def 
getOffsets(self): + pos = 1 + offsets = [pos] + for item in self.items: + if hasattr(item, "getDataLength"): + pos = pos + item.getDataLength() + else: + pos = pos + len(item) + offsets.append(pos) + return offsets + + def getDataLength(self): + lastOffset = self.getOffsets()[-1] + offSize = calcOffSize(lastOffset) + dataLength = ( + 2 + # count + 1 + # offSize + (len(self.items) + 1) * offSize + # the offsets + lastOffset - 1 # size of object data + ) + return dataLength + + def toFile(self, file): + offsets = self.getOffsets() + writeCard16(file, len(self.items)) + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) == -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(tobytes(item, encoding="latin1")) + + +class IndexedStringsCompiler(IndexCompiler): + + def getItems(self, items, strings): + return items.strings + + +class TopDictIndexCompiler(IndexCompiler): + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for topDict in self.items: + children.extend(topDict.getChildren(strings)) + return children + + +class FDArrayIndexCompiler(IndexCompiler): + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for fontDict in self.items: + children.extend(fontDict.getChildren(strings)) + return children + + def toFile(self, file): + offsets = self.getOffsets() + writeCard16(file, len(self.items)) + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) 
== -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + + def setPos(self, pos, endPos): + self.parent.rawDict["FDArray"] = pos + + +class GlobalSubrsCompiler(IndexCompiler): + def getItems(self, items, strings): + out = [] + for cs in items: + cs.compile() + out.append(cs.bytecode) + return out + +class SubrsCompiler(GlobalSubrsCompiler): + def setPos(self, pos, endPos): + offset = pos - self.parent.pos + self.parent.rawDict["Subrs"] = offset + +class CharStringsCompiler(GlobalSubrsCompiler): + def setPos(self, pos, endPos): + self.parent.rawDict["CharStrings"] = pos + + +class Index(object): + + """This class represents what the CFF spec calls an INDEX.""" + + compilerClass = IndexCompiler + + def __init__(self, file=None): + self.items = [] + name = self.__class__.__name__ + if file is None: + return + if DEBUG: + print("loading %s at %s" % (name, file.tell())) + self.file = file + count = readCard16(file) + if count == 0: + return + self.items = [None] * count + offSize = readCard8(file) + if DEBUG: + print(" index count: %s offSize: %s" % (count, offSize)) + assert offSize <= 4, "offSize too large: %s" % offSize + self.offsets = offsets = [] + pad = b'\0' * (4 - offSize) + for index in range(count+1): + chunk = file.read(offSize) + chunk = pad + chunk + offset, = struct.unpack(">L", chunk) + offsets.append(int(offset)) + self.offsetBase = file.tell() - 1 + file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot + if DEBUG: + print(" end of %s at %s" % (name, file.tell())) + + def __len__(self): + return len(self.items) + + def __getitem__(self, index): + item = self.items[index] + if item is not None: + return item + offset = self.offsets[index] + self.offsetBase + size = self.offsets[index+1] - self.offsets[index] + file = self.file + file.seek(offset) + data = file.read(size) + assert len(data) == size + item = self.produceItem(index, data, file, offset, 
size) + self.items[index] = item + return item + + def produceItem(self, index, data, file, offset, size): + return data + + def append(self, item): + self.items.append(item) + + def getCompiler(self, strings, parent): + return self.compilerClass(self, strings, parent) + + +class GlobalSubrsIndex(Index): + + compilerClass = GlobalSubrsCompiler + + def __init__(self, file=None, globalSubrs=None, private=None, fdSelect=None, fdArray=None): + Index.__init__(self, file) + self.globalSubrs = globalSubrs + self.private = private + if fdSelect: + self.fdSelect = fdSelect + if fdArray: + self.fdArray = fdArray + + def produceItem(self, index, data, file, offset, size): + if self.private is not None: + private = self.private + elif hasattr(self, 'fdArray') and self.fdArray is not None: + private = self.fdArray[self.fdSelect[index]].Private + else: + private = None + return psCharStrings.T2CharString(data, private=private, globalSubrs=self.globalSubrs) + + def toXML(self, xmlWriter, progress): + xmlWriter.comment("The 'index' attribute is only for humans; it is ignored when parsed.") + xmlWriter.newline() + for i in range(len(self)): + subr = self[i] + if subr.needsDecompilation(): + xmlWriter.begintag("CharString", index=i, raw=1) + else: + xmlWriter.begintag("CharString", index=i) + xmlWriter.newline() + subr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + if name != "CharString": + return + subr = psCharStrings.T2CharString() + subr.fromXML(name, attrs, content) + self.append(subr) + + def getItemAndSelector(self, index): + sel = None + if hasattr(self, 'fdSelect'): + sel = self.fdSelect[index] + return self[index], sel + + +class SubrsIndex(GlobalSubrsIndex): + compilerClass = SubrsCompiler + + +class TopDictIndex(Index): + + compilerClass = TopDictIndexCompiler + + def produceItem(self, index, data, file, offset, size): + top = TopDict(self.strings, file, offset, self.GlobalSubrs) + 
top.decompile(data) + return top + + def toXML(self, xmlWriter, progress): + for i in range(len(self)): + xmlWriter.begintag("FontDict", index=i) + xmlWriter.newline() + self[i].toXML(xmlWriter, progress) + xmlWriter.endtag("FontDict") + xmlWriter.newline() + + +class FDArrayIndex(TopDictIndex): + + compilerClass = FDArrayIndexCompiler + + def fromXML(self, name, attrs, content): + if name != "FontDict": + return + fontDict = FontDict() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + fontDict.fromXML(name, attrs, content) + self.append(fontDict) + + +class FDSelect: + def __init__(self, file=None, numGlyphs=None, format=None): + if file: + # read data in from file + self.format = readCard8(file) + if self.format == 0: + from array import array + self.gidArray = array("B", file.read(numGlyphs)).tolist() + elif self.format == 3: + gidArray = [None] * numGlyphs + nRanges = readCard16(file) + fd = None + prev = None + for i in range(nRanges): + first = readCard16(file) + if prev is not None: + for glyphID in range(prev, first): + gidArray[glyphID] = fd + prev = first + fd = readCard8(file) + if prev is not None: + first = readCard16(file) + for glyphID in range(prev, first): + gidArray[glyphID] = fd + self.gidArray = gidArray + else: + assert False, "unsupported FDSelect format: %s" % format + else: + # reading from XML. Make empty gidArray,, and leave format as passed in. + # format is None will result in the smallest representation being used. 
+ self.format = format + self.gidArray = [] + + def __len__(self): + return len(self.gidArray) + + def __getitem__(self, index): + return self.gidArray[index] + + def __setitem__(self, index, fdSelectValue): + self.gidArray[index] = fdSelectValue + + def append(self, fdSelectValue): + self.gidArray.append(fdSelectValue) + + +class CharStrings(object): + + def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray): + if file is not None: + self.charStringsIndex = SubrsIndex(file, globalSubrs, private, fdSelect, fdArray) + self.charStrings = charStrings = {} + for i in range(len(charset)): + charStrings[charset[i]] = i + self.charStringsAreIndexed = 1 + else: + self.charStrings = {} + self.charStringsAreIndexed = 0 + self.globalSubrs = globalSubrs + self.private = private + if fdSelect is not None: + self.fdSelect = fdSelect + if fdArray is not None: + self.fdArray = fdArray + + def keys(self): + return list(self.charStrings.keys()) + + def values(self): + if self.charStringsAreIndexed: + return self.charStringsIndex + else: + return list(self.charStrings.values()) + + def has_key(self, name): + return name in self.charStrings + + __contains__ = has_key + + def __len__(self): + return len(self.charStrings) + + def __getitem__(self, name): + charString = self.charStrings[name] + if self.charStringsAreIndexed: + charString = self.charStringsIndex[charString] + return charString + + def __setitem__(self, name, charString): + if self.charStringsAreIndexed: + index = self.charStrings[name] + self.charStringsIndex[index] = charString + else: + self.charStrings[name] = charString + + def getItemAndSelector(self, name): + if self.charStringsAreIndexed: + index = self.charStrings[name] + return self.charStringsIndex.getItemAndSelector(index) + else: + if hasattr(self, 'fdSelect'): + sel = self.fdSelect[index] # index is not defined at this point. Read R. ? 
+ else: + raise KeyError("fdSelect array not yet defined.") + return self.charStrings[name], sel + + def toXML(self, xmlWriter, progress): + names = sorted(self.keys()) + i = 0 + step = 10 + numGlyphs = len(names) + for name in names: + charStr, fdSelectIndex = self.getItemAndSelector(name) + if charStr.needsDecompilation(): + raw = [("raw", 1)] + else: + raw = [] + if fdSelectIndex is None: + xmlWriter.begintag("CharString", [('name', name)] + raw) + else: + xmlWriter.begintag("CharString", + [('name', name), ('fdSelectIndex', fdSelectIndex)] + raw) + xmlWriter.newline() + charStr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + if not i % step and progress is not None: + progress.setLabel("Dumping 'CFF ' table... (%s)" % name) + progress.increment(step / numGlyphs) + i = i + 1 + + def fromXML(self, name, attrs, content): + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + if name != "CharString": + continue + fdID = -1 + if hasattr(self, "fdArray"): + fdID = safeEval(attrs["fdSelectIndex"]) + private = self.fdArray[fdID].Private + else: + private = self.private + + glyphName = attrs["name"] + charString = psCharStrings.T2CharString( + private=private, + globalSubrs=self.globalSubrs) + charString.fromXML(name, attrs, content) + if fdID >= 0: + charString.fdSelectIndex = fdID + self[glyphName] = charString + + +def readCard8(file): + return byteord(file.read(1)) + +def readCard16(file): + value, = struct.unpack(">H", file.read(2)) + return value + +def writeCard8(file, value): + file.write(bytechr(value)) + +def writeCard16(file, value): + file.write(struct.pack(">H", value)) + +def packCard8(value): + return bytechr(value) + +def packCard16(value): + return struct.pack(">H", value) + +def buildOperatorDict(table): + d = {} + for op, name, arg, default, conv in table: + d[op] = (name, arg) + return d + +def buildOpcodeDict(table): + d = {} + for op, name, arg, default, conv in table: 
+ if isinstance(op, tuple): + op = bytechr(op[0]) + bytechr(op[1]) + else: + op = bytechr(op) + d[name] = (op, arg) + return d + +def buildOrder(table): + l = [] + for op, name, arg, default, conv in table: + l.append(name) + return l + +def buildDefaults(table): + d = {} + for op, name, arg, default, conv in table: + if default is not None: + d[name] = default + return d + +def buildConverters(table): + d = {} + for op, name, arg, default, conv in table: + d[name] = conv + return d + + +class SimpleConverter(object): + def read(self, parent, value): + return value + def write(self, parent, value): + return value + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + return attrs["value"] + +class ASCIIConverter(SimpleConverter): + def read(self, parent, value): + return tostr(value, encoding='ascii') + def write(self, parent, value): + return tobytes(value, encoding='ascii') + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=tounicode(value, encoding="ascii")) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("ascii")) + +class Latin1Converter(SimpleConverter): + def read(self, parent, value): + return tostr(value, encoding='latin1') + def write(self, parent, value): + return tobytes(value, encoding='latin1') + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=tounicode(value, encoding="latin1")) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("latin1")) + + +def parseNum(s): + try: + value = int(s) + except: + value = float(s) + return value + +class NumberConverter(SimpleConverter): + def xmlRead(self, name, attrs, content, parent): + return parseNum(attrs["value"]) + +class ArrayConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, 
name, value, progress): + value = " ".join(map(str, value)) + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + values = attrs["value"].split() + return [parseNum(value) for value in values] + +class TableConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.begintag(name) + xmlWriter.newline() + value.toXML(xmlWriter, progress) + xmlWriter.endtag(name) + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + ob = self.getClass()() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + ob.fromXML(name, attrs, content) + return ob + +class PrivateDictConverter(TableConverter): + def getClass(self): + return PrivateDict + def read(self, parent, value): + size, offset = value + file = parent.file + priv = PrivateDict(parent.strings, file, offset) + file.seek(offset) + data = file.read(size) + assert len(data) == size + priv.decompile(data) + return priv + def write(self, parent, value): + return (0, 0) # dummy value + +class SubrsConverter(TableConverter): + def getClass(self): + return SubrsIndex + def read(self, parent, value): + file = parent.file + file.seek(parent.offset + value) # Offset(self) + return SubrsIndex(file) + def write(self, parent, value): + return 0 # dummy value + +class CharStringsConverter(TableConverter): + def read(self, parent, value): + file = parent.file + charset = parent.charset + globalSubrs = parent.GlobalSubrs + if hasattr(parent, "ROS"): + fdSelect, fdArray = parent.FDSelect, parent.FDArray + private = None + else: + fdSelect, fdArray = None, None + private = parent.Private + file.seek(value) # Offset(0) + return CharStrings(file, charset, globalSubrs, private, fdSelect, fdArray) + def write(self, parent, value): + return 0 # dummy value + def xmlRead(self, name, attrs, content, parent): + if hasattr(parent, "ROS"): + # if it is a CID-keyed font, then the 
private Dict is extracted from the parent.FDArray + private, fdSelect, fdArray = None, parent.FDSelect, parent.FDArray + else: + # if it is a name-keyed font, then the private dict is in the top dict, and there is no fdArray. + private, fdSelect, fdArray = parent.Private, None, None + charStrings = CharStrings(None, None, parent.GlobalSubrs, private, fdSelect, fdArray) + charStrings.fromXML(name, attrs, content) + return charStrings + +class CharsetConverter(object): + def read(self, parent, value): + isCID = hasattr(parent, "ROS") + if value > 2: + numGlyphs = parent.numGlyphs + file = parent.file + file.seek(value) + if DEBUG: + print("loading charset at %s" % value) + format = readCard8(file) + if format == 0: + charset = parseCharset0(numGlyphs, file, parent.strings, isCID) + elif format == 1 or format == 2: + charset = parseCharset(numGlyphs, file, parent.strings, isCID, format) + else: + raise NotImplementedError + assert len(charset) == numGlyphs + if DEBUG: + print(" charset end at %s" % file.tell()) + else: # offset == 0 -> no charset data. + if isCID or "CharStrings" not in parent.rawDict: + assert value == 0 # We get here only when processing fontDicts from the FDArray of CFF-CID fonts. Only the real topDict references the chrset. + charset = None + elif value == 0: + charset = cffISOAdobeStrings + elif value == 1: + charset = cffIExpertStrings + elif value == 2: + charset = cffExpertSubsetStrings + return charset + + def write(self, parent, value): + return 0 # dummy value + def xmlWrite(self, xmlWriter, name, value, progress): + # XXX only write charset when not in OT/TTX context, where we + # dump charset as a separate "GlyphOrder" table. 
+ ##xmlWriter.simpletag("charset") + xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") + xmlWriter.newline() + def xmlRead(self, name, attrs, content, parent): + if 0: + return safeEval(attrs["value"]) + + +class CharsetCompiler(object): + + def __init__(self, strings, charset, parent): + assert charset[0] == '.notdef' + isCID = hasattr(parent.dictObj, "ROS") + data0 = packCharset0(charset, isCID, strings) + data = packCharset(charset, isCID, strings) + if len(data) < len(data0): + self.data = data + else: + self.data = data0 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["charset"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +def getCIDfromName(name, strings): + return int(name[3:]) + +def getSIDfromName(name, strings): + return strings.getSID(name) + +def packCharset0(charset, isCID, strings): + fmt = 0 + data = [packCard8(fmt)] + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + data.append(packCard16(getNameID(name,strings))) + return bytesjoin(data) + + +def packCharset(charset, isCID, strings): + fmt = 1 + ranges = [] + first = None + end = 0 + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + SID = getNameID(name, strings) + if first is None: + first = SID + elif end + 1 != SID: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + first = SID + end = SID + if end: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + + data = [packCard8(fmt)] + if fmt == 1: + nLeftFunc = packCard8 + else: + nLeftFunc = packCard16 + for first, nLeft in ranges: + data.append(packCard16(first) + nLeftFunc(nLeft)) + return bytesjoin(data) + +def parseCharset0(numGlyphs, file, strings, isCID): + charset = [".notdef"] + if isCID: + for i in range(numGlyphs - 1): + CID = 
readCard16(file) + charset.append("cid" + str(CID).zfill(5)) + else: + for i in range(numGlyphs - 1): + SID = readCard16(file) + charset.append(strings[SID]) + return charset + +def parseCharset(numGlyphs, file, strings, isCID, fmt): + charset = ['.notdef'] + count = 1 + if fmt == 1: + nLeftFunc = readCard8 + else: + nLeftFunc = readCard16 + while count < numGlyphs: + first = readCard16(file) + nLeft = nLeftFunc(file) + if isCID: + for CID in range(first, first+nLeft+1): + charset.append("cid" + str(CID).zfill(5)) + else: + for SID in range(first, first+nLeft+1): + charset.append(strings[SID]) + count = count + nLeft + 1 + return charset + + +class EncodingCompiler(object): + + def __init__(self, strings, encoding, parent): + assert not isinstance(encoding, basestring) + data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings) + data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings) + if len(data0) < len(data1): + self.data = data0 + else: + self.data = data1 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["Encoding"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class EncodingConverter(SimpleConverter): + + def read(self, parent, value): + if value == 0: + return "StandardEncoding" + elif value == 1: + return "ExpertEncoding" + else: + assert value > 1 + file = parent.file + file.seek(value) + if DEBUG: + print("loading Encoding at %s" % value) + fmt = readCard8(file) + haveSupplement = fmt & 0x80 + if haveSupplement: + raise NotImplementedError("Encoding supplements are not yet supported") + fmt = fmt & 0x7f + if fmt == 0: + encoding = parseEncoding0(parent.charset, file, haveSupplement, + parent.strings) + elif fmt == 1: + encoding = parseEncoding1(parent.charset, file, haveSupplement, + parent.strings) + return encoding + + def write(self, parent, value): + if value == "StandardEncoding": + return 0 + elif value == 
"ExpertEncoding": + return 1 + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value, progress): + if value in ("StandardEncoding", "ExpertEncoding"): + xmlWriter.simpletag(name, name=value) + xmlWriter.newline() + return + xmlWriter.begintag(name) + xmlWriter.newline() + for code in range(len(value)): + glyphName = value[code] + if glyphName != ".notdef": + xmlWriter.simpletag("map", code=hex(code), name=glyphName) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + if "name" in attrs: + return attrs["name"] + encoding = [".notdef"] * 256 + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + code = safeEval(attrs["code"]) + glyphName = attrs["name"] + encoding[code] = glyphName + return encoding + + +def parseEncoding0(charset, file, haveSupplement, strings): + nCodes = readCard8(file) + encoding = [".notdef"] * 256 + for glyphID in range(1, nCodes + 1): + code = readCard8(file) + if code != 0: + encoding[code] = charset[glyphID] + return encoding + +def parseEncoding1(charset, file, haveSupplement, strings): + nRanges = readCard8(file) + encoding = [".notdef"] * 256 + glyphID = 1 + for i in range(nRanges): + code = readCard8(file) + nLeft = readCard8(file) + for glyphID in range(glyphID, glyphID + nLeft + 1): + encoding[code] = charset[glyphID] + code = code + 1 + glyphID = glyphID + 1 + return encoding + +def packEncoding0(charset, encoding, strings): + fmt = 0 + m = {} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + codes = [] + for name in charset[1:]: + code = m.get(name) + codes.append(code) + + while codes and codes[-1] is None: + codes.pop() + + data = [packCard8(fmt), packCard8(len(codes))] + for code in codes: + if code is None: + code = 0 + data.append(packCard8(code)) + return bytesjoin(data) + +def packEncoding1(charset, encoding, strings): + fmt = 1 + m = 
{} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + ranges = [] + first = None + end = 0 + for name in charset[1:]: + code = m.get(name, -1) + if first is None: + first = code + elif end + 1 != code: + nLeft = end - first + ranges.append((first, nLeft)) + first = code + end = code + nLeft = end - first + ranges.append((first, nLeft)) + + # remove unencoded glyphs at the end. + while ranges and ranges[-1][0] == -1: + ranges.pop() + + data = [packCard8(fmt), packCard8(len(ranges))] + for first, nLeft in ranges: + if first == -1: # unencoded + first = 0 + data.append(packCard8(first) + packCard8(nLeft)) + return bytesjoin(data) + + +class FDArrayConverter(TableConverter): + + def read(self, parent, value): + file = parent.file + file.seek(value) + fdArray = FDArrayIndex(file) + fdArray.strings = parent.strings + fdArray.GlobalSubrs = parent.GlobalSubrs + return fdArray + + def write(self, parent, value): + return 0 # dummy value + + def xmlRead(self, name, attrs, content, parent): + fdArray = FDArrayIndex() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + fdArray.fromXML(name, attrs, content) + return fdArray + + +class FDSelectConverter(object): + + def read(self, parent, value): + file = parent.file + file.seek(value) + fdSelect = FDSelect(file, parent.numGlyphs) + return fdSelect + + def write(self, parent, value): + return 0 # dummy value + + # The FDSelect glyph data is written out to XML in the charstring keys, + # so we write out only the format selector + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, [('format', value.format)]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + fmt = safeEval(attrs["format"]) + file = None + numGlyphs = None + fdSelect = FDSelect(file, numGlyphs, fmt) + return fdSelect + + +def packFDSelect0(fdSelectArray): + fmt = 0 + data = [packCard8(fmt)] + for 
index in fdSelectArray: + data.append(packCard8(index)) + return bytesjoin(data) + + +def packFDSelect3(fdSelectArray): + fmt = 3 + fdRanges = [] + first = None + end = 0 + lenArray = len(fdSelectArray) + lastFDIndex = -1 + for i in range(lenArray): + fdIndex = fdSelectArray[i] + if lastFDIndex != fdIndex: + fdRanges.append([i, fdIndex]) + lastFDIndex = fdIndex + sentinelGID = i + 1 + + data = [packCard8(fmt)] + data.append(packCard16( len(fdRanges) )) + for fdRange in fdRanges: + data.append(packCard16(fdRange[0])) + data.append(packCard8(fdRange[1])) + data.append(packCard16(sentinelGID)) + return bytesjoin(data) + + +class FDSelectCompiler(object): + + def __init__(self, fdSelect, parent): + fmt = fdSelect.format + fdSelectArray = fdSelect.gidArray + if fmt == 0: + self.data = packFDSelect0(fdSelectArray) + elif fmt == 3: + self.data = packFDSelect3(fdSelectArray) + else: + # choose smaller of the two formats + data0 = packFDSelect0(fdSelectArray) + data3 = packFDSelect3(fdSelectArray) + if len(data0) < len(data3): + self.data = data0 + fdSelect.format = 0 + else: + self.data = data3 + fdSelect.format = 3 + + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["FDSelect"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class ROSConverter(SimpleConverter): + + def xmlWrite(self, xmlWriter, name, value, progress): + registry, order, supplement = value + xmlWriter.simpletag(name, [('Registry', tostr(registry)), ('Order', tostr(order)), + ('Supplement', supplement)]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement'])) + + +topDictOperators = [ +# opcode name argument type default converter + ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()), + ((12, 20), 'SyntheticBase', 'number', None, None), + (0, 'version', 'SID', None, None), + (1, 'Notice', 'SID', None, 
Latin1Converter()), + ((12, 0), 'Copyright', 'SID', None, Latin1Converter()), + (2, 'FullName', 'SID', None, None), + ((12, 38), 'FontName', 'SID', None, None), + (3, 'FamilyName', 'SID', None, None), + (4, 'Weight', 'SID', None, None), + ((12, 1), 'isFixedPitch', 'number', 0, None), + ((12, 2), 'ItalicAngle', 'number', 0, None), + ((12, 3), 'UnderlinePosition', 'number', None, None), + ((12, 4), 'UnderlineThickness', 'number', 50, None), + ((12, 5), 'PaintType', 'number', 0, None), + ((12, 6), 'CharstringType', 'number', 2, None), + ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), + (13, 'UniqueID', 'number', None, None), + (5, 'FontBBox', 'array', [0, 0, 0, 0], None), + ((12, 8), 'StrokeWidth', 'number', 0, None), + (14, 'XUID', 'array', None, None), + ((12, 21), 'PostScript', 'SID', None, None), + ((12, 22), 'BaseFontName', 'SID', None, None), + ((12, 23), 'BaseFontBlend', 'delta', None, None), + ((12, 31), 'CIDFontVersion', 'number', 0, None), + ((12, 32), 'CIDFontRevision', 'number', 0, None), + ((12, 33), 'CIDFontType', 'number', 0, None), + ((12, 34), 'CIDCount', 'number', 8720, None), + (15, 'charset', 'number', 0, CharsetConverter()), + ((12, 35), 'UIDBase', 'number', None, None), + (16, 'Encoding', 'number', 0, EncodingConverter()), + (18, 'Private', ('number', 'number'), None, PrivateDictConverter()), + ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), + ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), + (17, 'CharStrings', 'number', None, CharStringsConverter()), +] + +# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, +# in order for the font to compile back from xml. 
+ + +privateDictOperators = [ +# opcode name argument type default converter + (6, 'BlueValues', 'delta', None, None), + (7, 'OtherBlues', 'delta', None, None), + (8, 'FamilyBlues', 'delta', None, None), + (9, 'FamilyOtherBlues', 'delta', None, None), + ((12, 9), 'BlueScale', 'number', 0.039625, None), + ((12, 10), 'BlueShift', 'number', 7, None), + ((12, 11), 'BlueFuzz', 'number', 1, None), + (10, 'StdHW', 'number', None, None), + (11, 'StdVW', 'number', None, None), + ((12, 12), 'StemSnapH', 'delta', None, None), + ((12, 13), 'StemSnapV', 'delta', None, None), + ((12, 14), 'ForceBold', 'number', 0, None), + ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated + ((12, 16), 'lenIV', 'number', None, None), # deprecated + ((12, 17), 'LanguageGroup', 'number', 0, None), + ((12, 18), 'ExpansionFactor', 'number', 0.06, None), + ((12, 19), 'initialRandomSeed', 'number', 0, None), + (20, 'defaultWidthX', 'number', 0, None), + (21, 'nominalWidthX', 'number', 0, None), + (19, 'Subrs', 'number', None, SubrsConverter()), +] + +def addConverters(table): + for i in range(len(table)): + op, name, arg, default, conv = table[i] + if conv is not None: + continue + if arg in ("delta", "array"): + conv = ArrayConverter() + elif arg == "number": + conv = NumberConverter() + elif arg == "SID": + conv = ASCIIConverter() + else: + assert False + table[i] = op, name, arg, default, conv + +addConverters(privateDictOperators) +addConverters(topDictOperators) + + +class TopDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(topDictOperators) + + +class PrivateDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(privateDictOperators) + + +class DictCompiler(object): + + def __init__(self, dictObj, strings, parent): + assert isinstance(strings, IndexedStrings) + self.dictObj = dictObj + self.strings = strings + self.parent = parent + rawDict = {} + for name in dictObj.order: + value = getattr(dictObj, name, None) + if value is 
None: + continue + conv = dictObj.converters[name] + value = conv.write(dictObj, value) + if value == dictObj.defaults.get(name): + continue + rawDict[name] = value + self.rawDict = rawDict + + def setPos(self, pos, endPos): + pass + + def getDataLength(self): + return len(self.compile("getDataLength")) + + def compile(self, reason): + if DEBUG: + print("-- compiling %s for %s" % (self.__class__.__name__, reason)) + print("in baseDict: ", self) + rawDict = self.rawDict + data = [] + for name in self.dictObj.order: + value = rawDict.get(name) + if value is None: + continue + op, argType = self.opcodes[name] + if isinstance(argType, tuple): + l = len(argType) + assert len(value) == l, "value doesn't match arg type" + for i in range(l): + arg = argType[i] + v = value[i] + arghandler = getattr(self, "arg_" + arg) + data.append(arghandler(v)) + else: + arghandler = getattr(self, "arg_" + argType) + data.append(arghandler(value)) + data.append(op) + return bytesjoin(data) + + def toFile(self, file): + file.write(self.compile("toFile")) + + def arg_number(self, num): + return encodeNumber(num) + def arg_SID(self, s): + return psCharStrings.encodeIntCFF(self.strings.getSID(s)) + def arg_array(self, value): + data = [] + for num in value: + data.append(encodeNumber(num)) + return bytesjoin(data) + def arg_delta(self, value): + out = [] + last = 0 + for v in value: + out.append(v - last) + last = v + data = [] + for num in out: + data.append(encodeNumber(num)) + return bytesjoin(data) + + +def encodeNumber(num): + if isinstance(num, float): + return psCharStrings.encodeFloat(num) + else: + return psCharStrings.encodeIntCFF(num) + + +class TopDictCompiler(DictCompiler): + + opcodes = buildOpcodeDict(topDictOperators) + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "charset") and self.dictObj.charset: + children.append(CharsetCompiler(strings, self.dictObj.charset, self)) + if hasattr(self.dictObj, "Encoding"): + encoding = self.dictObj.Encoding 
+ if not isinstance(encoding, basestring): + children.append(EncodingCompiler(strings, encoding, self)) + if hasattr(self.dictObj, "FDSelect"): + # I have not yet supported merging a ttx CFF-CID font, as there are interesting + # issues about merging the FDArrays. Here I assume that + # either the font was read from XML, and teh FDSelect indices are all + # in the charstring data, or the FDSelect array is already fully defined. + fdSelect = self.dictObj.FDSelect + if len(fdSelect) == 0: # probably read in from XML; assume fdIndex in CharString data + charStrings = self.dictObj.CharStrings + for name in self.dictObj.charset: + fdSelect.append(charStrings[name].fdSelectIndex) + fdSelectComp = FDSelectCompiler(fdSelect, self) + children.append(fdSelectComp) + if hasattr(self.dictObj, "CharStrings"): + items = [] + charStrings = self.dictObj.CharStrings + for name in self.dictObj.charset: + items.append(charStrings[name]) + charStringsComp = CharStringsCompiler(items, strings, self) + children.append(charStringsComp) + if hasattr(self.dictObj, "FDArray"): + # I have not yet supported merging a ttx CFF-CID font, as there are interesting + # issues about merging the FDArrays. Here I assume that the FDArray info is correct + # and complete. 
+ fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self) + children.append(fdArrayIndexComp) + children.extend(fdArrayIndexComp.getChildren(strings)) + if hasattr(self.dictObj, "Private"): + privComp = self.dictObj.Private.getCompiler(strings, self) + children.append(privComp) + children.extend(privComp.getChildren(strings)) + return children + + +class FontDictCompiler(DictCompiler): + + opcodes = buildOpcodeDict(topDictOperators) + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "Private"): + privComp = self.dictObj.Private.getCompiler(strings, self) + children.append(privComp) + children.extend(privComp.getChildren(strings)) + return children + + +class PrivateDictCompiler(DictCompiler): + + opcodes = buildOpcodeDict(privateDictOperators) + + def setPos(self, pos, endPos): + size = endPos - pos + self.parent.rawDict["Private"] = size, pos + self.pos = pos + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "Subrs"): + children.append(self.dictObj.Subrs.getCompiler(strings, self)) + return children + + +class BaseDict(object): + + def __init__(self, strings=None, file=None, offset=None): + self.rawDict = {} + if DEBUG: + print("loading %s at %s" % (self.__class__.__name__, offset)) + self.file = file + self.offset = offset + self.strings = strings + self.skipNames = [] + + def decompile(self, data): + if DEBUG: + print(" length %s is %s" % (self.__class__.__name__, len(data))) + dec = self.decompilerClass(self.strings) + dec.decompile(data) + self.rawDict = dec.getDict() + self.postDecompile() + + def postDecompile(self): + pass + + def getCompiler(self, strings, parent): + return self.compilerClass(self, strings, parent) + + def __getattr__(self, name): + value = self.rawDict.get(name) + if value is None: + value = self.defaults.get(name) + if value is None: + raise AttributeError(name) + conv = self.converters[name] + value = conv.read(self, value) + setattr(self, name, value) + return value + 
+ def toXML(self, xmlWriter, progress): + for name in self.order: + if name in self.skipNames: + continue + value = getattr(self, name, None) + if value is None: + continue + conv = self.converters[name] + conv.xmlWrite(xmlWriter, name, value, progress) + + def fromXML(self, name, attrs, content): + conv = self.converters[name] + value = conv.xmlRead(name, attrs, content, self) + setattr(self, name, value) + + +class TopDict(BaseDict): + + defaults = buildDefaults(topDictOperators) + converters = buildConverters(topDictOperators) + order = buildOrder(topDictOperators) + decompilerClass = TopDictDecompiler + compilerClass = TopDictCompiler + + def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): + BaseDict.__init__(self, strings, file, offset) + self.GlobalSubrs = GlobalSubrs + + def getGlyphOrder(self): + return self.charset + + def postDecompile(self): + offset = self.rawDict.get("CharStrings") + if offset is None: + return + # get the number of glyphs beforehand. + self.file.seek(offset) + self.numGlyphs = readCard16(self.file) + + def toXML(self, xmlWriter, progress): + if hasattr(self, "CharStrings"): + self.decompileAllCharStrings(progress) + if hasattr(self, "ROS"): + self.skipNames = ['Encoding'] + if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"): + # these values have default values, but I only want them to show up + # in CID fonts. + self.skipNames = ['CIDFontVersion', 'CIDFontRevision', 'CIDFontType', + 'CIDCount'] + BaseDict.toXML(self, xmlWriter, progress) + + def decompileAllCharStrings(self, progress): + # XXX only when doing ttdump -i? 
+ i = 0 + for charString in self.CharStrings.values(): + try: + charString.decompile() + except: + print("Error in charstring ", i) + import sys + typ, value = sys.exc_info()[0:2] + raise typ(value) + if not i % 30 and progress: + progress.increment(0) # update + i = i + 1 + + +class FontDict(BaseDict): + + defaults = buildDefaults(topDictOperators) + converters = buildConverters(topDictOperators) + order = buildOrder(topDictOperators) + decompilerClass = None + compilerClass = FontDictCompiler + + def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): + BaseDict.__init__(self, strings, file, offset) + self.GlobalSubrs = GlobalSubrs + + def getGlyphOrder(self): + return self.charset + + def toXML(self, xmlWriter, progress): + self.skipNames = ['Encoding'] + BaseDict.toXML(self, xmlWriter, progress) + + +class PrivateDict(BaseDict): + defaults = buildDefaults(privateDictOperators) + converters = buildConverters(privateDictOperators) + order = buildOrder(privateDictOperators) + decompilerClass = PrivateDictDecompiler + compilerClass = PrivateDictCompiler + + +class IndexedStrings(object): + + """SID -> string mapping.""" + + def __init__(self, file=None): + if file is None: + strings = [] + else: + strings = [tostr(s, encoding="latin1") for s in Index(file)] + self.strings = strings + + def getCompiler(self): + return IndexedStringsCompiler(self, None, None) + + def __len__(self): + return len(self.strings) + + def __getitem__(self, SID): + if SID < cffStandardStringCount: + return cffStandardStrings[SID] + else: + return self.strings[SID - cffStandardStringCount] + + def getSID(self, s): + if not hasattr(self, "stringMapping"): + self.buildStringMapping() + if s in cffStandardStringMapping: + SID = cffStandardStringMapping[s] + elif s in self.stringMapping: + SID = self.stringMapping[s] + else: + SID = len(self.strings) + cffStandardStringCount + self.strings.append(s) + self.stringMapping[s] = SID + return SID + + def getStrings(self): + return 
self.strings + + def buildStringMapping(self): + self.stringMapping = {} + for index in range(len(self.strings)): + self.stringMapping[self.strings[index]] = index + cffStandardStringCount + + +# The 391 Standard Strings as used in the CFF format. +# from Adobe Technical None #5176, version 1.0, 18 March 1998 + +cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', + 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', + 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', + 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', + 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', + 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', + 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', + 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', + 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', + 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', + 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', + 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', + 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', + 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', + 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', + 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', + 'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', + 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', + 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', + 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', + 'copyright', 
'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', + 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', + 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', + 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute', + 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', + 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', + 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', + 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', + 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', + 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', + 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', + 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', + 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle', + 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', + 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', + 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', + 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', + 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', + 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', + 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', + 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', + 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', + 'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', + 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', + 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', + 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', + 'questiondownsmall', 
'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', + 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', + 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', + 'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior', + 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', + 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', + 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', + 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', + 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', + 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', + 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', + 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', + 'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002', + '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', + 'Semibold' +] + +cffStandardStringCount = 391 +assert len(cffStandardStrings) == cffStandardStringCount +# build reverse mapping +cffStandardStringMapping = {} +for _i in range(cffStandardStringCount): + cffStandardStringMapping[cffStandardStrings[_i]] = _i + +cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign", +"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", +"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", +"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", +"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", +"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", +"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", +"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", +"k", "l", "m", "n", "o", "p", 
"q", "r", "s", "t", "u", "v", "w", "x", "y", "z", +"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", +"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", +"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl", +"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", +"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", +"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", +"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", +"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE", +"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", +"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", +"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn", +"threequarters", "twosuperior", "registered", "minus", "eth", "multiply", +"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", +"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", +"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", +"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", +"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute", +"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", +"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis", +"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde", +"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", +"zcaron"] + +cffISOAdobeStringCount = 229 +assert len(cffISOAdobeStrings) == cffISOAdobeStringCount + +cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall", +"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", +"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", 
+"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle", +"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", +"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon", +"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall", +"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", +"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", +"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", +"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", +"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", +"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", +"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", +"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall", +"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", +"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", +"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", +"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth", +"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", +"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior", +"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", +"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", +"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", +"centinferior", "dollarinferior", "periodinferior", "commainferior", +"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall", +"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", +"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", +"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", 
+"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", +"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", +"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", +"Ydieresissmall"] + +cffExpertStringCount = 166 +assert len(cffIExpertStrings) == cffExpertStringCount + +cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle", +"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader", +"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle", +"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", +"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", +"semicolon", "commasuperior", "threequartersemdash", "periodsuperior", +"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", +"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", +"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", +"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah", +"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf", +"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", +"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior", +"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", +"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", +"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", +"eightinferior", "nineinferior", "centinferior", "dollarinferior", +"periodinferior", "commainferior"] + +cffExpertSubsetStringCount = 87 +assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount diff -Nru fonttools-2.4/Tools/fontTools/encodings/codecs.py fonttools-3.0/Tools/fontTools/encodings/codecs.py --- fonttools-2.4/Tools/fontTools/encodings/codecs.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/encodings/codecs.py 
2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,135 @@ +"""Extend the Python codecs module with a few encodings that are used in OpenType (name table) +but missing from Python. See https://github.com/behdad/fonttools/issues/236 for details.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import codecs +import encodings + +class ExtendCodec(codecs.Codec): + + def __init__(self, name, base_encoding, mapping): + self.name = name + self.base_encoding = base_encoding + self.mapping = mapping + self.reverse = {v:k for k,v in mapping.items()} + self.max_len = max(len(v) for v in mapping.values()) + self.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode) + codecs.register_error(name, self.error) + + def encode(self, input, errors='strict'): + assert errors == 'strict' + #return codecs.encode(input, self.base_encoding, self.name), len(input) + + # The above line could totally be all we needed, relying on the error + # handling to replace the unencodable Unicode characters with our extended + # byte sequences. + # + # However, there seems to be a design bug in Python (probably intentional): + # the error handler for encoding is supposed to return a **Unicode** character, + # that then needs to be encodable itself... Ugh. + # + # So we implement what codecs.encode() should have been doing: which is expect + # error handler to return bytes() to be added to the output. + # + # This seems to have been fixed in Python 3.3. We should try using that and + # use fallback only if that failed. 
+ # https://docs.python.org/3.3/library/codecs.html#codecs.register_error + + length = len(input) + out = b'' + while input: + try: + part = codecs.encode(input, self.base_encoding) + out += part + input = '' # All converted + except UnicodeEncodeError as e: + # Convert the correct part + out += codecs.encode(input[:e.start], self.base_encoding) + replacement, pos = self.error(e) + out += replacement + input = input[pos:] + return out, length + + def decode(self, input, errors='strict'): + assert errors == 'strict' + return codecs.decode(input, self.base_encoding, self.name), len(input) + + def error(self, e): + if isinstance(e, UnicodeDecodeError): + for end in range(e.start + 1, e.end + 1): + s = e.object[e.start:end] + if s in self.mapping: + return self.mapping[s], end + elif isinstance(e, UnicodeEncodeError): + for end in range(e.start + 1, e.start + self.max_len + 1): + s = e.object[e.start:end] + if s in self.reverse: + return self.reverse[s], end + e.encoding = self.name + raise e + + +_extended_encodings = { + "x_mac_japanese_ttx": ("shift_jis", { + b"\xFC": unichr(0x007C), + b"\x7E": unichr(0x007E), + b"\x80": unichr(0x005C), + b"\xA0": unichr(0x00A0), + b"\xFD": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), + "x_mac_trad_chinese_ttx": ("big5", { + b"\x80": unichr(0x005C), + b"\xA0": unichr(0x00A0), + b"\xFD": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), + "x_mac_korean_ttx": ("euc_kr", { + b"\x80": unichr(0x00A0), + b"\x81": unichr(0x20A9), + b"\x82": unichr(0x2014), + b"\x83": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), + "x_mac_simp_chinese_ttx": ("gb2312", { + b"\x80": unichr(0x00FC), + b"\xA0": unichr(0x00A0), + b"\xFD": unichr(0x00A9), + b"\xFE": unichr(0x2122), + b"\xFF": unichr(0x2026), + }), +} + +_cache = {} + +def search_function(name): + name = encodings.normalize_encoding(name) # Rather undocumented... 
+ if name in _extended_encodings: + if name not in _cache: + base_encoding, mapping = _extended_encodings[name] + assert(name[-4:] == "_ttx") + # Python 2 didn't have any of the encodings that we are implementing + # in this file. Python 3 added aliases for the East Asian ones, mapping + # them "temporarily" to the same base encoding as us, with a comment + # suggesting that full implementation will appear some time later. + # As such, try the Python version of the x_mac_... first, if that is found, + # use *that* as our base encoding. This would make our encoding upgrade + # to the full encoding when and if Python finally implements that. + # http://bugs.python.org/issue24041 + base_encodings = [name[:-4], base_encoding] + for base_encoding in base_encodings: + try: + codecs.lookup(base_encoding) + except LookupError: + continue + _cache[name] = ExtendCodec(name, base_encoding, mapping) + break + return _cache[name].info + + return None + +codecs.register(search_function) diff -Nru fonttools-2.4/Tools/fontTools/encodings/codecs_test.py fonttools-3.0/Tools/fontTools/encodings/codecs_test.py --- fonttools-2.4/Tools/fontTools/encodings/codecs_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/encodings/codecs_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,25 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +import unittest +import fontTools.encodings.codecs # Not to be confused with "import codecs" + +class ExtendedCodecsTest(unittest.TestCase): + + def test_decode_mac_japanese(self): + self.assertEqual(b'x\xfe\xfdy'.decode("x_mac_japanese_ttx"), + unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)) + + def test_encode_mac_japanese(self): + self.assertEqual(b'x\xfe\xfdy', + (unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)).encode("x_mac_japanese_ttx")) + + def test_decode_mac_trad_chinese(self): + 
self.assertEqual(b'\x80'.decode("x_mac_trad_chinese_ttx"), + unichr(0x5C)) + + def test_decode_mac_romanian(self): + self.assertEqual(b'x\xfb'.decode("mac_romanian"), + unichr(0x78)+unichr(0x02DA)) + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/encodings/__init__.py fonttools-3.0/Tools/fontTools/encodings/__init__.py --- fonttools-2.4/Tools/fontTools/encodings/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/encodings/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +"""Empty __init__.py file to signal Python this directory is a package.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * diff -Nru fonttools-2.4/Tools/fontTools/encodings/MacRoman.py fonttools-3.0/Tools/fontTools/encodings/MacRoman.py --- fonttools-2.4/Tools/fontTools/encodings/MacRoman.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/encodings/MacRoman.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,39 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +MacRoman = [ + 'NUL', 'Eth', 'eth', 'Lslash', 'lslash', 'Scaron', 'scaron', 'Yacute', + 'yacute', 'HT', 'LF', 'Thorn', 'thorn', 'CR', 'Zcaron', 'zcaron', 'DLE', 'DC1', + 'DC2', 'DC3', 'DC4', 'onehalf', 'onequarter', 'onesuperior', 'threequarters', + 'threesuperior', 'twosuperior', 'brokenbar', 'minus', 'multiply', 'RS', 'US', + 'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand', + 'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma', + 'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five', + 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal', + 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', + 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'bracketleft', 'backslash', 
'bracketright', 'asciicircum', 'underscore', + 'grave', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', + 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', + 'braceright', 'asciitilde', 'DEL', 'Adieresis', 'Aring', 'Ccedilla', 'Eacute', + 'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex', + 'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave', 'ecircumflex', + 'edieresis', 'iacute', 'igrave', 'icircumflex', 'idieresis', 'ntilde', + 'oacute', 'ograve', 'ocircumflex', 'odieresis', 'otilde', 'uacute', 'ugrave', + 'ucircumflex', 'udieresis', 'dagger', 'degree', 'cent', 'sterling', 'section', + 'bullet', 'paragraph', 'germandbls', 'registered', 'copyright', 'trademark', + 'acute', 'dieresis', 'notequal', 'AE', 'Oslash', 'infinity', 'plusminus', + 'lessequal', 'greaterequal', 'yen', 'mu', 'partialdiff', 'summation', + 'product', 'pi', 'integral', 'ordfeminine', 'ordmasculine', 'Omega', 'ae', + 'oslash', 'questiondown', 'exclamdown', 'logicalnot', 'radical', 'florin', + 'approxequal', 'Delta', 'guillemotleft', 'guillemotright', 'ellipsis', + 'nbspace', 'Agrave', 'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', + 'quotedblleft', 'quotedblright', 'quoteleft', 'quoteright', 'divide', 'lozenge', + 'ydieresis', 'Ydieresis', 'fraction', 'currency', 'guilsinglleft', + 'guilsinglright', 'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase', + 'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute', + 'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Oacute', + 'Ocircumflex', 'apple', 'Ograve', 'Uacute', 'Ucircumflex', 'Ugrave', 'dotlessi', + 'circumflex', 'tilde', 'macron', 'breve', 'dotaccent', 'ring', 'cedilla', + 'hungarumlaut', 'ogonek', 'caron' + ] diff -Nru fonttools-2.4/Tools/fontTools/encodings/StandardEncoding.py fonttools-3.0/Tools/fontTools/encodings/StandardEncoding.py --- fonttools-2.4/Tools/fontTools/encodings/StandardEncoding.py 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/encodings/StandardEncoding.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,51 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +StandardEncoding = [ + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', 'space', 'exclam', 'quotedbl', + 'numbersign', 'dollar', 'percent', 'ampersand', + 'quoteright', 'parenleft', 'parenright', 'asterisk', 'plus', + 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', 'two', + 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', + 'colon', 'semicolon', 'less', 'equal', 'greater', + 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', + 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', + 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', + 'bracketright', 'asciicircum', 'underscore', 'quoteleft', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', + 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', + 'y', 'z', 'braceleft', 'bar', 'braceright', 'asciitilde', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', 'exclamdown', + 'cent', 'sterling', 'fraction', 'yen', 'florin', 'section', + 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', + 'guilsinglleft', 'guilsinglright', 'fi', 'fl', '.notdef', + 'endash', 'dagger', 'daggerdbl', 
'periodcentered', + '.notdef', 'paragraph', 'bullet', 'quotesinglbase', + 'quotedblbase', 'quotedblright', 'guillemotright', + 'ellipsis', 'perthousand', '.notdef', 'questiondown', + '.notdef', 'grave', 'acute', 'circumflex', 'tilde', + 'macron', 'breve', 'dotaccent', 'dieresis', '.notdef', + 'ring', 'cedilla', '.notdef', 'hungarumlaut', 'ogonek', + 'caron', 'emdash', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', 'AE', '.notdef', + 'ordfeminine', '.notdef', '.notdef', '.notdef', '.notdef', + 'Lslash', 'Oslash', 'OE', 'ordmasculine', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', 'ae', '.notdef', + '.notdef', '.notdef', 'dotlessi', '.notdef', '.notdef', + 'lslash', 'oslash', 'oe', 'germandbls', '.notdef', + '.notdef', '.notdef', '.notdef' + ] diff -Nru fonttools-2.4/Tools/fontTools/feaLib/ast.py fonttools-3.0/Tools/fontTools/feaLib/ast.py --- fonttools-2.4/Tools/fontTools/feaLib/ast.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/ast.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,98 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals + + +class FeatureFile(object): + def __init__(self): + self.statements = [] + + +class FeatureBlock(object): + def __init__(self, location, name, use_extension): + self.location = location + self.name, self.use_extension = name, use_extension + self.statements = [] + + +class LookupBlock(object): + def __init__(self, location, name, use_extension): + self.location = location + self.name, self.use_extension = name, use_extension + self.statements = [] + + +class GlyphClassDefinition(object): + def __init__(self, location, name, glyphs): + self.location = location + self.name = name + self.glyphs = glyphs + + +class AlternateSubstitution(object): + def __init__(self, location, glyph, from_class): + 
self.location = location + self.glyph, self.from_class = (glyph, from_class) + + +class AnchorDefinition(object): + def __init__(self, location, name, x, y, contourpoint): + self.location = location + self.name, self.x, self.y, self.contourpoint = name, x, y, contourpoint + + +class LanguageStatement(object): + def __init__(self, location, language, include_default, required): + self.location = location + self.language = language + self.include_default = include_default + self.required = required + + +class LanguageSystemStatement(object): + def __init__(self, location, script, language): + self.location = location + self.script, self.language = (script, language) + + +class IgnoreSubstitutionRule(object): + def __init__(self, location, prefix, glyphs, suffix): + self.location = location + self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix) + + +class LookupReferenceStatement(object): + def __init__(self, location, lookup): + self.location, self.lookup = (location, lookup) + + +class ScriptStatement(object): + def __init__(self, location, script): + self.location = location + self.script = script + + +class SubtableStatement(object): + def __init__(self, location): + self.location = location + + +class SubstitutionRule(object): + def __init__(self, location, old, new): + self.location, self.old, self.new = (location, old, new) + self.old_prefix = [] + self.old_suffix = [] + self.lookups = [None] * len(old) + + +class ValueRecord(object): + def __init__(self, location, xPlacement, yPlacement, xAdvance, yAdvance): + self.location = location + self.xPlacement, self.yPlacement = (xPlacement, yPlacement) + self.xAdvance, self.yAdvance = (xAdvance, yAdvance) + + +class ValueRecordDefinition(object): + def __init__(self, location, name, value): + self.location = location + self.name = name + self.value = value diff -Nru fonttools-2.4/Tools/fontTools/feaLib/__init__.py fonttools-3.0/Tools/fontTools/feaLib/__init__.py --- 
fonttools-2.4/Tools/fontTools/feaLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +"""fontTools.feaLib -- a package for dealing with OpenType feature files.""" + +# The structure of OpenType feature files is defined here: +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html diff -Nru fonttools-2.4/Tools/fontTools/feaLib/lexer.py fonttools-3.0/Tools/fontTools/feaLib/lexer.py --- fonttools-2.4/Tools/fontTools/feaLib/lexer.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/lexer.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,203 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +import codecs +import os + + +class LexerError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + path, line, column = self.location + return "%s:%d:%d: %s" % (path, line, column, message) + else: + return message + + +class Lexer(object): + NUMBER = "NUMBER" + STRING = "STRING" + NAME = "NAME" + FILENAME = "FILENAME" + GLYPHCLASS = "GLYPHCLASS" + CID = "CID" + SYMBOL = "SYMBOL" + COMMENT = "COMMENT" + NEWLINE = "NEWLINE" + + CHAR_WHITESPACE_ = " \t" + CHAR_NEWLINE_ = "\r\n" + CHAR_SYMBOL_ = ";:-+'{}[]<>()=" + CHAR_DIGIT_ = "0123456789" + CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" + CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + CHAR_NAME_START_ = CHAR_LETTER_ + "_.\\" + CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_." 
+ + MODE_NORMAL_ = "NORMAL" + MODE_FILENAME_ = "FILENAME" + + def __init__(self, text, filename): + self.filename_ = filename + self.line_ = 1 + self.pos_ = 0 + self.line_start_ = 0 + self.text_ = text + self.text_length_ = len(text) + self.mode_ = Lexer.MODE_NORMAL_ + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # Python 3 + while True: + token_type, token, location = self.next_() + if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}: + return (token_type, token, location) + + def next_(self): + self.scan_over_(Lexer.CHAR_WHITESPACE_) + column = self.pos_ - self.line_start_ + 1 + location = (self.filename_, self.line_, column) + start = self.pos_ + text = self.text_ + limit = len(text) + if start >= limit: + raise StopIteration() + cur_char = text[start] + next_char = text[start + 1] if start + 1 < limit else None + + if cur_char == "\n": + self.pos_ += 1 + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "\r": + self.pos_ += (2 if next_char == "\n" else 1) + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "#": + self.scan_until_(Lexer.CHAR_NEWLINE_) + return (Lexer.COMMENT, text[start:self.pos_], location) + + if self.mode_ is Lexer.MODE_FILENAME_: + if cur_char != "(": + raise LexerError("Expected '(' before file name", location) + self.scan_until_(")") + cur_char = text[self.pos_] if self.pos_ < limit else None + if cur_char != ")": + raise LexerError("Expected ')' after file name", location) + self.pos_ += 1 + self.mode_ = Lexer.MODE_NORMAL_ + return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location) + + if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.CID, int(text[start + 1:self.pos_], 10), location) + if cur_char == "@": + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + 
glyphclass = text[start + 1:self.pos_] + if len(glyphclass) < 1: + raise LexerError("Expected glyph class name", location) + if len(glyphclass) > 30: + raise LexerError( + "Glyph class names must not be longer than 30 characters", + location) + return (Lexer.GLYPHCLASS, glyphclass, location) + if cur_char in Lexer.CHAR_NAME_START_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + token = text[start:self.pos_] + if token == "include": + self.mode_ = Lexer.MODE_FILENAME_ + return (Lexer.NAME, token, location) + if cur_char == "0" and next_char in "xX": + self.pos_ += 2 + self.scan_over_(Lexer.CHAR_HEXDIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 16), location) + if cur_char in Lexer.CHAR_DIGIT_: + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if cur_char in Lexer.CHAR_SYMBOL_: + self.pos_ += 1 + return (Lexer.SYMBOL, cur_char, location) + if cur_char == '"': + self.pos_ += 1 + self.scan_until_('"\r\n') + if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': + self.pos_ += 1 + return (Lexer.STRING, text[start + 1:self.pos_ - 1], location) + else: + raise LexerError("Expected '\"' to terminate string", location) + raise LexerError("Unexpected character: '%s'" % cur_char, location) + + def scan_over_(self, valid): + p = self.pos_ + while p < self.text_length_ and self.text_[p] in valid: + p += 1 + self.pos_ = p + + def scan_until_(self, stop_at): + p = self.pos_ + while p < self.text_length_ and self.text_[p] not in stop_at: + p += 1 + self.pos_ = p + + +class IncludingLexer(object): + def __init__(self, filename): + self.lexers_ = [self.make_lexer_(filename, (filename, 0, 0))] + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # 
Python 3 + while self.lexers_: + lexer = self.lexers_[-1] + try: + token_type, token, location = lexer.next() + except StopIteration: + self.lexers_.pop() + continue + if token_type is Lexer.NAME and token == "include": + fname_type, fname_token, fname_location = lexer.next() + if fname_type is not Lexer.FILENAME: + raise LexerError("Expected file name", fname_location) + semi_type, semi_token, semi_location = lexer.next() + if semi_type is not Lexer.SYMBOL or semi_token != ";": + raise LexerError("Expected ';'", semi_location) + curpath, _ = os.path.split(lexer.filename_) + path = os.path.join(curpath, fname_token) + if len(self.lexers_) >= 5: + raise LexerError("Too many recursive includes", + fname_location) + self.lexers_.append(self.make_lexer_(path, fname_location)) + continue + else: + return (token_type, token, location) + raise StopIteration() + + @staticmethod + def make_lexer_(filename, location): + try: + with codecs.open(filename, "rb", "utf-8") as f: + return Lexer(f.read(), filename) + except IOError as err: + raise LexerError(str(err), location) diff -Nru fonttools-2.4/Tools/fontTools/feaLib/lexer_test.py fonttools-3.0/Tools/fontTools/feaLib/lexer_test.py --- fonttools-2.4/Tools/fontTools/feaLib/lexer_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/lexer_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,160 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.lexer import IncludingLexer, Lexer, LexerError +import os +import unittest + + +def lex(s): + return [(typ, tok) for (typ, tok, _) in Lexer(s, "test.fea")] + + +class LexerErrorTest(unittest.TestCase): + def test_str(self): + err = LexerError("Squeak!", ("foo.fea", 23, 42)) + self.assertEqual(str(err), "foo.fea:23:42: Squeak!") + + def test_str_nolocation(self): + err = LexerError("Squeak!", None) + self.assertEqual(str(err), "Squeak!") + + +class 
LexerTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_empty(self): + self.assertEqual(lex(""), []) + self.assertEqual(lex(" \t "), []) + + def test_name(self): + self.assertEqual(lex("a17"), [(Lexer.NAME, "a17")]) + self.assertEqual(lex(".notdef"), [(Lexer.NAME, ".notdef")]) + self.assertEqual(lex("two.oldstyle"), [(Lexer.NAME, "two.oldstyle")]) + self.assertEqual(lex("_"), [(Lexer.NAME, "_")]) + self.assertEqual(lex("\\table"), [(Lexer.NAME, "\\table")]) + + def test_cid(self): + self.assertEqual(lex("\\0 \\987"), [(Lexer.CID, 0), (Lexer.CID, 987)]) + + def test_glyphclass(self): + self.assertEqual(lex("@Vowel.sc"), [(Lexer.GLYPHCLASS, "Vowel.sc")]) + self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@(a)") + self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@ A") + self.assertRaisesRegex(LexerError, "not be longer than 30 characters", + lex, "@a123456789.a123456789.a123456789.x") + + def test_include(self): + self.assertEqual(lex("include (~/foo/bar baz.fea);"), [ + (Lexer.NAME, "include"), + (Lexer.FILENAME, "~/foo/bar baz.fea"), + (Lexer.SYMBOL, ";") + ]) + self.assertEqual(lex("include # Comment\n (foo) \n;"), [ + (Lexer.NAME, "include"), + (Lexer.FILENAME, "foo"), + (Lexer.SYMBOL, ";") + ]) + self.assertRaises(LexerError, lex, "include blah") + self.assertRaises(LexerError, lex, "include (blah") + + def test_number(self): + self.assertEqual(lex("123 -456"), + [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)]) + self.assertEqual(lex("0xCAFED00D"), [(Lexer.NUMBER, 0xCAFED00D)]) + self.assertEqual(lex("0xcafed00d"), [(Lexer.NUMBER, 0xCAFED00D)]) + + def test_symbol(self): + self.assertEqual(lex("a'"), [(Lexer.NAME, "a"), (Lexer.SYMBOL, "'")]) + 
self.assertEqual( + lex("foo - -2"), + [(Lexer.NAME, "foo"), (Lexer.SYMBOL, "-"), (Lexer.NUMBER, -2)]) + + def test_comment(self): + self.assertEqual(lex("# Comment\n#"), []) + + def test_string(self): + self.assertEqual(lex('"foo" "bar"'), + [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")]) + self.assertRaises(LexerError, lambda: lex('"foo\n bar"')) + + def test_bad_character(self): + self.assertRaises(LexerError, lambda: lex("123 \u0001")) + + def test_newline(self): + lines = lambda s: [loc[1] for (_, _, loc) in Lexer(s, "test.fea")] + self.assertEqual(lines("FOO\n\nBAR\nBAZ"), [1, 3, 4]) # Unix + self.assertEqual(lines("FOO\r\rBAR\rBAZ"), [1, 3, 4]) # Macintosh + self.assertEqual(lines("FOO\r\n\r\n BAR\r\nBAZ"), [1, 3, 4]) # Windows + self.assertEqual(lines("FOO\n\rBAR\r\nBAZ"), [1, 3, 4]) # mixed + + def test_location(self): + locs = lambda s: ["%s:%d:%d" % loc + for (_, _, loc) in Lexer(s, "test.fea")] + self.assertEqual(locs("a b # Comment\n12 @x"), [ + "test.fea:1:1", "test.fea:1:3", "test.fea:2:1", + "test.fea:2:4" + ]) + + def test_scan_over_(self): + lexer = Lexer("abbacabba12", "test.fea") + self.assertEqual(lexer.pos_, 0) + lexer.scan_over_("xyz") + self.assertEqual(lexer.pos_, 0) + lexer.scan_over_("abc") + self.assertEqual(lexer.pos_, 9) + lexer.scan_over_("abc") + self.assertEqual(lexer.pos_, 9) + lexer.scan_over_("0123456789") + self.assertEqual(lexer.pos_, 11) + + def test_scan_until_(self): + lexer = Lexer("foo'bar", "test.fea") + self.assertEqual(lexer.pos_, 0) + lexer.scan_until_("'") + self.assertEqual(lexer.pos_, 3) + lexer.scan_until_("'") + self.assertEqual(lexer.pos_, 3) + + +class IncludingLexerTest(unittest.TestCase): + @staticmethod + def getpath(filename): + path, _ = os.path.split(__file__) + return os.path.join(path, "testdata", filename) + + def test_include(self): + lexer = IncludingLexer(self.getpath("include4.fea")) + result = ['%s %s:%d' % (token, os.path.split(loc[0])[1], loc[1]) + for _, token, loc in lexer] + 
self.assertEqual(result, [ + "I4a include4.fea:1", + "I3a include3.fea:1", + "I2a include2.fea:1", + "I1a include1.fea:1", + "I0 include0.fea:1", + "I1b include1.fea:3", + "I2b include2.fea:3", + "I3b include3.fea:3", + "I4b include4.fea:3" + ]) + + def test_include_limit(self): + lexer = IncludingLexer(self.getpath("include6.fea")) + self.assertRaises(LexerError, lambda: list(lexer)) + + def test_include_self(self): + lexer = IncludingLexer(self.getpath("includeself.fea")) + self.assertRaises(LexerError, lambda: list(lexer)) + + def test_include_missing_file(self): + lexer = IncludingLexer(self.getpath("includemissingfile.fea")) + self.assertRaises(LexerError, lambda: list(lexer)) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/feaLib/parser.py fonttools-3.0/Tools/fontTools/feaLib/parser.py --- fonttools-2.4/Tools/fontTools/feaLib/parser.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/parser.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,466 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.lexer import Lexer, IncludingLexer +import fontTools.feaLib.ast as ast +import os +import re + + +class ParserError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + path, line, column = self.location + return "%s:%d:%d: %s" % (path, line, column, message) + else: + return message + + +class Parser(object): + def __init__(self, path): + self.doc_ = ast.FeatureFile() + self.anchors_ = SymbolTable() + self.glyphclasses_ = SymbolTable() + self.lookups_ = SymbolTable() + self.valuerecords_ = SymbolTable() + self.symbol_tables_ = { + self.anchors_, self.glyphclasses_, + self.lookups_, self.valuerecords_ + } + self.next_token_type_, self.next_token_ = (None, None) + 
self.next_token_location_ = None + self.lexer_ = IncludingLexer(path) + self.advance_lexer_() + + def parse(self): + statements = self.doc_.statements + while self.next_token_type_ is not None: + self.advance_lexer_() + if self.cur_token_type_ is Lexer.GLYPHCLASS: + statements.append(self.parse_glyphclass_definition_()) + elif self.is_cur_keyword_("anchorDef"): + statements.append(self.parse_anchordef_()) + elif self.is_cur_keyword_("languagesystem"): + statements.append(self.parse_languagesystem_()) + elif self.is_cur_keyword_("lookup"): + statements.append(self.parse_lookup_(vertical=False)) + elif self.is_cur_keyword_("feature"): + statements.append(self.parse_feature_block_()) + elif self.is_cur_keyword_("valueRecordDef"): + statements.append( + self.parse_valuerecord_definition_(vertical=False)) + else: + raise ParserError("Expected feature, languagesystem, " + "lookup, or glyph class definition", + self.cur_token_location_) + return self.doc_ + + def parse_anchordef_(self): + assert self.is_cur_keyword_("anchorDef") + location = self.cur_token_location_ + x, y = self.expect_number_(), self.expect_number_() + contourpoint = None + if self.next_token_ == "contourpoint": + self.expect_keyword_("contourpoint") + contourpoint = self.expect_number_() + name = self.expect_name_() + self.expect_symbol_(";") + anchordef = ast.AnchorDefinition(location, name, x, y, contourpoint) + self.anchors_.define(name, anchordef) + return anchordef + + def parse_glyphclass_definition_(self): + location, name = self.cur_token_location_, self.cur_token_ + self.expect_symbol_("=") + glyphs = self.parse_glyphclass_(accept_glyphname=False) + self.expect_symbol_(";") + if self.glyphclasses_.resolve(name) is not None: + raise ParserError("Glyph class @%s already defined" % name, + location) + glyphclass = ast.GlyphClassDefinition(location, name, glyphs) + self.glyphclasses_.define(name, glyphclass) + return glyphclass + + def parse_glyphclass_(self, accept_glyphname): + result = set() + 
if accept_glyphname and self.next_token_type_ is Lexer.NAME: + result.add(self.expect_name_()) + return result + if self.next_token_type_ is Lexer.GLYPHCLASS: + self.advance_lexer_() + gc = self.glyphclasses_.resolve(self.cur_token_) + if gc is None: + raise ParserError("Unknown glyph class @%s" % self.cur_token_, + self.cur_token_location_) + result.update(gc.glyphs) + return result + + self.expect_symbol_("[") + while self.next_token_ != "]": + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + if self.next_token_ == "-": + range_location_ = self.cur_token_location_ + range_start = self.cur_token_ + self.expect_symbol_("-") + range_end = self.expect_name_() + result.update(self.make_glyph_range_(range_location_, + range_start, + range_end)) + else: + result.add(self.cur_token_) + elif self.cur_token_type_ is Lexer.GLYPHCLASS: + gc = self.glyphclasses_.resolve(self.cur_token_) + if gc is None: + raise ParserError( + "Unknown glyph class @%s" % self.cur_token_, + self.cur_token_location_) + result.update(gc.glyphs) + else: + raise ParserError( + "Expected glyph name, glyph range, " + "or glyph class reference", + self.cur_token_location_) + self.expect_symbol_("]") + return result + + def parse_glyph_pattern_(self): + prefix, glyphs, lookups, suffix = ([], [], [], []) + while self.next_token_ not in {"by", "from", ";"}: + gc = self.parse_glyphclass_(accept_glyphname=True) + marked = False + if self.next_token_ == "'": + self.expect_symbol_("'") + marked = True + if marked: + glyphs.append(gc) + elif glyphs: + suffix.append(gc) + else: + prefix.append(gc) + + lookup = None + if self.next_token_ == "lookup": + self.expect_keyword_("lookup") + if not marked: + raise ParserError("Lookups can only follow marked glyphs", + self.cur_token_location_) + lookup_name = self.expect_name_() + lookup = self.lookups_.resolve(lookup_name) + if lookup is None: + raise ParserError('Unknown lookup "%s"' % lookup_name, + self.cur_token_location_) + if marked: + 
lookups.append(lookup) + + if not glyphs and not suffix: # eg., "sub f f i by" + assert lookups == [] + return ([], prefix, [None] * len(prefix), []) + else: + return (prefix, glyphs, lookups, suffix) + + def parse_ignore_(self): + assert self.is_cur_keyword_("ignore") + location = self.cur_token_location_ + self.advance_lexer_() + if self.cur_token_ in ["substitute", "sub"]: + prefix, glyphs, lookups, suffix = self.parse_glyph_pattern_() + self.expect_symbol_(";") + return ast.IgnoreSubstitutionRule(location, prefix, glyphs, suffix) + raise ParserError("Expected \"substitute\"", self.next_token_location_) + + def parse_language_(self): + assert self.is_cur_keyword_("language") + location, language = self.cur_token_location_, self.expect_tag_() + include_default, required = (True, False) + if self.next_token_ in {"exclude_dflt", "include_dflt"}: + include_default = (self.expect_name_() == "include_dflt") + if self.next_token_ == "required": + self.expect_keyword_("required") + required = True + self.expect_symbol_(";") + return ast.LanguageStatement(location, language.strip(), + include_default, required) + + def parse_lookup_(self, vertical): + assert self.is_cur_keyword_("lookup") + location, name = self.cur_token_location_, self.expect_name_() + + if self.next_token_ == ";": + lookup = self.lookups_.resolve(name) + if lookup is None: + raise ParserError("Unknown lookup \"%s\"" % name, + self.cur_token_location_) + self.expect_symbol_(";") + return ast.LookupReferenceStatement(location, lookup) + + use_extension = False + if self.next_token_ == "useExtension": + self.expect_keyword_("useExtension") + use_extension = True + + block = ast.LookupBlock(location, name, use_extension) + self.parse_block_(block, vertical) + self.lookups_.define(name, block) + return block + + def parse_script_(self): + assert self.is_cur_keyword_("script") + location, script = self.cur_token_location_, self.expect_tag_() + self.expect_symbol_(";") + return ast.ScriptStatement(location, 
script) + + def parse_substitute_(self): + assert self.cur_token_ in {"substitute", "sub"} + location = self.cur_token_location_ + old_prefix, old, lookups, old_suffix = self.parse_glyph_pattern_() + + new = [] + if self.next_token_ == "by": + keyword = self.expect_keyword_("by") + while self.next_token_ != ";": + new.append(self.parse_glyphclass_(accept_glyphname=True)) + elif self.next_token_ == "from": + keyword = self.expect_keyword_("from") + new = [self.parse_glyphclass_(accept_glyphname=False)] + else: + keyword = None + self.expect_symbol_(";") + if len(new) is 0 and not any(lookups): + raise ParserError( + 'Expected "by", "from" or explicit lookup references', + self.cur_token_location_) + + if keyword == "from": + if len(old) != 1 or len(old[0]) != 1: + raise ParserError('Expected a single glyph before "from"', + location) + if len(new) != 1: + raise ParserError('Expected a single glyphclass after "from"', + location) + return ast.AlternateSubstitution(location, list(old[0])[0], new[0]) + + rule = ast.SubstitutionRule(location, old, new) + rule.old_prefix, rule.old_suffix = old_prefix, old_suffix + rule.lookups = lookups + return rule + + def parse_subtable_(self): + assert self.is_cur_keyword_("subtable") + location = self.cur_token_location_ + self.expect_symbol_(";") + return ast.SubtableStatement(location) + + def parse_valuerecord_(self, vertical): + if self.next_token_type_ is Lexer.NUMBER: + number, location = self.expect_number_(), self.cur_token_location_ + if vertical: + val = ast.ValueRecord(location, 0, 0, 0, number) + else: + val = ast.ValueRecord(location, 0, 0, number, 0) + return val + self.expect_symbol_("<") + location = self.cur_token_location_ + if self.next_token_type_ is Lexer.NAME: + name = self.expect_name_() + vrd = self.valuerecords_.resolve(name) + if vrd is None: + raise ParserError("Unknown valueRecordDef \"%s\"" % name, + self.cur_token_location_) + value = vrd.value + xPlacement, yPlacement = (value.xPlacement, 
value.yPlacement) + xAdvance, yAdvance = (value.xAdvance, value.yAdvance) + else: + xPlacement, yPlacement, xAdvance, yAdvance = ( + self.expect_number_(), self.expect_number_(), + self.expect_number_(), self.expect_number_()) + self.expect_symbol_(">") + return ast.ValueRecord( + location, xPlacement, yPlacement, xAdvance, yAdvance) + + def parse_valuerecord_definition_(self, vertical): + assert self.is_cur_keyword_("valueRecordDef") + location = self.cur_token_location_ + value = self.parse_valuerecord_(vertical) + name = self.expect_name_() + self.expect_symbol_(";") + vrd = ast.ValueRecordDefinition(location, name, value) + self.valuerecords_.define(name, vrd) + return vrd + + def parse_languagesystem_(self): + assert self.cur_token_ == "languagesystem" + location = self.cur_token_location_ + script, language = self.expect_tag_(), self.expect_tag_() + self.expect_symbol_(";") + return ast.LanguageSystemStatement(location, script, language) + + def parse_feature_block_(self): + assert self.cur_token_ == "feature" + location = self.cur_token_location_ + tag = self.expect_tag_() + vertical = (tag == "vkrn") + + use_extension = False + if self.next_token_ == "useExtension": + self.expect_keyword_("useExtension") + use_extension = True + + block = ast.FeatureBlock(location, tag, use_extension) + self.parse_block_(block, vertical) + return block + + def parse_block_(self, block, vertical): + self.expect_symbol_("{") + for symtab in self.symbol_tables_: + symtab.enter_scope() + + statements = block.statements + while self.next_token_ != "}": + self.advance_lexer_() + if self.cur_token_type_ is Lexer.GLYPHCLASS: + statements.append(self.parse_glyphclass_definition_()) + elif self.is_cur_keyword_("anchorDef"): + statements.append(self.parse_anchordef_()) + elif self.is_cur_keyword_("ignore"): + statements.append(self.parse_ignore_()) + elif self.is_cur_keyword_("language"): + statements.append(self.parse_language_()) + elif self.is_cur_keyword_("lookup"): + 
statements.append(self.parse_lookup_(vertical)) + elif self.is_cur_keyword_("script"): + statements.append(self.parse_script_()) + elif (self.is_cur_keyword_("substitute") or + self.is_cur_keyword_("sub")): + statements.append(self.parse_substitute_()) + elif self.is_cur_keyword_("subtable"): + statements.append(self.parse_subtable_()) + elif self.is_cur_keyword_("valueRecordDef"): + statements.append(self.parse_valuerecord_definition_(vertical)) + else: + raise ParserError( + "Expected glyph class definition or statement", + self.cur_token_location_) + + self.expect_symbol_("}") + for symtab in self.symbol_tables_: + symtab.exit_scope() + + name = self.expect_name_() + if name != block.name.strip(): + raise ParserError("Expected \"%s\"" % block.name.strip(), + self.cur_token_location_) + self.expect_symbol_(";") + + def is_cur_keyword_(self, k): + return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k) + + def expect_tag_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.NAME: + raise ParserError("Expected a tag", self.cur_token_location_) + if len(self.cur_token_) > 4: + raise ParserError("Tags can not be longer than 4 characters", + self.cur_token_location_) + return (self.cur_token_ + " ")[:4] + + def expect_symbol_(self, symbol): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol: + return symbol + raise ParserError("Expected '%s'" % symbol, self.cur_token_location_) + + def expect_keyword_(self, keyword): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword: + return self.cur_token_ + raise ParserError("Expected \"%s\"" % keyword, + self.cur_token_location_) + + def expect_name_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + return self.cur_token_ + raise ParserError("Expected a name", self.cur_token_location_) + + def expect_number_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NUMBER: + 
return self.cur_token_ + raise ParserError("Expected a number", self.cur_token_location_) + + def advance_lexer_(self): + self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( + self.next_token_type_, self.next_token_, self.next_token_location_) + try: + (self.next_token_type_, self.next_token_, + self.next_token_location_) = self.lexer_.next() + except StopIteration: + self.next_token_type_, self.next_token_ = (None, None) + + def make_glyph_range_(self, location, start, limit): + """("a.sc", "d.sc") --> {"a.sc", "b.sc", "c.sc", "d.sc"}""" + result = set() + if len(start) != len(limit): + raise ParserError( + "Bad range: \"%s\" and \"%s\" should have the same length" % + (start, limit), location) + rev = lambda s: ''.join(reversed(list(s))) # string reversal + prefix = os.path.commonprefix([start, limit]) + suffix = rev(os.path.commonprefix([rev(start), rev(limit)])) + if len(suffix) > 0: + start_range = start[len(prefix):-len(suffix)] + limit_range = limit[len(prefix):-len(suffix)] + else: + start_range = start[len(prefix):] + limit_range = limit[len(prefix):] + + if start_range >= limit_range: + raise ParserError("Start of range must be smaller than its end", + location) + + uppercase = re.compile(r'^[A-Z]$') + if uppercase.match(start_range) and uppercase.match(limit_range): + for c in range(ord(start_range), ord(limit_range) + 1): + result.add("%s%c%s" % (prefix, c, suffix)) + return result + + lowercase = re.compile(r'^[a-z]$') + if lowercase.match(start_range) and lowercase.match(limit_range): + for c in range(ord(start_range), ord(limit_range) + 1): + result.add("%s%c%s" % (prefix, c, suffix)) + return result + + digits = re.compile(r'^[0-9]{1,3}$') + if digits.match(start_range) and digits.match(limit_range): + for i in range(int(start_range, 10), int(limit_range, 10) + 1): + number = ("000" + str(i))[-len(start_range):] + result.add("%s%s%s" % (prefix, number, suffix)) + return result + + raise ParserError("Bad range: \"%s-%s\"" % (start, 
limit), location) + + +class SymbolTable(object): + def __init__(self): + self.scopes_ = [{}] + + def enter_scope(self): + self.scopes_.append({}) + + def exit_scope(self): + self.scopes_.pop() + + def define(self, name, item): + self.scopes_[-1][name] = item + + def resolve(self, name): + for scope in reversed(self.scopes_): + item = scope.get(name) + if item: + return item + return None diff -Nru fonttools-2.4/Tools/fontTools/feaLib/parser_test.py fonttools-3.0/Tools/fontTools/feaLib/parser_test.py --- fonttools-2.4/Tools/fontTools/feaLib/parser_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/parser_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,448 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.lexer import LexerError +from fontTools.feaLib.parser import Parser, ParserError, SymbolTable +from fontTools.misc.py23 import * +import fontTools.feaLib.ast as ast +import codecs +import os +import shutil +import sys +import tempfile +import unittest + + +class ParserTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_anchordef(self): + [foo] = self.parse("anchorDef 123 456 foo;").statements + self.assertEqual(type(foo), ast.AnchorDefinition) + self.assertEqual(foo.name, "foo") + self.assertEqual(foo.x, 123) + self.assertEqual(foo.y, 456) + self.assertEqual(foo.contourpoint, None) + + def test_anchordef_contourpoint(self): + [foo] = self.parse("anchorDef 123 456 contourpoint 5 foo;").statements + self.assertEqual(type(foo), ast.AnchorDefinition) + self.assertEqual(foo.name, "foo") + self.assertEqual(foo.x, 123) + self.assertEqual(foo.y, 456) + self.assertEqual(foo.contourpoint, 5) + + def test_feature_block(self): + [liga] = self.parse("feature liga {} liga;").statements + self.assertEqual(liga.name, "liga") + self.assertFalse(liga.use_extension) + + def test_feature_block_useExtension(self): + [liga] = self.parse("feature liga useExtension {} liga;").statements + self.assertEqual(liga.name, "liga") + self.assertTrue(liga.use_extension) + + def test_glyphclass(self): + [gc] = self.parse("@dash = [endash emdash figuredash];").statements + self.assertEqual(gc.name, "dash") + self.assertEqual(gc.glyphs, {"endash", "emdash", "figuredash"}) + + def test_glyphclass_bad(self): + self.assertRaisesRegex( + ParserError, + "Expected glyph name, glyph range, or glyph class reference", + self.parse, "@bad = [a 123];") + + def test_glyphclass_duplicate(self): + self.assertRaisesRegex( + ParserError, "Glyph class @dup already defined", + self.parse, "@dup = [a b]; @dup = [x];") + + def test_glyphclass_empty(self): + [gc] = self.parse("@empty_set = [];").statements + self.assertEqual(gc.name, "empty_set") + self.assertEqual(gc.glyphs, set()) + + def test_glyphclass_equality(self): + [foo, bar] = self.parse("@foo = [a b]; @bar = @foo;").statements + self.assertEqual(foo.glyphs, {"a", "b"}) + self.assertEqual(bar.glyphs, {"a", "b"}) + + def test_glyphclass_range_uppercase(self): + [gc] = 
self.parse("@swashes = [X.swash-Z.swash];").statements + self.assertEqual(gc.name, "swashes") + self.assertEqual(gc.glyphs, {"X.swash", "Y.swash", "Z.swash"}) + + def test_glyphclass_range_lowercase(self): + [gc] = self.parse("@defg.sc = [d.sc-g.sc];").statements + self.assertEqual(gc.name, "defg.sc") + self.assertEqual(gc.glyphs, {"d.sc", "e.sc", "f.sc", "g.sc"}) + + def test_glyphclass_range_digit1(self): + [gc] = self.parse("@range = [foo.2-foo.5];").statements + self.assertEqual(gc.glyphs, {"foo.2", "foo.3", "foo.4", "foo.5"}) + + def test_glyphclass_range_digit2(self): + [gc] = self.parse("@range = [foo.09-foo.11];").statements + self.assertEqual(gc.glyphs, {"foo.09", "foo.10", "foo.11"}) + + def test_glyphclass_range_digit3(self): + [gc] = self.parse("@range = [foo.123-foo.125];").statements + self.assertEqual(gc.glyphs, {"foo.123", "foo.124", "foo.125"}) + + def test_glyphclass_range_bad(self): + self.assertRaisesRegex( + ParserError, + "Bad range: \"a\" and \"foobar\" should have the same length", + self.parse, "@bad = [a-foobar];") + self.assertRaisesRegex( + ParserError, "Bad range: \"A.swash-z.swash\"", + self.parse, "@bad = [A.swash-z.swash];") + self.assertRaisesRegex( + ParserError, "Start of range must be smaller than its end", + self.parse, "@bad = [B.swash-A.swash];") + self.assertRaisesRegex( + ParserError, "Bad range: \"foo.1234-foo.9876\"", + self.parse, "@bad = [foo.1234-foo.9876];") + + def test_glyphclass_range_mixed(self): + [gc] = self.parse("@range = [a foo.09-foo.11 X.sc-Z.sc];").statements + self.assertEqual(gc.glyphs, { + "a", "foo.09", "foo.10", "foo.11", "X.sc", "Y.sc", "Z.sc" + }) + + def test_glyphclass_reference(self): + [vowels_lc, vowels_uc, vowels] = self.parse( + "@Vowels.lc = [a e i o u]; @Vowels.uc = [A E I O U];" + "@Vowels = [@Vowels.lc @Vowels.uc y Y];").statements + self.assertEqual(vowels_lc.glyphs, set(list("aeiou"))) + self.assertEqual(vowels_uc.glyphs, set(list("AEIOU"))) + self.assertEqual(vowels.glyphs, 
set(list("aeiouyAEIOUY"))) + self.assertRaisesRegex( + ParserError, "Unknown glyph class @unknown", + self.parse, "@bad = [@unknown];") + + def test_glyphclass_scoping(self): + [foo, liga, smcp] = self.parse( + "@foo = [a b];" + "feature liga { @bar = [@foo l]; } liga;" + "feature smcp { @bar = [@foo s]; } smcp;" + ).statements + self.assertEqual(foo.glyphs, {"a", "b"}) + self.assertEqual(liga.statements[0].glyphs, {"a", "b", "l"}) + self.assertEqual(smcp.statements[0].glyphs, {"a", "b", "s"}) + + def test_ignore_sub(self): + doc = self.parse("feature test {ignore sub e t' c;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.IgnoreSubstitutionRule) + self.assertEqual(s.prefix, [{"e"}]) + self.assertEqual(s.glyphs, [{"t"}]) + self.assertEqual(s.suffix, [{"c"}]) + + def test_ignore_substitute(self): + doc = self.parse( + "feature test {" + " ignore substitute f [a e] d' [a u]' [e y];" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.IgnoreSubstitutionRule) + self.assertEqual(s.prefix, [{"f"}, {"a", "e"}]) + self.assertEqual(s.glyphs, [{"d"}, {"a", "u"}]) + self.assertEqual(s.suffix, [{"e", "y"}]) + + def test_language(self): + doc = self.parse("feature test {language DEU;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertTrue(s.include_default) + self.assertFalse(s.required) + + def test_language_exclude_dflt(self): + doc = self.parse("feature test {language DEU exclude_dflt;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertFalse(s.include_default) + self.assertFalse(s.required) + + def test_language_exclude_dflt_required(self): + doc = self.parse("feature test {" + " language DEU exclude_dflt required;" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + 
self.assertEqual(s.language, "DEU") + self.assertFalse(s.include_default) + self.assertTrue(s.required) + + def test_language_include_dflt(self): + doc = self.parse("feature test {language DEU include_dflt;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertTrue(s.include_default) + self.assertFalse(s.required) + + def test_language_include_dflt_required(self): + doc = self.parse("feature test {" + " language DEU include_dflt required;" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU") + self.assertTrue(s.include_default) + self.assertTrue(s.required) + + def test_lookup_block(self): + [lookup] = self.parse("lookup Ligatures {} Ligatures;").statements + self.assertEqual(lookup.name, "Ligatures") + self.assertFalse(lookup.use_extension) + + def test_lookup_block_useExtension(self): + [lookup] = self.parse("lookup Foo useExtension {} Foo;").statements + self.assertEqual(lookup.name, "Foo") + self.assertTrue(lookup.use_extension) + + def test_lookup_block_name_mismatch(self): + self.assertRaisesRegex( + ParserError, 'Expected "Foo"', + self.parse, "lookup Foo {} Bar;") + + def test_lookup_block_with_horizontal_valueRecordDef(self): + doc = self.parse("feature liga {" + " lookup look {" + " valueRecordDef 123 foo;" + " } look;" + "} liga;") + [liga] = doc.statements + [look] = liga.statements + [foo] = look.statements + self.assertEqual(foo.value.xAdvance, 123) + self.assertEqual(foo.value.yAdvance, 0) + + def test_lookup_block_with_vertical_valueRecordDef(self): + doc = self.parse("feature vkrn {" + " lookup look {" + " valueRecordDef 123 foo;" + " } look;" + "} vkrn;") + [vkrn] = doc.statements + [look] = vkrn.statements + [foo] = look.statements + self.assertEqual(foo.value.xAdvance, 0) + self.assertEqual(foo.value.yAdvance, 123) + + def test_lookup_reference(self): + [foo, bar] = 
self.parse("lookup Foo {} Foo;" + "feature Bar {lookup Foo;} Bar;").statements + [ref] = bar.statements + self.assertEqual(type(ref), ast.LookupReferenceStatement) + self.assertEqual(ref.lookup, foo) + + def test_lookup_reference_unknown(self): + self.assertRaisesRegex( + ParserError, 'Unknown lookup "Huh"', + self.parse, "feature liga {lookup Huh;} liga;") + + def test_script(self): + doc = self.parse("feature test {script cyrl;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.ScriptStatement) + self.assertEqual(s.script, "cyrl") + + def test_substitute_single_format_a(self): # GSUB LookupType 1 + doc = self.parse("feature smcp {substitute a by a.sc;} smcp;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"a"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"a.sc"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_single_format_b(self): # GSUB LookupType 1 + doc = self.parse( + "feature smcp {" + " substitute [one.fitted one.oldstyle] by one;" + "} smcp;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"one.fitted", "one.oldstyle"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"one"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_single_format_c(self): # GSUB LookupType 1 + doc = self.parse( + "feature smcp {" + " substitute [a-d] by [A.sc-D.sc];" + "} smcp;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"a", "b", "c", "d"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"A.sc", "B.sc", "C.sc", "D.sc"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_multiple(self): # GSUB LookupType 2 + doc = self.parse("lookup Look {substitute f_f_i by f f i;} Look;") + sub = doc.statements[0].statements[0] + self.assertEqual(type(sub), 
ast.SubstitutionRule) + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"f_f_i"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"f"}, {"f"}, {"i"}]) + self.assertEqual(sub.lookups, [None]) + + def test_substitute_from(self): # GSUB LookupType 3 + doc = self.parse("feature test {" + " substitute a from [a.1 a.2 a.3];" + "} test;") + sub = doc.statements[0].statements[0] + self.assertEqual(type(sub), ast.AlternateSubstitution) + self.assertEqual(sub.glyph, "a") + self.assertEqual(sub.from_class, {"a.1", "a.2", "a.3"}) + + def test_substitute_from_glyphclass(self): # GSUB LookupType 3 + doc = self.parse("feature test {" + " @Ampersands = [ampersand.1 ampersand.2];" + " substitute ampersand from @Ampersands;" + "} test;") + [glyphclass, sub] = doc.statements[0].statements + self.assertEqual(type(sub), ast.AlternateSubstitution) + self.assertEqual(sub.glyph, "ampersand") + self.assertEqual(sub.from_class, {"ampersand.1", "ampersand.2"}) + + def test_substitute_ligature(self): # GSUB LookupType 4 + doc = self.parse("feature liga {substitute f f i by f_f_i;} liga;") + sub = doc.statements[0].statements[0] + self.assertEqual(sub.old_prefix, []) + self.assertEqual(sub.old, [{"f"}, {"f"}, {"i"}]) + self.assertEqual(sub.old_suffix, []) + self.assertEqual(sub.new, [{"f_f_i"}]) + self.assertEqual(sub.lookups, [None, None, None]) + + def test_substitute_lookups(self): + doc = Parser(self.getpath("spec5fi.fea")).parse() + [ligs, sub, feature] = doc.statements + self.assertEqual(feature.statements[0].lookups, [ligs, None, sub]) + self.assertEqual(feature.statements[1].lookups, [ligs, None, sub]) + + def test_substitute_missing_by(self): + self.assertRaisesRegex( + ParserError, 'Expected "by", "from" or explicit lookup references', + self.parse, "feature liga {substitute f f i;} liga;") + + def test_subtable(self): + doc = self.parse("feature test {subtable;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), 
ast.SubtableStatement) + + def test_valuerecord_format_a_horizontal(self): + doc = self.parse("feature liga {valueRecordDef 123 foo;} liga;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 0) + self.assertEqual(value.yPlacement, 0) + self.assertEqual(value.xAdvance, 123) + self.assertEqual(value.yAdvance, 0) + + def test_valuerecord_format_a_vertical(self): + doc = self.parse("feature vkrn {valueRecordDef 123 foo;} vkrn;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 0) + self.assertEqual(value.yPlacement, 0) + self.assertEqual(value.xAdvance, 0) + self.assertEqual(value.yAdvance, 123) + + def test_valuerecord_format_b(self): + doc = self.parse("feature liga {valueRecordDef <1 2 3 4> foo;} liga;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 1) + self.assertEqual(value.yPlacement, 2) + self.assertEqual(value.xAdvance, 3) + self.assertEqual(value.yAdvance, 4) + + def test_valuerecord_named(self): + doc = self.parse("valueRecordDef <1 2 3 4> foo;" + "feature liga {valueRecordDef <foo> bar;} liga;") + value = doc.statements[1].statements[0].value + self.assertEqual(value.xPlacement, 1) + self.assertEqual(value.yPlacement, 2) + self.assertEqual(value.xAdvance, 3) + self.assertEqual(value.yAdvance, 4) + + def test_valuerecord_named_unknown(self): + self.assertRaisesRegex( + ParserError, "Unknown valueRecordDef \"unknown\"", + self.parse, "valueRecordDef <unknown> foo;") + + def test_valuerecord_scoping(self): + [foo, liga, smcp] = self.parse( + "valueRecordDef 789 foo;" + "feature liga {valueRecordDef <foo> bar;} liga;" + "feature smcp {valueRecordDef <foo> bar;} smcp;" + ).statements + self.assertEqual(foo.value.xAdvance, 789) + self.assertEqual(liga.statements[0].value.xAdvance, 789) + self.assertEqual(smcp.statements[0].value.xAdvance, 789) + + def test_languagesystem(self): + [langsys] = self.parse("languagesystem latn DEU;").statements + 
self.assertEqual(langsys.script, "latn") + self.assertEqual(langsys.language, "DEU ") + self.assertRaisesRegex( + ParserError, "Expected ';'", + self.parse, "languagesystem latn DEU") + self.assertRaisesRegex( + ParserError, "longer than 4 characters", + self.parse, "languagesystem foobar DEU") + self.assertRaisesRegex( + ParserError, "longer than 4 characters", + self.parse, "languagesystem latn FOOBAR") + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + def parse(self, text): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + self.num_tempfiles += 1 + path = os.path.join(self.tempdir, "tmp%d.fea" % self.num_tempfiles) + with codecs.open(path, "wb", "utf-8") as outfile: + outfile.write(text) + return Parser(path).parse() + + @staticmethod + def getpath(testfile): + path, _ = os.path.split(__file__) + return os.path.join(path, "testdata", testfile) + + +class SymbolTableTest(unittest.TestCase): + def test_scopes(self): + symtab = SymbolTable() + symtab.define("foo", 23) + self.assertEqual(symtab.resolve("foo"), 23) + symtab.enter_scope() + self.assertEqual(symtab.resolve("foo"), 23) + symtab.define("foo", 42) + self.assertEqual(symtab.resolve("foo"), 42) + symtab.exit_scope() + self.assertEqual(symtab.resolve("foo"), 23) + + def test_resolve_undefined(self): + self.assertEqual(SymbolTable().resolve("abc"), None) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/include0.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/include0.fea --- fonttools-2.4/Tools/fontTools/feaLib/testdata/include0.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/testdata/include0.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1 @@ +I0 diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/include1.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/include1.fea --- 
fonttools-2.4/Tools/fontTools/feaLib/testdata/include1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/testdata/include1.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I1a +include(include0.fea); +I1b diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/include2.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/include2.fea --- fonttools-2.4/Tools/fontTools/feaLib/testdata/include2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/testdata/include2.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I2a +include(include1.fea); +I2b diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/include3.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/include3.fea --- fonttools-2.4/Tools/fontTools/feaLib/testdata/include3.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/testdata/include3.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +I3a +include(include2.fea); +I3b + diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/include4.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/include4.fea --- fonttools-2.4/Tools/fontTools/feaLib/testdata/include4.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/testdata/include4.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +I4a +include(include3.fea); +I4b + diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/include5.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/include5.fea --- fonttools-2.4/Tools/fontTools/feaLib/testdata/include5.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/testdata/include5.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I5a +include(include4.fea); +I5b diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/include6.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/include6.fea --- fonttools-2.4/Tools/fontTools/feaLib/testdata/include6.fea 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.0/Tools/fontTools/feaLib/testdata/include6.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,3 @@ +I6a +include(include5.fea); +I6b diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/includemissingfile.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/includemissingfile.fea --- fonttools-2.4/Tools/fontTools/feaLib/testdata/includemissingfile.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/testdata/includemissingfile.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1 @@ +include(missingfile.fea); diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/includeself.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/includeself.fea --- fonttools-2.4/Tools/fontTools/feaLib/testdata/includeself.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/testdata/includeself.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1 @@ +include(includeself.fea); diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/mini.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/mini.fea --- fonttools-2.4/Tools/fontTools/feaLib/testdata/mini.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/testdata/mini.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,19 @@ +# Example file from OpenType Feature File specification, section 1. 
+# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +# Script and language coverage +languagesystem DFLT dflt; +languagesystem latn dflt; + +# Ligature formation +feature liga { + substitute f i by f_i; + substitute f l by f_l; +} liga; + +# Kerning +feature kern { + position A Y -100; + position a y -80; + position s f' <0 0 10 0> t; +} kern; diff -Nru fonttools-2.4/Tools/fontTools/feaLib/testdata/spec5fi.fea fonttools-3.0/Tools/fontTools/feaLib/testdata/spec5fi.fea --- fonttools-2.4/Tools/fontTools/feaLib/testdata/spec5fi.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/feaLib/testdata/spec5fi.fea 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,18 @@ +# OpenType Feature File specification, section 5.f.i, example 1 +# "Specifying a Chain Sub rule and marking sub-runs" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +lookup CNTXT_LIGS { + substitute f i by f_i; + substitute c t by c_t; + } CNTXT_LIGS; + +lookup CNTXT_SUB { + substitute n by n.end; + substitute s by s.end; + } CNTXT_SUB; + +feature test { + substitute [a e i o u] f' lookup CNTXT_LIGS i' n' lookup CNTXT_SUB; + substitute [a e i o u] c' lookup CNTXT_LIGS t' s' lookup CNTXT_SUB; +} test; diff -Nru fonttools-2.4/Tools/fontTools/__init__.py fonttools-3.0/Tools/fontTools/__init__.py --- fonttools-2.4/Tools/fontTools/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +version = "3.0" diff -Nru fonttools-2.4/Tools/fontTools/inspect.py fonttools-3.0/Tools/fontTools/inspect.py --- fonttools-2.4/Tools/fontTools/inspect.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/inspect.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,265 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Behdad Esfahbod + +"""GUI font inspector. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import misc, ttLib, cffLib +import pygtk +pygtk.require('2.0') +import gtk +import sys + + +class Row(object): + def __init__(self, parent, index, key, value, font): + self._parent = parent + self._index = index + self._key = key + self._value = value + self._font = font + + if isinstance(value, ttLib.TTFont): + self._add_font(value) + return + + if not isinstance(value, basestring): + # Try sequences + is_sequence = True + try: + len(value) + iter(value) + # It's hard to differentiate list-type sequences + # from dict-type ones. Try fetching item 0. + value[0] + except (TypeError, AttributeError, KeyError, IndexError): + is_sequence = False + if is_sequence: + self._add_list(key, value) + return + if hasattr(value, '__dict__'): + self._add_object(key, value) + return + if hasattr(value, 'items'): + self._add_dict(key, value) + return + + if isinstance(value, basestring): + self._value_str = '"'+value+'"' + self._children = [] + return + + # Everything else + self._children = [] + + def _filter_items(self): + items = [] + for k,v in self._items: + if isinstance(v, ttLib.TTFont): + continue + if k in ['reader', 'file', 'tableTag', 'compileStatus', 'recurse']: + continue + if isinstance(k, basestring) and k[0] == '_': + continue + items.append((k,v)) + self._items = items + + def _add_font(self, font): + self._items = [(tag,font[tag]) for tag in font.keys()] + + def _add_object(self, key, value): + # Make sure item is decompiled + try: + value["asdf"] + except (AttributeError, KeyError, TypeError, ttLib.TTLibError): + pass + if isinstance(value, ttLib.getTableModule('glyf').Glyph): + # Glyph type needs explicit expanding to be useful + value.expand(self._font['glyf']) + if isinstance(value, misc.psCharStrings.T2CharString): + try: + value.decompile() + except TypeError: # 
Subroutines can't be decompiled + pass + if isinstance(value, cffLib.BaseDict): + for k in value.rawDict.keys(): + getattr(value, k) + if isinstance(value, cffLib.Index): + # Load all items + for i in range(len(value)): + value[i] + # Discard offsets as should not be needed anymore + if hasattr(value, 'offsets'): + del value.offsets + + self._value_str = value.__class__.__name__ + if isinstance(value, ttLib.tables.DefaultTable.DefaultTable): + self._value_str += ' (%d Bytes)' % self._font.reader.tables[key].length + self._items = sorted(value.__dict__.items()) + self._filter_items() + + def _add_dict(self, key, value): + self._value_str = '%s of %d items' % (value.__class__.__name__, len(value)) + self._items = sorted(value.items()) + + def _add_list(self, key, value): + if len(value) and len(value) <= 32: + self._value_str = str(value) + else: + self._value_str = '%s of %d items' % (value.__class__.__name__, len(value)) + self._items = list(enumerate(value)) + + def __len__(self): + if hasattr(self, '_children'): + return len(self._children) + if hasattr(self, '_items'): + return len(self._items) + assert False + + def _ensure_children(self): + if hasattr(self, '_children'): + return + children = [] + for i,(k,v) in enumerate(self._items): + children.append(Row(self, i, k, v, self._font)) + self._children = children + del self._items + + def __getitem__(self, n): + if n >= len(self): + return None + if not hasattr(self, '_children'): + self._children = [None] * len(self) + c = self._children[n] + if c is None: + k,v = self._items[n] + c = self._children[n] = Row(self, n, k, v, self._font) + self._items[n] = None + return c + + def get_parent(self): + return self._parent + + def get_index(self): + return self._index + + def get_key(self): + return self._key + + def get_value(self): + return self._value + + def get_value_str(self): + if hasattr(self,'_value_str'): + return self._value_str + return str(self._value) + +class FontTreeModel(gtk.GenericTreeModel): + + 
__gtype_name__ = 'FontTreeModel' + + def __init__(self, font): + super(FontTreeModel, self).__init__() + self._columns = (str, str) + self.font = font + self._root = Row(None, 0, "font", font, font) + + def on_get_flags(self): + return 0 + + def on_get_n_columns(self): + return len(self._columns) + + def on_get_column_type(self, index): + return self._columns[index] + + def on_get_iter(self, path): + rowref = self._root + while path: + rowref = rowref[path[0]] + path = path[1:] + return rowref + + def on_get_path(self, rowref): + path = [] + while rowref != self._root: + path.append(rowref.get_index()) + rowref = rowref.get_parent() + path.reverse() + return tuple(path) + + def on_get_value(self, rowref, column): + if column == 0: + return rowref.get_key() + else: + return rowref.get_value_str() + + def on_iter_next(self, rowref): + return rowref.get_parent()[rowref.get_index() + 1] + + def on_iter_children(self, rowref): + return rowref[0] + + def on_iter_has_child(self, rowref): + return bool(len(rowref)) + + def on_iter_n_children(self, rowref): + return len(rowref) + + def on_iter_nth_child(self, rowref, n): + if not rowref: rowref = self._root + return rowref[n] + + def on_iter_parent(self, rowref): + return rowref.get_parent() + +class Inspect(object): + + def _delete_event(self, widget, event, data=None): + gtk.main_quit() + return False + + def __init__(self, fontfile): + + self.window = gtk.Window(gtk.WINDOW_TOPLEVEL) + self.window.set_title("%s - pyftinspect" % fontfile) + self.window.connect("delete_event", self._delete_event) + self.window.set_size_request(400, 600) + + self.scrolled_window = gtk.ScrolledWindow() + self.window.add(self.scrolled_window) + + self.font = ttLib.TTFont(fontfile, lazy=True) + self.treemodel = FontTreeModel(self.font) + self.treeview = gtk.TreeView(self.treemodel) + #self.treeview.set_reorderable(True) + + for i in range(2): + col_name = ('Key', 'Value')[i] + col = gtk.TreeViewColumn(col_name) + col.set_sort_column_id(-1) + 
self.treeview.append_column(col) + + cell = gtk.CellRendererText() + col.pack_start(cell, True) + col.add_attribute(cell, 'text', i) + + self.treeview.set_search_column(1) + self.scrolled_window.add(self.treeview) + self.window.show_all() + +def main(args=None): + if args is None: + args = sys.argv[1:] + if len(args) < 1: + print("usage: pyftinspect font...", file=sys.stderr) + sys.exit(1) + for arg in args: + Inspect(arg) + gtk.main() + +if __name__ == "__main__": + main() diff -Nru fonttools-2.4/Tools/fontTools/merge.py fonttools-3.0/Tools/fontTools/merge.py --- fonttools-2.4/Tools/fontTools/merge.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/merge.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,949 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod, Roozbeh Pournader + +"""Font merger. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.timeTools import timestampNow +from fontTools import ttLib, cffLib +from fontTools.ttLib.tables import otTables, _h_e_a_d +from fontTools.ttLib.tables.DefaultTable import DefaultTable +from functools import reduce +import sys +import time +import operator + + +def _add_method(*clazzes, **kwargs): + """Returns a decorator function that adds a new method to one or + more classes.""" + allowDefault = kwargs.get('allowDefaultTable', False) + def wrapper(method): + for clazz in clazzes: + assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.' + assert method.__name__ not in clazz.__dict__, \ + "Oops, class '%s' has method '%s'." 
% (clazz.__name__, method.__name__) + setattr(clazz, method.__name__, method) + return None + return wrapper + +# General utility functions for merging values from different fonts + +def equal(lst): + lst = list(lst) + t = iter(lst) + first = next(t) + assert all(item == first for item in t), "Expected all items to be equal: %s" % lst + return first + +def first(lst): + return next(iter(lst)) + +def recalculate(lst): + return NotImplemented + +def current_time(lst): + return timestampNow() + +def bitwise_and(lst): + return reduce(operator.and_, lst) + +def bitwise_or(lst): + return reduce(operator.or_, lst) + +def avg_int(lst): + lst = list(lst) + return sum(lst) // len(lst) + +def onlyExisting(func): + """Returns a filter func that when called with a list, + only calls func on the non-NotImplemented items of the list, + and only so if there's at least one item remaining. + Otherwise returns NotImplemented.""" + + def wrapper(lst): + items = [item for item in lst if item is not NotImplemented] + return func(items) if items else NotImplemented + + return wrapper + +def sumLists(lst): + l = [] + for item in lst: + l.extend(item) + return l + +def sumDicts(lst): + d = {} + for item in lst: + d.update(item) + return d + +def mergeObjects(lst): + lst = [item for item in lst if item is not NotImplemented] + if not lst: + return NotImplemented + lst = [item for item in lst if item is not None] + if not lst: + return None + + clazz = lst[0].__class__ + assert all(type(item) == clazz for item in lst), lst + + logic = clazz.mergeMap + returnTable = clazz() + returnDict = {} + + allKeys = set.union(set(), *(vars(table).keys() for table in lst)) + for key in allKeys: + try: + mergeLogic = logic[key] + except KeyError: + try: + mergeLogic = logic['*'] + except KeyError: + raise Exception("Don't know how to merge key %s of class %s" % + (key, clazz.__name__)) + if mergeLogic is NotImplemented: + continue + value = mergeLogic(getattr(table, key, NotImplemented) for table in lst) 
+ if value is not NotImplemented: + returnDict[key] = value + + returnTable.__dict__ = returnDict + + return returnTable + +def mergeBits(bitmap): + + def wrapper(lst): + lst = list(lst) + returnValue = 0 + for bitNumber in range(bitmap['size']): + try: + mergeLogic = bitmap[bitNumber] + except KeyError: + try: + mergeLogic = bitmap['*'] + except KeyError: + raise Exception("Don't know how to merge bit %s" % bitNumber) + shiftedBit = 1 << bitNumber + mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst) + returnValue |= mergedValue << bitNumber + return returnValue + + return wrapper + + +@_add_method(DefaultTable, allowDefaultTable=True) +def merge(self, m, tables): + if not hasattr(self, 'mergeMap'): + m.log("Don't know how to merge '%s'." % self.tableTag) + return NotImplemented + + logic = self.mergeMap + + if isinstance(logic, dict): + return m.mergeObjects(self, self.mergeMap, tables) + else: + return logic(tables) + + +ttLib.getTableClass('maxp').mergeMap = { + '*': max, + 'tableTag': equal, + 'tableVersion': equal, + 'numGlyphs': sum, + 'maxStorage': first, + 'maxFunctionDefs': first, + 'maxInstructionDefs': first, + # TODO When we correctly merge hinting data, update these values: + # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions +} + +headFlagsMergeBitMap = { + 'size': 16, + '*': bitwise_or, + 1: bitwise_and, # Baseline at y = 0 + 2: bitwise_and, # lsb at x = 0 + 3: bitwise_and, # Force ppem to integer values. FIXME? + 5: bitwise_and, # Font is vertical + 6: lambda bit: 0, # Always set to zero + 11: bitwise_and, # Font data is 'lossless' + 13: bitwise_and, # Optimized for ClearType + 14: bitwise_and, # Last resort font. FIXME? 
equal or first may be better + 15: lambda bit: 0, # Always set to zero +} + +ttLib.getTableClass('head').mergeMap = { + 'tableTag': equal, + 'tableVersion': max, + 'fontRevision': max, + 'checkSumAdjustment': lambda lst: 0, # We need *something* here + 'magicNumber': equal, + 'flags': mergeBits(headFlagsMergeBitMap), + 'unitsPerEm': equal, + 'created': current_time, + 'modified': current_time, + 'xMin': min, + 'yMin': min, + 'xMax': max, + 'yMax': max, + 'macStyle': first, + 'lowestRecPPEM': max, + 'fontDirectionHint': lambda lst: 2, + 'indexToLocFormat': recalculate, + 'glyphDataFormat': equal, +} + +ttLib.getTableClass('hhea').mergeMap = { + '*': equal, + 'tableTag': equal, + 'tableVersion': max, + 'ascent': max, + 'descent': min, + 'lineGap': max, + 'advanceWidthMax': max, + 'minLeftSideBearing': min, + 'minRightSideBearing': min, + 'xMaxExtent': max, + 'caretSlopeRise': first, + 'caretSlopeRun': first, + 'caretOffset': first, + 'numberOfHMetrics': recalculate, +} + +os2FsTypeMergeBitMap = { + 'size': 16, + '*': lambda bit: 0, + 1: bitwise_or, # no embedding permitted + 2: bitwise_and, # allow previewing and printing documents + 3: bitwise_and, # allow editing documents + 8: bitwise_or, # no subsetting permitted + 9: bitwise_or, # no embedding of outlines permitted +} + +def mergeOs2FsType(lst): + lst = list(lst) + if all(item == 0 for item in lst): + return 0 + + # Compute least restrictive logic for each fsType value + for i in range(len(lst)): + # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set + if lst[i] & 0x000C: + lst[i] &= ~0x0002 + # set bit 2 (allow previewing) if bit 3 is set (allow editing) + elif lst[i] & 0x0008: + lst[i] |= 0x0004 + # set bits 2 and 3 if everything is allowed + elif lst[i] == 0: + lst[i] = 0x000C + + fsType = mergeBits(os2FsTypeMergeBitMap)(lst) + # unset bits 2 and 3 if bit 1 is set (some font is "no embedding") + if fsType & 0x0002: + fsType &= ~0x000C + return fsType + + +ttLib.getTableClass('OS/2').mergeMap = { 
+ '*': first, + 'tableTag': equal, + 'version': max, + 'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this + 'fsType': mergeOs2FsType, # Will be overwritten + 'panose': first, # FIXME: should really be the first Latin font + 'ulUnicodeRange1': bitwise_or, + 'ulUnicodeRange2': bitwise_or, + 'ulUnicodeRange3': bitwise_or, + 'ulUnicodeRange4': bitwise_or, + 'fsFirstCharIndex': min, + 'fsLastCharIndex': max, + 'sTypoAscender': max, + 'sTypoDescender': min, + 'sTypoLineGap': max, + 'usWinAscent': max, + 'usWinDescent': max, + # Version 2,3,4 + 'ulCodePageRange1': onlyExisting(bitwise_or), + 'ulCodePageRange2': onlyExisting(bitwise_or), + 'usMaxContex': onlyExisting(max), + # TODO version 5 +} + +@_add_method(ttLib.getTableClass('OS/2')) +def merge(self, m, tables): + DefaultTable.merge(self, m, tables) + if self.version < 2: + # bits 8 and 9 are reserved and should be set to zero + self.fsType &= ~0x0300 + if self.version >= 3: + # Only one of bits 1, 2, and 3 may be set. We already take + # care of bit 1 implications in mergeOs2FsType. So unset + # bit 2 if bit 3 is already set. + if self.fsType & 0x0008: + self.fsType &= ~0x0004 + return self + +ttLib.getTableClass('post').mergeMap = { + '*': first, + 'tableTag': equal, + 'formatType': max, + 'isFixedPitch': min, + 'minMemType42': max, + 'maxMemType42': lambda lst: 0, + 'minMemType1': max, + 'maxMemType1': lambda lst: 0, + 'mapping': onlyExisting(sumDicts), + 'extraNames': lambda lst: [], +} + +ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = { + 'tableTag': equal, + 'metrics': sumDicts, +} + +ttLib.getTableClass('gasp').mergeMap = { + 'tableTag': equal, + 'version': max, + 'gaspRange': first, # FIXME? Appears irreconcilable +} + +ttLib.getTableClass('name').mergeMap = { + 'tableTag': equal, + 'names': first, # FIXME? Does mixing name records make sense? 
+} + +ttLib.getTableClass('loca').mergeMap = { + '*': recalculate, + 'tableTag': equal, +} + +ttLib.getTableClass('glyf').mergeMap = { + 'tableTag': equal, + 'glyphs': sumDicts, + 'glyphOrder': sumLists, +} + +@_add_method(ttLib.getTableClass('glyf')) +def merge(self, m, tables): + for i,table in enumerate(tables): + for g in table.glyphs.values(): + if i: + # Drop hints for all but first font, since + # we don't map functions / CVT values. + g.removeHinting() + # Expand composite glyphs to load their + # composite glyph names. + if g.isComposite(): + g.expand(table) + return DefaultTable.merge(self, m, tables) + +ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst) + +@_add_method(ttLib.getTableClass('cmap')) +def merge(self, m, tables): + # TODO Handle format=14. + cmapTables = [(t,fontIdx) for fontIdx,table in enumerate(tables) for t in table.tables if t.isUnicode()] + # TODO Better handle format-4 and format-12 coexisting in same font. + # TODO Insert both a format-4 and format-12 if needed. + module = ttLib.getTableModule('cmap') + assert all(t.format in [4, 12] for t,_ in cmapTables) + format = max(t.format for t,_ in cmapTables) + cmapTable = module.cmap_classes[format](format) + cmapTable.cmap = {} + cmapTable.platformID = 3 + cmapTable.platEncID = max(t.platEncID for t,_ in cmapTables) + cmapTable.language = 0 + cmap = cmapTable.cmap + for table,fontIdx in cmapTables: + # TODO handle duplicates. + for uni,gid in table.cmap.items(): + oldgid = cmap.get(uni, None) + if oldgid is None: + cmap[uni] = gid + elif oldgid != gid: + # Char previously mapped to oldgid, now to gid. + # Record, to fix up in GSUB 'locl' later. 
+ assert m.duplicateGlyphsPerFont[fontIdx].get(oldgid, gid) == gid + m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid + self.tableVersion = 0 + self.tables = [cmapTable] + self.numSubTables = len(self.tables) + return self + + +otTables.ScriptList.mergeMap = { + 'ScriptCount': sum, + 'ScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.ScriptTag), +} +otTables.BaseScriptList.mergeMap = { + 'BaseScriptCount': sum, + 'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag), +} + +otTables.FeatureList.mergeMap = { + 'FeatureCount': sum, + 'FeatureRecord': sumLists, +} + +otTables.LookupList.mergeMap = { + 'LookupCount': sum, + 'Lookup': sumLists, +} + +otTables.Coverage.mergeMap = { + 'glyphs': sumLists, +} + +otTables.ClassDef.mergeMap = { + 'classDefs': sumDicts, +} + +otTables.LigCaretList.mergeMap = { + 'Coverage': mergeObjects, + 'LigGlyphCount': sum, + 'LigGlyph': sumLists, +} + +otTables.AttachList.mergeMap = { + 'Coverage': mergeObjects, + 'GlyphCount': sum, + 'AttachPoint': sumLists, +} + +# XXX Renumber MarkFilterSets of lookups +otTables.MarkGlyphSetsDef.mergeMap = { + 'MarkSetTableFormat': equal, + 'MarkSetCount': sum, + 'Coverage': sumLists, +} + +otTables.Axis.mergeMap = { + '*': mergeObjects, +} + +# XXX Fix BASE table merging +otTables.BaseTagList.mergeMap = { + 'BaseTagCount': sum, + 'BaselineTag': sumLists, +} + +otTables.GDEF.mergeMap = \ +otTables.GSUB.mergeMap = \ +otTables.GPOS.mergeMap = \ +otTables.BASE.mergeMap = \ +otTables.JSTF.mergeMap = \ +otTables.MATH.mergeMap = \ +{ + '*': mergeObjects, + 'Version': max, +} + +ttLib.getTableClass('GDEF').mergeMap = \ +ttLib.getTableClass('GSUB').mergeMap = \ +ttLib.getTableClass('GPOS').mergeMap = \ +ttLib.getTableClass('BASE').mergeMap = \ +ttLib.getTableClass('JSTF').mergeMap = \ +ttLib.getTableClass('MATH').mergeMap = \ +{ + 'tableTag': onlyExisting(equal), # XXX clean me up + 'table': mergeObjects, +} + +@_add_method(ttLib.getTableClass('GSUB')) +def 
merge(self, m, tables): + + assert len(tables) == len(m.duplicateGlyphsPerFont) + for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)): + if not dups: continue + assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB" % (i + 1) + lookupMap = {id(v):v for v in table.table.LookupList.Lookup} + featureMap = {id(v):v for v in table.table.FeatureList.FeatureRecord} + synthFeature = None + synthLookup = None + for script in table.table.ScriptList.ScriptRecord: + if script.ScriptTag == 'DFLT': continue # XXX + for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]: + feature = [featureMap[v] for v in langsys.FeatureIndex if featureMap[v].FeatureTag == 'locl'] + assert len(feature) <= 1 + if feature: + feature = feature[0] + else: + if not synthFeature: + synthFeature = otTables.FeatureRecord() + synthFeature.FeatureTag = 'locl' + f = synthFeature.Feature = otTables.Feature() + f.FeatureParams = None + f.LookupCount = 0 + f.LookupListIndex = [] + langsys.FeatureIndex.append(id(synthFeature)) + featureMap[id(synthFeature)] = synthFeature + langsys.FeatureIndex.sort(key=lambda v: featureMap[v].FeatureTag) + table.table.FeatureList.FeatureRecord.append(synthFeature) + table.table.FeatureList.FeatureCount += 1 + feature = synthFeature + + if not synthLookup: + subtable = otTables.SingleSubst() + subtable.mapping = dups + synthLookup = otTables.Lookup() + synthLookup.LookupFlag = 0 + synthLookup.LookupType = 1 + synthLookup.SubTableCount = 1 + synthLookup.SubTable = [subtable] + table.table.LookupList.Lookup.append(synthLookup) + table.table.LookupList.LookupCount += 1 + + feature.Feature.LookupListIndex[:0] = [id(synthLookup)] + feature.Feature.LookupCount += 1 + + DefaultTable.merge(self, m, tables) + return self + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + 
otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def mapLookups(self, lookupMap): + pass + +# Copied and trimmed down from subset.py +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def __merge_classify_context(self): + + class ContextHelper(object): + def __init__(self, klass, Format): + if klass.__name__.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klass.__name__.startswith('Chain'): + Chain = 'Chain' + else: + Chain = '' + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleSet = ChainTyp+'RuleSet' + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleSet = ChainTyp+'ClassSet' + + if self.Format not in [1, 2, 3]: + return None # Don't shoot the messenger; let it go + if not hasattr(self.__class__, "__ContextHelpers"): + self.__class__.__ContextHelpers = {} + if self.Format not in self.__class__.__ContextHelpers: + helper = ContextHelper(self.__class__, self.Format) + self.__class__.__ContextHelpers[self.Format] = helper + return self.__class__.__ContextHelpers[self.Format] + + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def mapLookups(self, lookupMap): + c = self.__merge_classify_context() + + if self.Format in [1, 2]: + for rs in getattr(self, c.RuleSet): + if not rs: continue + for r in getattr(rs, c.Rule): + if not r: continue + for ll in getattr(r, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookupMap[ll.LookupListIndex] + elif self.Format == 3: + for ll in getattr(self, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookupMap[ll.LookupListIndex] + 
else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def mapLookups(self, lookupMap): + if self.Format == 1: + self.ExtSubTable.mapLookups(lookupMap) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Lookup) +def mapLookups(self, lookupMap): + for st in self.SubTable: + if not st: continue + st.mapLookups(lookupMap) + +@_add_method(otTables.LookupList) +def mapLookups(self, lookupMap): + for l in self.Lookup: + if not l: continue + l.mapLookups(lookupMap) + +@_add_method(otTables.Feature) +def mapLookups(self, lookupMap): + self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex] + +@_add_method(otTables.FeatureList) +def mapLookups(self, lookupMap): + for f in self.FeatureRecord: + if not f or not f.Feature: continue + f.Feature.mapLookups(lookupMap) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def mapFeatures(self, featureMap): + self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex] + if self.ReqFeatureIndex != 65535: + self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex] + +@_add_method(otTables.Script) +def mapFeatures(self, featureMap): + if self.DefaultLangSys: + self.DefaultLangSys.mapFeatures(featureMap) + for l in self.LangSysRecord: + if not l or not l.LangSys: continue + l.LangSys.mapFeatures(featureMap) + +@_add_method(otTables.ScriptList) +def mapFeatures(self, featureMap): + for s in self.ScriptRecord: + if not s or not s.Script: continue + s.Script.mapFeatures(featureMap) + + +class Options(object): + + class UnknownOptionError(Exception): + pass + + def __init__(self, **kwargs): + + self.set(**kwargs) + + def set(self, **kwargs): + for k,v in kwargs.items(): + if not hasattr(self, k): + raise self.UnknownOptionError("Unknown option '%s'" % k) + setattr(self, k, v) + + def parse_opts(self, argv, ignore_unknown=False): + ret = [] + opts = {} + for a in argv: + orig_a = a + if not a.startswith('--'): + ret.append(a) + 
continue + a = a[2:] + i = a.find('=') + op = '=' + if i == -1: + if a.startswith("no-"): + k = a[3:] + v = False + else: + k = a + v = True + else: + k = a[:i] + if k[-1] in "-+": + op = k[-1]+'=' # Ops is '-=' or '+=' now. + k = k[:-1] + v = a[i+1:] + k = k.replace('-', '_') + if not hasattr(self, k): + if ignore_unknown is True or k in ignore_unknown: + ret.append(orig_a) + continue + else: + raise self.UnknownOptionError("Unknown option '%s'" % a) + + ov = getattr(self, k) + if isinstance(ov, bool): + v = bool(v) + elif isinstance(ov, int): + v = int(v) + elif isinstance(ov, list): + vv = v.split(',') + if vv == ['']: + vv = [] + vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] + if op == '=': + v = vv + elif op == '+=': + v = ov + v.extend(vv) + elif op == '-=': + v = ov + for x in vv: + if x in v: + v.remove(x) + else: + assert 0 + + opts[k] = v + self.set(**opts) + + return ret + + +class Merger(object): + + def __init__(self, options=None, log=None): + + if not log: + log = Logger() + if not options: + options = Options() + + self.options = options + self.log = log + + def merge(self, fontfiles): + + mega = ttLib.TTFont() + + # + # Settle on a mega glyph order. + # + fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] + glyphOrders = [font.getGlyphOrder() for font in fonts] + megaGlyphOrder = self._mergeGlyphOrders(glyphOrders) + # Reload fonts and set new glyph names on them. + # TODO Is it necessary to reload font? I think it is. At least + # it's safer, in case tables were loaded to provide glyph names. 
+ fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] + for font,glyphOrder in zip(fonts, glyphOrders): + font.setGlyphOrder(glyphOrder) + mega.setGlyphOrder(megaGlyphOrder) + + for font in fonts: + self._preMerge(font) + + self.duplicateGlyphsPerFont = [{} for f in fonts] + + allTags = reduce(set.union, (list(font.keys()) for font in fonts), set()) + allTags.remove('GlyphOrder') + + # Make sure we process cmap before GSUB as we have a dependency there. + if 'GSUB' in allTags: + allTags.remove('GSUB') + allTags = ['GSUB'] + list(allTags) + if 'cmap' in allTags: + allTags.remove('cmap') + allTags = ['cmap'] + list(allTags) + + for tag in allTags: + + tables = [font.get(tag, NotImplemented) for font in fonts] + + clazz = ttLib.getTableClass(tag) + table = clazz(tag).merge(self, tables) + # XXX Clean this up and use: table = mergeObjects(tables) + + if table is not NotImplemented and table is not False: + mega[tag] = table + self.log("Merged '%s'." % tag) + else: + self.log("Dropped '%s'." % tag) + self.log.lapse("merge '%s'" % tag) + + del self.duplicateGlyphsPerFont + + self._postMerge(mega) + + return mega + + def _mergeGlyphOrders(self, glyphOrders): + """Modifies passed-in glyphOrders to reflect new glyph names. + Returns glyphOrder for the merged font.""" + # Simply append font index to the glyph name for now. + # TODO Even this simplistic numbering can result in conflicts. + # But then again, we have to improve this soon anyway. + mega = [] + for n,glyphOrder in enumerate(glyphOrders): + for i,glyphName in enumerate(glyphOrder): + glyphName += "#" + repr(n) + glyphOrder[i] = glyphName + mega.append(glyphName) + return mega + + def mergeObjects(self, returnTable, logic, tables): + # Right now we don't use self at all. Will use in the future + # for options and logging. 
+ + allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented)) + for key in allKeys: + try: + mergeLogic = logic[key] + except KeyError: + try: + mergeLogic = logic['*'] + except KeyError: + raise Exception("Don't know how to merge key %s of class %s" % + (key, returnTable.__class__.__name__)) + if mergeLogic is NotImplemented: + continue + value = mergeLogic(getattr(table, key, NotImplemented) for table in tables) + if value is not NotImplemented: + setattr(returnTable, key, value) + + return returnTable + + def _preMerge(self, font): + + # Map indices to references + + GDEF = font.get('GDEF') + GSUB = font.get('GSUB') + GPOS = font.get('GPOS') + + for t in [GSUB, GPOS]: + if not t: continue + + if t.table.LookupList: + lookupMap = {i:id(v) for i,v in enumerate(t.table.LookupList.Lookup)} + t.table.LookupList.mapLookups(lookupMap) + if t.table.FeatureList: + # XXX Handle present FeatureList but absent LookupList + t.table.FeatureList.mapLookups(lookupMap) + + if t.table.FeatureList and t.table.ScriptList: + featureMap = {i:id(v) for i,v in enumerate(t.table.FeatureList.FeatureRecord)} + t.table.ScriptList.mapFeatures(featureMap) + + # TODO GDEF/Lookup MarkFilteringSets + # TODO FeatureParams nameIDs + + def _postMerge(self, font): + + # Map references back to indices + + GDEF = font.get('GDEF') + GSUB = font.get('GSUB') + GPOS = font.get('GPOS') + + for t in [GSUB, GPOS]: + if not t: continue + + if t.table.LookupList: + lookupMap = {id(v):i for i,v in enumerate(t.table.LookupList.Lookup)} + t.table.LookupList.mapLookups(lookupMap) + if t.table.FeatureList: + # XXX Handle present FeatureList but absent LookupList + t.table.FeatureList.mapLookups(lookupMap) + + if t.table.FeatureList and t.table.ScriptList: + # XXX Handle present ScriptList but absent FeatureList + featureMap = {id(v):i for i,v in enumerate(t.table.FeatureList.FeatureRecord)} + t.table.ScriptList.mapFeatures(featureMap) + + # TODO GDEF/Lookup 
MarkFilteringSets + # TODO FeatureParams nameIDs + + +class Logger(object): + + def __init__(self, verbose=False, xml=False, timing=False): + self.verbose = verbose + self.xml = xml + self.timing = timing + self.last_time = self.start_time = time.time() + + def parse_opts(self, argv): + argv = argv[:] + for v in ['verbose', 'xml', 'timing']: + if "--"+v in argv: + setattr(self, v, True) + argv.remove("--"+v) + return argv + + def __call__(self, *things): + if not self.verbose: + return + print(' '.join(str(x) for x in things)) + + def lapse(self, *things): + if not self.timing: + return + new_time = time.time() + print("Took %0.3fs to %s" %(new_time - self.last_time, + ' '.join(str(x) for x in things))) + self.last_time = new_time + + def font(self, font, file=sys.stdout): + if not self.xml: + return + from fontTools.misc import xmlWriter + writer = xmlWriter.XMLWriter(file) + font.disassembleInstructions = False # Work around ttLib bug + for tag in font.keys(): + writer.begintag(tag) + writer.newline() + font[tag].toXML(writer, font) + writer.endtag(tag) + writer.newline() + + +__all__ = [ + 'Options', + 'Merger', + 'Logger', + 'main' +] + +def main(args=None): + + if args is None: + args = sys.argv[1:] + + log = Logger() + args = log.parse_opts(args) + + options = Options() + args = options.parse_opts(args) + + if len(args) < 1: + print("usage: pyftmerge font...", file=sys.stderr) + sys.exit(1) + + merger = Merger(options=options, log=log) + font = merger.merge(args) + outfile = 'merged.ttf' + font.save(outfile) + log.lapse("compile and save font") + + log.last_time = log.start_time + log.lapse("make one with everything(TOTAL TIME)") + +if __name__ == "__main__": + main() diff -Nru fonttools-2.4/Tools/fontTools/misc/arrayTools.py fonttools-3.0/Tools/fontTools/misc/arrayTools.py --- fonttools-2.4/Tools/fontTools/misc/arrayTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/arrayTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 
+1,185 @@ +# +# Various array and rectangle tools, but mostly rectangles, hence the +# name of this module (not). +# + + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import math + +def calcBounds(array): + """Return the bounding rectangle of a 2D points array as a tuple: + (xMin, yMin, xMax, yMax) + """ + if len(array) == 0: + return 0, 0, 0, 0 + xs = [x for x, y in array] + ys = [y for x, y in array] + return min(xs), min(ys), max(xs), max(ys) + +def calcIntBounds(array): + """Return the integer bounding rectangle of a 2D points array as a + tuple: (xMin, yMin, xMax, yMax) + """ + xMin, yMin, xMax, yMax = calcBounds(array) + xMin = int(math.floor(xMin)) + xMax = int(math.ceil(xMax)) + yMin = int(math.floor(yMin)) + yMax = int(math.ceil(yMax)) + return xMin, yMin, xMax, yMax + + +def updateBounds(bounds, p, min=min, max=max): + """Return the bounding recangle of rectangle bounds and point (x, y).""" + (x, y) = p + xMin, yMin, xMax, yMax = bounds + return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y) + +def pointInRect(p, rect): + """Return True when point (x, y) is inside rect.""" + (x, y) = p + xMin, yMin, xMax, yMax = rect + return (xMin <= x <= xMax) and (yMin <= y <= yMax) + +def pointsInRect(array, rect): + """Find out which points or array are inside rect. + Returns an array with a boolean for each point. 
+ """ + if len(array) < 1: + return [] + xMin, yMin, xMax, yMax = rect + return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array] + +def vectorLength(vector): + """Return the length of the given vector.""" + x, y = vector + return math.sqrt(x**2 + y**2) + +def asInt16(array): + """Round and cast to 16 bit integer.""" + return [int(math.floor(i+0.5)) for i in array] + + +def normRect(rect): + """Normalize the rectangle so that the following holds: + xMin <= xMax and yMin <= yMax + """ + (xMin, yMin, xMax, yMax) = rect + return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax) + +def scaleRect(rect, x, y): + """Scale the rectangle by x, y.""" + (xMin, yMin, xMax, yMax) = rect + return xMin * x, yMin * y, xMax * x, yMax * y + +def offsetRect(rect, dx, dy): + """Offset the rectangle by dx, dy.""" + (xMin, yMin, xMax, yMax) = rect + return xMin+dx, yMin+dy, xMax+dx, yMax+dy + +def insetRect(rect, dx, dy): + """Inset the rectangle by dx, dy on all sides.""" + (xMin, yMin, xMax, yMax) = rect + return xMin+dx, yMin+dy, xMax-dx, yMax-dy + +def sectRect(rect1, rect2): + """Return a boolean and a rectangle. If the input rectangles intersect, return + True and the intersecting rectangle. Return False and (0, 0, 0, 0) if the input + rectangles don't intersect. + """ + (xMin1, yMin1, xMax1, yMax1) = rect1 + (xMin2, yMin2, xMax2, yMax2) = rect2 + xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2), + min(xMax1, xMax2), min(yMax1, yMax2)) + if xMin >= xMax or yMin >= yMax: + return False, (0, 0, 0, 0) + return True, (xMin, yMin, xMax, yMax) + +def unionRect(rect1, rect2): + """Return the smallest rectangle in which both input rectangles are fully + enclosed. In other words, return the total bounding rectangle of both input + rectangles. 
+ """ + (xMin1, yMin1, xMax1, yMax1) = rect1 + (xMin2, yMin2, xMax2, yMax2) = rect2 + xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2), + max(xMax1, xMax2), max(yMax1, yMax2)) + return (xMin, yMin, xMax, yMax) + +def rectCenter(rect0): + """Return the center of the rectangle as an (x, y) coordinate.""" + (xMin, yMin, xMax, yMax) = rect0 + return (xMin+xMax)/2, (yMin+yMax)/2 + +def intRect(rect1): + """Return the rectangle, rounded off to integer values, but guaranteeing that + the resulting rectangle is NOT smaller than the original. + """ + (xMin, yMin, xMax, yMax) = rect1 + xMin = int(math.floor(xMin)) + yMin = int(math.floor(yMin)) + xMax = int(math.ceil(xMax)) + yMax = int(math.ceil(yMax)) + return (xMin, yMin, xMax, yMax) + + +def _test(): + """ + >>> import math + >>> calcBounds([]) + (0, 0, 0, 0) + >>> calcBounds([(0, 40), (0, 100), (50, 50), (80, 10)]) + (0, 10, 80, 100) + >>> updateBounds((0, 0, 0, 0), (100, 100)) + (0, 0, 100, 100) + >>> pointInRect((50, 50), (0, 0, 100, 100)) + True + >>> pointInRect((0, 0), (0, 0, 100, 100)) + True + >>> pointInRect((100, 100), (0, 0, 100, 100)) + True + >>> not pointInRect((101, 100), (0, 0, 100, 100)) + True + >>> list(pointsInRect([(50, 50), (0, 0), (100, 100), (101, 100)], (0, 0, 100, 100))) + [True, True, True, False] + >>> vectorLength((3, 4)) + 5.0 + >>> vectorLength((1, 1)) == math.sqrt(2) + True + >>> list(asInt16([0, 0.1, 0.5, 0.9])) + [0, 0, 1, 1] + >>> normRect((0, 10, 100, 200)) + (0, 10, 100, 200) + >>> normRect((100, 200, 0, 10)) + (0, 10, 100, 200) + >>> scaleRect((10, 20, 50, 150), 1.5, 2) + (15.0, 40, 75.0, 300) + >>> offsetRect((10, 20, 30, 40), 5, 6) + (15, 26, 35, 46) + >>> insetRect((10, 20, 50, 60), 5, 10) + (15, 30, 45, 50) + >>> insetRect((10, 20, 50, 60), -5, -10) + (5, 10, 55, 70) + >>> intersects, rect = sectRect((0, 10, 20, 30), (0, 40, 20, 50)) + >>> not intersects + True + >>> intersects, rect = sectRect((0, 10, 20, 30), (5, 20, 35, 50)) + >>> intersects + 1 + >>> rect + (5, 
20, 20, 30) + >>> unionRect((0, 10, 20, 30), (0, 40, 20, 50)) + (0, 10, 20, 50) + >>> rectCenter((0, 0, 100, 200)) + (50.0, 100.0) + >>> rectCenter((0, 0, 100, 199.0)) + (50.0, 99.5) + >>> intRect((0.9, 2.9, 3.1, 4.1)) + (0, 2, 4, 5) + """ + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Tools/fontTools/misc/bezierTools.py fonttools-3.0/Tools/fontTools/misc/bezierTools.py --- fonttools-2.4/Tools/fontTools/misc/bezierTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/bezierTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,414 @@ +"""fontTools.misc.bezierTools.py -- tools for working with bezier path segments. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = [ + "calcQuadraticBounds", + "calcCubicBounds", + "splitLine", + "splitQuadratic", + "splitCubic", + "splitQuadraticAtT", + "splitCubicAtT", + "solveQuadratic", + "solveCubic", +] + +from fontTools.misc.arrayTools import calcBounds + +epsilon = 1e-12 + + +def calcQuadraticBounds(pt1, pt2, pt3): + """Return the bounding rectangle for a qudratic bezier segment. + pt1 and pt3 are the "anchor" points, pt2 is the "handle". + + >>> calcQuadraticBounds((0, 0), (50, 100), (100, 0)) + (0, 0, 100, 50.0) + >>> calcQuadraticBounds((0, 0), (100, 0), (100, 100)) + (0.0, 0.0, 100, 100) + """ + (ax, ay), (bx, by), (cx, cy) = calcQuadraticParameters(pt1, pt2, pt3) + ax2 = ax*2.0 + ay2 = ay*2.0 + roots = [] + if ax2 != 0: + roots.append(-bx/ax2) + if ay2 != 0: + roots.append(-by/ay2) + points = [(ax*t*t + bx*t + cx, ay*t*t + by*t + cy) for t in roots if 0 <= t < 1] + [pt1, pt3] + return calcBounds(points) + + +def calcCubicBounds(pt1, pt2, pt3, pt4): + """Return the bounding rectangle for a cubic bezier segment. + pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles". 
+ + >>> calcCubicBounds((0, 0), (25, 100), (75, 100), (100, 0)) + (0, 0, 100, 75.0) + >>> calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100)) + (0.0, 0.0, 100, 100) + >>> print("%f %f %f %f" % calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0))) + 35.566243 0.000000 64.433757 75.000000 + """ + (ax, ay), (bx, by), (cx, cy), (dx, dy) = calcCubicParameters(pt1, pt2, pt3, pt4) + # calc first derivative + ax3 = ax * 3.0 + ay3 = ay * 3.0 + bx2 = bx * 2.0 + by2 = by * 2.0 + xRoots = [t for t in solveQuadratic(ax3, bx2, cx) if 0 <= t < 1] + yRoots = [t for t in solveQuadratic(ay3, by2, cy) if 0 <= t < 1] + roots = xRoots + yRoots + + points = [(ax*t*t*t + bx*t*t + cx * t + dx, ay*t*t*t + by*t*t + cy * t + dy) for t in roots] + [pt1, pt4] + return calcBounds(points) + + +def splitLine(pt1, pt2, where, isHorizontal): + """Split the line between pt1 and pt2 at position 'where', which + is an x coordinate if isHorizontal is False, a y coordinate if + isHorizontal is True. Return a list of two line segments if the + line was successfully split, or a list containing the original + line. 
+ + >>> printSegments(splitLine((0, 0), (100, 100), 50, True)) + ((0, 0), (50, 50)) + ((50, 50), (100, 100)) + >>> printSegments(splitLine((0, 0), (100, 100), 100, True)) + ((0, 0), (100, 100)) + >>> printSegments(splitLine((0, 0), (100, 100), 0, True)) + ((0, 0), (0, 0)) + ((0, 0), (100, 100)) + >>> printSegments(splitLine((0, 0), (100, 100), 0, False)) + ((0, 0), (0, 0)) + ((0, 0), (100, 100)) + >>> printSegments(splitLine((100, 0), (0, 0), 50, False)) + ((100, 0), (50, 0)) + ((50, 0), (0, 0)) + >>> printSegments(splitLine((0, 100), (0, 0), 50, True)) + ((0, 100), (0, 50)) + ((0, 50), (0, 0)) + """ + pt1x, pt1y = pt1 + pt2x, pt2y = pt2 + + ax = (pt2x - pt1x) + ay = (pt2y - pt1y) + + bx = pt1x + by = pt1y + + a = (ax, ay)[isHorizontal] + + if a == 0: + return [(pt1, pt2)] + t = (where - (bx, by)[isHorizontal]) / a + if 0 <= t < 1: + midPt = ax * t + bx, ay * t + by + return [(pt1, midPt), (midPt, pt2)] + else: + return [(pt1, pt2)] + + +def splitQuadratic(pt1, pt2, pt3, where, isHorizontal): + """Split the quadratic curve between pt1, pt2 and pt3 at position 'where', + which is an x coordinate if isHorizontal is False, a y coordinate if + isHorizontal is True. Return a list of curve segments. 
+ + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 150, False)) + ((0, 0), (50, 100), (100, 0)) + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, False)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, False)) + ((0, 0), (12.5, 25), (25, 37.5)) + ((25, 37.5), (62.5, 75), (100, 0)) + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, True)) + ((0, 0), (7.32233, 14.6447), (14.6447, 25)) + ((14.6447, 25), (50, 75), (85.3553, 25)) + ((85.3553, 25), (92.6777, 14.6447), (100, -7.10543e-15)) + >>> # XXX I'm not at all sure if the following behavior is desirable: + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, True)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (50, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) + """ + a, b, c = calcQuadraticParameters(pt1, pt2, pt3) + solutions = solveQuadratic(a[isHorizontal], b[isHorizontal], + c[isHorizontal] - where) + solutions = sorted([t for t in solutions if 0 <= t < 1]) + if not solutions: + return [(pt1, pt2, pt3)] + return _splitQuadraticAtT(a, b, c, *solutions) + + +def splitCubic(pt1, pt2, pt3, pt4, where, isHorizontal): + """Split the cubic curve between pt1, pt2, pt3 and pt4 at position 'where', + which is an x coordinate if isHorizontal is False, a y coordinate if + isHorizontal is True. Return a list of curve segments. 
+ + >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 150, False)) + ((0, 0), (25, 100), (75, 100), (100, 0)) + >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 50, False)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) + >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 25, True)) + ((0, 0), (2.29379, 9.17517), (4.79804, 17.5085), (7.47414, 25)) + ((7.47414, 25), (31.2886, 91.6667), (68.7114, 91.6667), (92.5259, 25)) + ((92.5259, 25), (95.202, 17.5085), (97.7062, 9.17517), (100, 1.77636e-15)) + """ + a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) + solutions = solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal], + d[isHorizontal] - where) + solutions = sorted([t for t in solutions if 0 <= t < 1]) + if not solutions: + return [(pt1, pt2, pt3, pt4)] + return _splitCubicAtT(a, b, c, d, *solutions) + + +def splitQuadraticAtT(pt1, pt2, pt3, *ts): + """Split the quadratic curve between pt1, pt2 and pt3 at one or more + values of t. Return a list of curve segments. + + >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) + >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (62.5, 50), (75, 37.5)) + ((75, 37.5), (87.5, 25), (100, 0)) + """ + a, b, c = calcQuadraticParameters(pt1, pt2, pt3) + return _splitQuadraticAtT(a, b, c, *ts) + + +def splitCubicAtT(pt1, pt2, pt3, pt4, *ts): + """Split the cubic curve between pt1, pt2, pt3 and pt4 at one or more + values of t. Return a list of curve segments. 
+ + >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) + >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (59.375, 75), (68.75, 68.75), (77.3438, 56.25)) + ((77.3438, 56.25), (85.9375, 43.75), (93.75, 25), (100, 0)) + """ + a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) + return _splitCubicAtT(a, b, c, d, *ts) + + +def _splitQuadraticAtT(a, b, c, *ts): + ts = list(ts) + segments = [] + ts.insert(0, 0.0) + ts.append(1.0) + ax, ay = a + bx, by = b + cx, cy = c + for i in range(len(ts) - 1): + t1 = ts[i] + t2 = ts[i+1] + delta = (t2 - t1) + # calc new a, b and c + a1x = ax * delta**2 + a1y = ay * delta**2 + b1x = (2*ax*t1 + bx) * delta + b1y = (2*ay*t1 + by) * delta + c1x = ax*t1**2 + bx*t1 + cx + c1y = ay*t1**2 + by*t1 + cy + + pt1, pt2, pt3 = calcQuadraticPoints((a1x, a1y), (b1x, b1y), (c1x, c1y)) + segments.append((pt1, pt2, pt3)) + return segments + + +def _splitCubicAtT(a, b, c, d, *ts): + ts = list(ts) + ts.insert(0, 0.0) + ts.append(1.0) + segments = [] + ax, ay = a + bx, by = b + cx, cy = c + dx, dy = d + for i in range(len(ts) - 1): + t1 = ts[i] + t2 = ts[i+1] + delta = (t2 - t1) + # calc new a, b, c and d + a1x = ax * delta**3 + a1y = ay * delta**3 + b1x = (3*ax*t1 + bx) * delta**2 + b1y = (3*ay*t1 + by) * delta**2 + c1x = (2*bx*t1 + cx + 3*ax*t1**2) * delta + c1y = (2*by*t1 + cy + 3*ay*t1**2) * delta + d1x = ax*t1**3 + bx*t1**2 + cx*t1 + dx + d1y = ay*t1**3 + by*t1**2 + cy*t1 + dy + pt1, pt2, pt3, pt4 = calcCubicPoints((a1x, a1y), (b1x, b1y), (c1x, c1y), (d1x, d1y)) + segments.append((pt1, pt2, pt3, pt4)) + return segments + + +# +# Equation solvers. +# + +from math import sqrt, acos, cos, pi + + +def solveQuadratic(a, b, c, + sqrt=sqrt): + """Solve a quadratic equation where a, b and c are real. 
+ a*x*x + b*x + c = 0 + This function returns a list of roots. Note that the returned list + is neither guaranteed to be sorted nor to contain unique values! + """ + if abs(a) < epsilon: + if abs(b) < epsilon: + # We have a non-equation; therefore, we have no valid solution + roots = [] + else: + # We have a linear equation with 1 root. + roots = [-c/b] + else: + # We have a true quadratic equation. Apply the quadratic formula to find two roots. + DD = b*b - 4.0*a*c + if DD >= 0.0: + rDD = sqrt(DD) + roots = [(-b+rDD)/2.0/a, (-b-rDD)/2.0/a] + else: + # complex roots, ignore + roots = [] + return roots + + +def solveCubic(a, b, c, d): + """Solve a cubic equation where a, b, c and d are real. + a*x*x*x + b*x*x + c*x + d = 0 + This function returns a list of roots. Note that the returned list + is neither guaranteed to be sorted nor to contain unique values! + """ + # + # adapted from: + # CUBIC.C - Solve a cubic polynomial + # public domain by Ross Cottrell + # found at: http://www.strangecreations.com/library/snippets/Cubic.C + # + if abs(a) < epsilon: + # don't just test for zero; for very small values of 'a' solveCubic() + # returns unreliable results, so we fall back to quad. 
+ return solveQuadratic(b, c, d) + a = float(a) + a1 = b/a + a2 = c/a + a3 = d/a + + Q = (a1*a1 - 3.0*a2)/9.0 + R = (2.0*a1*a1*a1 - 9.0*a1*a2 + 27.0*a3)/54.0 + R2_Q3 = R*R - Q*Q*Q + + if R2_Q3 < 0: + theta = acos(R/sqrt(Q*Q*Q)) + rQ2 = -2.0*sqrt(Q) + x0 = rQ2*cos(theta/3.0) - a1/3.0 + x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1/3.0 + x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1/3.0 + return [x0, x1, x2] + else: + if Q == 0 and R == 0: + x = 0 + else: + x = pow(sqrt(R2_Q3)+abs(R), 1/3.0) + x = x + Q/x + if R >= 0.0: + x = -x + x = x - a1/3.0 + return [x] + + +# +# Conversion routines for points to parameters and vice versa +# + +def calcQuadraticParameters(pt1, pt2, pt3): + x2, y2 = pt2 + x3, y3 = pt3 + cx, cy = pt1 + bx = (x2 - cx) * 2.0 + by = (y2 - cy) * 2.0 + ax = x3 - cx - bx + ay = y3 - cy - by + return (ax, ay), (bx, by), (cx, cy) + + +def calcCubicParameters(pt1, pt2, pt3, pt4): + x2, y2 = pt2 + x3, y3 = pt3 + x4, y4 = pt4 + dx, dy = pt1 + cx = (x2 -dx) * 3.0 + cy = (y2 -dy) * 3.0 + bx = (x3 - x2) * 3.0 - cx + by = (y3 - y2) * 3.0 - cy + ax = x4 - dx - cx - bx + ay = y4 - dy - cy - by + return (ax, ay), (bx, by), (cx, cy), (dx, dy) + + +def calcQuadraticPoints(a, b, c): + ax, ay = a + bx, by = b + cx, cy = c + x1 = cx + y1 = cy + x2 = (bx * 0.5) + cx + y2 = (by * 0.5) + cy + x3 = ax + bx + cx + y3 = ay + by + cy + return (x1, y1), (x2, y2), (x3, y3) + + +def calcCubicPoints(a, b, c, d): + ax, ay = a + bx, by = b + cx, cy = c + dx, dy = d + x1 = dx + y1 = dy + x2 = (cx / 3.0) + dx + y2 = (cy / 3.0) + dy + x3 = (bx + cx) / 3.0 + x2 + y3 = (by + cy) / 3.0 + y2 + x4 = ax + dx + cx + bx + y4 = ay + dy + cy + by + return (x1, y1), (x2, y2), (x3, y3), (x4, y4) + + +def _segmentrepr(obj): + """ + >>> _segmentrepr([1, [2, 3], [], [[2, [3, 4], [0.1, 2.2]]]]) + '(1, (2, 3), (), ((2, (3, 4), (0.1, 2.2))))' + """ + try: + it = iter(obj) + except TypeError: + return "%g" % obj + else: + return "(%s)" % ", ".join([_segmentrepr(x) for x in it]) + + +def printSegments(segments): + 
"""Helper for the doctests, displaying each segment in a list of + segments on a single line as a tuple. + """ + for segment in segments: + print(_segmentrepr(segment)) + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Tools/fontTools/misc/eexec.py fonttools-3.0/Tools/fontTools/misc/eexec.py --- fonttools-2.4/Tools/fontTools/misc/eexec.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/eexec.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,55 @@ +"""fontTools.misc.eexec.py -- Module implementing the eexec and +charstring encryption algorithm as used by PostScript Type 1 fonts. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +def _decryptChar(cipher, R): + cipher = byteord(cipher) + plain = ( (cipher ^ (R>>8)) ) & 0xFF + R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF + return bytechr(plain), R + +def _encryptChar(plain, R): + plain = byteord(plain) + cipher = ( (plain ^ (R>>8)) ) & 0xFF + R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF + return bytechr(cipher), R + + +def decrypt(cipherstring, R): + plainList = [] + for cipher in cipherstring: + plain, R = _decryptChar(cipher, R) + plainList.append(plain) + plainstring = strjoin(plainList) + return plainstring, int(R) + +def encrypt(plainstring, R): + cipherList = [] + for plain in plainstring: + cipher, R = _encryptChar(plain, R) + cipherList.append(cipher) + cipherstring = strjoin(cipherList) + return cipherstring, int(R) + + +def hexString(s): + import binascii + return binascii.hexlify(s) + +def deHexString(h): + import binascii + h = strjoin(h.split()) + return binascii.unhexlify(h) + + +def _test(): + testStr = "\0\0asdadads asds\265" + print(decrypt, decrypt(testStr, 12321)) + print(encrypt, encrypt(testStr, 12321)) + + +if __name__ == "__main__": + _test() diff -Nru fonttools-2.4/Tools/fontTools/misc/encodingTools.py 
fonttools-3.0/Tools/fontTools/misc/encodingTools.py --- fonttools-2.4/Tools/fontTools/misc/encodingTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/encodingTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,73 @@ +"""fontTools.misc.encodingTools.py -- tools for working with OpenType encodings. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import fontTools.encodings.codecs + +# Map keyed by platformID, then platEncID, then possibly langID +_encodingMap = { + 0: { # Unicode + 0: 'utf_16_be', + 1: 'utf_16_be', + 2: 'utf_16_be', + 3: 'utf_16_be', + 4: 'utf_16_be', + 5: 'utf_16_be', + 6: 'utf_16_be', + }, + 1: { # Macintosh + # See + # https://github.com/behdad/fonttools/issues/236 + 0: { # Macintosh, platEncID==0, keyed by langID + 15: "mac_iceland", + 17: "mac_turkish", + 18: "mac_croatian", + 24: "mac_latin2", + 25: "mac_latin2", + 26: "mac_latin2", + 27: "mac_latin2", + 28: "mac_latin2", + 36: "mac_latin2", + 37: "mac_romanian", + 38: "mac_latin2", + 39: "mac_latin2", + 40: "mac_latin2", + Ellipsis: 'mac_roman', # Other + }, + 1: 'x_mac_japanese_ttx', + 2: 'x_mac_trad_chinese_ttx', + 3: 'x_mac_korean_ttx', + 6: 'mac_greek', + 7: 'mac_cyrillic', + 25: 'x_mac_simp_chinese_ttx', + 29: 'mac_latin2', + 35: 'mac_turkish', + 37: 'mac_iceland', + }, + 2: { # ISO + 0: 'ascii', + 1: 'utf_16_be', + 2: 'latin1', + }, + 3: { # Microsoft + 0: 'utf_16_be', + 1: 'utf_16_be', + 2: 'shift_jis', + 3: 'gb2312', + 4: 'big5', + 5: 'euc_kr', + 6: 'johab', + 10: 'utf_16_be', + }, +} + +def getEncoding(platformID, platEncID, langID, default=None): + """Returns the Python encoding name for OpenType platformID/encodingID/langID + triplet. If encoding for these values is not known, by default None is + returned. That can be overriden by passing a value to the default argument. 
+ """ + encoding = _encodingMap.get(platformID, {}).get(platEncID, default) + if isinstance(encoding, dict): + encoding = encoding.get(langID, encoding[Ellipsis]) + return encoding diff -Nru fonttools-2.4/Tools/fontTools/misc/encodingTools_test.py fonttools-3.0/Tools/fontTools/misc/encodingTools_test.py --- fonttools-2.4/Tools/fontTools/misc/encodingTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/encodingTools_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,31 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +import unittest +from .encodingTools import getEncoding + +class EncodingTest(unittest.TestCase): + + def test_encoding_unicode(self): + + self.assertEqual(getEncoding(3, 0, None), "utf_16_be") # MS Symbol is Unicode as well + self.assertEqual(getEncoding(3, 1, None), "utf_16_be") + self.assertEqual(getEncoding(3, 10, None), "utf_16_be") + self.assertEqual(getEncoding(0, 3, None), "utf_16_be") + + def test_encoding_macroman_misc(self): + self.assertEqual(getEncoding(1, 0, 17), "mac_turkish") + self.assertEqual(getEncoding(1, 0, 37), "mac_romanian") + self.assertEqual(getEncoding(1, 0, 45), "mac_roman") + + def test_extended_mac_encodings(self): + encoding = getEncoding(1, 1, 0) # Mac Japanese + decoded = b'\xfe'.decode(encoding) + self.assertEqual(decoded, unichr(0x2122)) + + def test_extended_unknown(self): + self.assertEqual(getEncoding(10, 11, 12), None) + self.assertEqual(getEncoding(10, 11, 12, "ascii"), "ascii") + self.assertEqual(getEncoding(10, 11, 12, default="ascii"), "ascii") + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/misc/fixedTools.py fonttools-3.0/Tools/fontTools/misc/fixedTools.py --- fonttools-2.4/Tools/fontTools/misc/fixedTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/fixedTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,47 @@ 
+"""fontTools.misc.fixedTools.py -- tools for working with fixed numbers. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = [ + "fixedToFloat", + "floatToFixed", +] + +def fixedToFloat(value, precisionBits): + """Converts a fixed-point number to a float, choosing the float + that has the shortest decimal reprentation. Eg. to convert a + fixed number in a 2.14 format, use precisionBits=14. This is + pretty slow compared to a simple division. Use sporadically. + + precisionBits is only supported up to 16. + """ + if not value: return 0.0 + + scale = 1 << precisionBits + value /= scale + eps = .5 / scale + lo = value - eps + hi = value + eps + # If the range of valid choices spans an integer, return the integer. + if int(lo) != int(hi): + return float(round(value)) + fmt = "%.8f" + lo = fmt % lo + hi = fmt % hi + assert len(lo) == len(hi) and lo != hi + for i in range(len(lo)): + if lo[i] != hi[i]: + break + period = lo.find('.') + assert period < i + fmt = "%%.%df" % (i - period) + value = fmt % value + return float(value) + +def floatToFixed(value, precisionBits): + """Converts a float to a fixed-point number given the number of + precisionBits. Ie. int(round(value * (1<<precisionBits))). 
+ """ + return int(round(value * (1<<precisionBits))) diff -Nru fonttools-2.4/Tools/fontTools/misc/fixedTools_test.py fonttools-3.0/Tools/fontTools/misc/fixedTools_test.py --- fonttools-2.4/Tools/fontTools/misc/fixedTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/fixedTools_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,42 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +import unittest + + +class FixedToolsTest(unittest.TestCase): + + def test_roundtrip(self): + for bits in range(0, 15): + for value in range(-(2**(bits+1)), 2**(bits+1)): + self.assertEqual(value, floatToFixed(fixedToFloat(value, bits), bits)) + + def test_fixedToFloat_precision14(self): + self.assertEqual(0.8, fixedToFloat(13107, 14)) + self.assertEqual(0.0, fixedToFloat(0, 14)) + self.assertEqual(1.0, fixedToFloat(16384, 14)) + self.assertEqual(-1.0, fixedToFloat(-16384, 14)) + self.assertEqual(0.99994, fixedToFloat(16383, 14)) + self.assertEqual(-0.99994, fixedToFloat(-16383, 14)) + + def test_fixedToFloat_precision6(self): + self.assertAlmostEqual(-9.98, fixedToFloat(-639, 6)) + self.assertAlmostEqual(-10.0, fixedToFloat(-640, 6)) + self.assertAlmostEqual(9.98, fixedToFloat(639, 6)) + self.assertAlmostEqual(10.0, fixedToFloat(640, 6)) + + def test_floatToFixed_precision14(self): + self.assertEqual(13107, floatToFixed(0.8, 14)) + self.assertEqual(16384, floatToFixed(1.0, 14)) + self.assertEqual(16384, floatToFixed(1, 14)) + self.assertEqual(-16384, floatToFixed(-1.0, 14)) + self.assertEqual(-16384, floatToFixed(-1, 14)) + self.assertEqual(0, floatToFixed(0, 14)) + + def test_fixedToFloat_return_float(self): + value = fixedToFloat(16384, 14) + self.assertIsInstance(value, float) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/misc/homeResFile.py 
fonttools-3.0/Tools/fontTools/misc/homeResFile.py --- fonttools-2.4/Tools/fontTools/misc/homeResFile.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/homeResFile.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,97 @@ +"""Mac-only module to find the home file of a resource.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +import array +import calldll +import macfs +import Res + + +def HomeResFile(res): + """Return a path to the file in which resource 'res' lives.""" + return GetFileLocation(res.HomeResFile()) + + +def GetFileLocation(refNum): + """Return a path to the open file identified with refNum.""" + pb = ParamBlock(refNum) + return pb.getPath() + +# +# Internal cruft, adapted from MoreFiles +# + +_InterfaceLib = calldll.getlibrary("InterfaceLib") +GetVRefNum = calldll.newcall(_InterfaceLib.GetVRefNum, "None", "InShort", "OutShort") +_getInfo = calldll.newcall(_InterfaceLib.PBGetFCBInfoSync, "Short", "InLong") + + +_FCBPBFormat = """ + qLink: l + qType: h + ioTrap: h + ioCmdAddr: l + ioCompletion: l + ioResult: h + ioNamePtr: l + ioVRefNum: h + ioRefNum: h + filler: h + ioFCBIndx: h + filler1: h + ioFCBFINm: l + ioFCBFlags: h + ioFCBStBlk: h + ioFCBEOF: l + ioFCBPLen: l + ioFCBCrPs: l + ioFCBVRefNum: h + ioFCBClpSiz: l + ioFCBParID: l +""" + +class ParamBlock(object): + + """Wrapper for the very low level FCBPB record.""" + + def __init__(self, refNum): + self.__fileName = array.array("c", "\0" * 64) + sstruct.unpack(_FCBPBFormat, + "\0" * sstruct.calcsize(_FCBPBFormat), self) + self.ioNamePtr = self.__fileName.buffer_info()[0] + self.ioRefNum = refNum + self.ioVRefNum = GetVRefNum(refNum) + self.__haveInfo = 0 + + def getInfo(self): + if self.__haveInfo: + return + data = sstruct.pack(_FCBPBFormat, self) + buf = array.array("c", data) + ptr = buf.buffer_info()[0] + err = _getInfo(ptr) + if err: + raise Res.Error("can't get file info", 
err) + sstruct.unpack(_FCBPBFormat, buf.tostring(), self) + self.__haveInfo = 1 + + def getFileName(self): + self.getInfo() + data = self.__fileName.tostring() + return data[1:byteord(data[0])+1] + + def getFSSpec(self): + self.getInfo() + vRefNum = self.ioVRefNum + parID = self.ioFCBParID + return macfs.FSSpec((vRefNum, parID, self.getFileName())) + + def getPath(self): + return self.getFSSpec().as_pathname() + + +if __name__ == "__main__": + fond = Res.GetNamedResource("FOND", "Helvetica") + print(HomeResFile(fond)) diff -Nru fonttools-2.4/Tools/fontTools/misc/__init__.py fonttools-3.0/Tools/fontTools/misc/__init__.py --- fonttools-2.4/Tools/fontTools/misc/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +"""Empty __init__.py file to signal Python this directory is a package.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * diff -Nru fonttools-2.4/Tools/fontTools/misc/macCreatorType.py fonttools-3.0/Tools/fontTools/misc/macCreatorType.py --- fonttools-2.4/Tools/fontTools/misc/macCreatorType.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/macCreatorType.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,32 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +try: + import MacOS +except ImportError: + MacOS = None +from .py23 import * + +def _reverseString(s): + s = list(s) + s.reverse() + return strjoin(s) + + +def getMacCreatorAndType(path): + if MacOS is not None: + fileCreator, fileType = MacOS.GetCreatorAndType(path) + if sys.version_info[:2] < (2, 7) and sys.byteorder == "little": + # work around bug in MacOS.GetCreatorAndType() on intel: + # http://bugs.python.org/issue1594 + # (fixed with Python 2.7) + fileCreator = _reverseString(fileCreator) + fileType = _reverseString(fileType) + return fileCreator, 
fileType + else: + return None, None + + +def setMacCreatorAndType(path, fileCreator, fileType): + if MacOS is not None: + MacOS.SetCreatorAndType(path, fileCreator, fileType) diff -Nru fonttools-2.4/Tools/fontTools/misc/psCharStrings.py fonttools-3.0/Tools/fontTools/misc/psCharStrings.py --- fonttools-2.4/Tools/fontTools/misc/psCharStrings.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/psCharStrings.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1175 @@ +"""psCharStrings.py -- module implementing various kinds of CharStrings: +CFF dictionary data and Type1/Type2 CharStrings. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import struct + + +DEBUG = 0 + + +def read_operator(self, b0, data, index): + if b0 == 12: + op = (b0, byteord(data[index])) + index = index+1 + else: + op = b0 + operator = self.operators[op] + value = self.handle_operator(operator) + return value, index + +def read_byte(self, b0, data, index): + return b0 - 139, index + +def read_smallInt1(self, b0, data, index): + b1 = byteord(data[index]) + return (b0-247)*256 + b1 + 108, index+1 + +def read_smallInt2(self, b0, data, index): + b1 = byteord(data[index]) + return -(b0-251)*256 - b1 - 108, index+1 + +def read_shortInt(self, b0, data, index): + value, = struct.unpack(">h", data[index:index+2]) + return value, index+2 + +def read_longInt(self, b0, data, index): + value, = struct.unpack(">l", data[index:index+4]) + return value, index+4 + +def read_fixed1616(self, b0, data, index): + value, = struct.unpack(">l", data[index:index+4]) + return value / 65536, index+4 + +def read_reserved(self, b0, data, index): + assert NotImplementedError + return NotImplemented, index + +def read_realNumber(self, b0, data, index): + number = '' + while True: + b = byteord(data[index]) + index = index + 1 + nibble0 = (b & 0xf0) >> 4 + nibble1 = b & 0x0f + if nibble0 == 0xf: + break + number = number + realNibbles[nibble0] + 
if nibble1 == 0xf: + break + number = number + realNibbles[nibble1] + return float(number), index + + +t1OperandEncoding = [None] * 256 +t1OperandEncoding[0:32] = (32) * [read_operator] +t1OperandEncoding[32:247] = (247 - 32) * [read_byte] +t1OperandEncoding[247:251] = (251 - 247) * [read_smallInt1] +t1OperandEncoding[251:255] = (255 - 251) * [read_smallInt2] +t1OperandEncoding[255] = read_longInt +assert len(t1OperandEncoding) == 256 + +t2OperandEncoding = t1OperandEncoding[:] +t2OperandEncoding[28] = read_shortInt +t2OperandEncoding[255] = read_fixed1616 + +cffDictOperandEncoding = t2OperandEncoding[:] +cffDictOperandEncoding[29] = read_longInt +cffDictOperandEncoding[30] = read_realNumber +cffDictOperandEncoding[255] = read_reserved + + +realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'E', 'E-', None, '-'] +realNibblesDict = {v:i for i,v in enumerate(realNibbles)} + + +class ByteCodeBase(object): + pass + + +def buildOperatorDict(operatorList): + oper = {} + opc = {} + for item in operatorList: + if len(item) == 2: + oper[item[0]] = item[1] + else: + oper[item[0]] = item[1:] + if isinstance(item[0], tuple): + opc[item[1]] = item[0] + else: + opc[item[1]] = (item[0],) + return oper, opc + + +t2Operators = [ +# opcode name + (1, 'hstem'), + (3, 'vstem'), + (4, 'vmoveto'), + (5, 'rlineto'), + (6, 'hlineto'), + (7, 'vlineto'), + (8, 'rrcurveto'), + (10, 'callsubr'), + (11, 'return'), + (14, 'endchar'), + (16, 'blend'), + (18, 'hstemhm'), + (19, 'hintmask'), + (20, 'cntrmask'), + (21, 'rmoveto'), + (22, 'hmoveto'), + (23, 'vstemhm'), + (24, 'rcurveline'), + (25, 'rlinecurve'), + (26, 'vvcurveto'), + (27, 'hhcurveto'), +# (28, 'shortint'), # not really an operator + (29, 'callgsubr'), + (30, 'vhcurveto'), + (31, 'hvcurveto'), + ((12, 0), 'ignore'), # dotsection. Yes, there a few very early OTF/CFF + # fonts with this deprecated operator. Just ignore it. 
+ ((12, 3), 'and'), + ((12, 4), 'or'), + ((12, 5), 'not'), + ((12, 8), 'store'), + ((12, 9), 'abs'), + ((12, 10), 'add'), + ((12, 11), 'sub'), + ((12, 12), 'div'), + ((12, 13), 'load'), + ((12, 14), 'neg'), + ((12, 15), 'eq'), + ((12, 18), 'drop'), + ((12, 20), 'put'), + ((12, 21), 'get'), + ((12, 22), 'ifelse'), + ((12, 23), 'random'), + ((12, 24), 'mul'), + ((12, 26), 'sqrt'), + ((12, 27), 'dup'), + ((12, 28), 'exch'), + ((12, 29), 'index'), + ((12, 30), 'roll'), + ((12, 34), 'hflex'), + ((12, 35), 'flex'), + ((12, 36), 'hflex1'), + ((12, 37), 'flex1'), +] + + +def getIntEncoder(format): + if format == "cff": + fourByteOp = bytechr(29) + elif format == "t1": + fourByteOp = bytechr(255) + else: + assert format == "t2" + fourByteOp = None + + def encodeInt(value, fourByteOp=fourByteOp, bytechr=bytechr, + pack=struct.pack, unpack=struct.unpack): + if -107 <= value <= 107: + code = bytechr(value + 139) + elif 108 <= value <= 1131: + value = value - 108 + code = bytechr((value >> 8) + 247) + bytechr(value & 0xFF) + elif -1131 <= value <= -108: + value = -value - 108 + code = bytechr((value >> 8) + 251) + bytechr(value & 0xFF) + elif fourByteOp is None: + # T2 only supports 2 byte ints + if -32768 <= value <= 32767: + code = bytechr(28) + pack(">h", value) + else: + # Backwards compatible hack: due to a previous bug in FontTools, + # 16.16 fixed numbers were written out as 4-byte ints. When + # these numbers were small, they were wrongly written back as + # small ints instead of 4-byte ints, breaking round-tripping. + # This here workaround doesn't do it any better, since we can't + # distinguish anymore between small ints that were supposed to + # be small fixed numbers and small ints that were just small + # ints. Hence the warning. + import sys + sys.stderr.write("Warning: 4-byte T2 number got passed to the " + "IntType handler. 
This should happen only when reading in " + "old XML files.\n") + code = bytechr(255) + pack(">l", value) + else: + code = fourByteOp + pack(">l", value) + return code + + return encodeInt + + +encodeIntCFF = getIntEncoder("cff") +encodeIntT1 = getIntEncoder("t1") +encodeIntT2 = getIntEncoder("t2") + +def encodeFixed(f, pack=struct.pack): + # For T2 only + return b"\xff" + pack(">l", int(round(f * 65536))) + +def encodeFloat(f): + # For CFF only, used in cffLib + s = str(f).upper() + if s[:2] == "0.": + s = s[1:] + elif s[:3] == "-0.": + s = "-" + s[2:] + nibbles = [] + while s: + c = s[0] + s = s[1:] + if c == "E" and s[:1] == "-": + s = s[1:] + c = "E-" + nibbles.append(realNibblesDict[c]) + nibbles.append(0xf) + if len(nibbles) % 2: + nibbles.append(0xf) + d = bytechr(30) + for i in range(0, len(nibbles), 2): + d = d + bytechr(nibbles[i] << 4 | nibbles[i+1]) + return d + + +class CharStringCompileError(Exception): pass + + +class T2CharString(ByteCodeBase): + + operandEncoding = t2OperandEncoding + operators, opcodes = buildOperatorDict(t2Operators) + + def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None): + if program is None: + program = [] + self.bytecode = bytecode + self.program = program + self.private = private + self.globalSubrs = globalSubrs if globalSubrs is not None else [] + + def __repr__(self): + if self.bytecode is None: + return "<%s (source) at %x>" % (self.__class__.__name__, id(self)) + else: + return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self)) + + def getIntEncoder(self): + return encodeIntT2 + + def getFixedEncoder(self): + return encodeFixed + + def decompile(self): + if not self.needsDecompilation(): + return + subrs = getattr(self.private, "Subrs", []) + decompiler = SimpleT2Decompiler(subrs, self.globalSubrs) + decompiler.execute(self) + + def draw(self, pen): + subrs = getattr(self.private, "Subrs", []) + extractor = T2OutlineExtractor(pen, subrs, self.globalSubrs, + 
self.private.nominalWidthX, self.private.defaultWidthX) + extractor.execute(self) + self.width = extractor.width + + def compile(self): + if self.bytecode is not None: + return + assert self.program, "illegal CharString: decompiled to empty program" + assert self.program[-1] in ("endchar", "return", "callsubr", "callgsubr", + "seac"), "illegal CharString" + bytecode = [] + opcodes = self.opcodes + program = self.program + encodeInt = self.getIntEncoder() + encodeFixed = self.getFixedEncoder() + i = 0 + end = len(program) + while i < end: + token = program[i] + i = i + 1 + tp = type(token) + if issubclass(tp, basestring): + try: + bytecode.extend(bytechr(b) for b in opcodes[token]) + except KeyError: + raise CharStringCompileError("illegal operator: %s" % token) + if token in ('hintmask', 'cntrmask'): + bytecode.append(program[i]) # hint mask + i = i + 1 + elif tp == int: + bytecode.append(encodeInt(token)) + elif tp == float: + bytecode.append(encodeFixed(token)) + else: + assert 0, "unsupported type: %s" % tp + try: + bytecode = bytesjoin(bytecode) + except TypeError: + print(bytecode) + raise + self.setBytecode(bytecode) + + def needsDecompilation(self): + return self.bytecode is not None + + def setProgram(self, program): + self.program = program + self.bytecode = None + + def setBytecode(self, bytecode): + self.bytecode = bytecode + self.program = None + + def getToken(self, index, + len=len, byteord=byteord, basestring=basestring, + isinstance=isinstance): + if self.bytecode is not None: + if index >= len(self.bytecode): + return None, 0, 0 + b0 = byteord(self.bytecode[index]) + index = index + 1 + handler = self.operandEncoding[b0] + token, index = handler(self, b0, self.bytecode, index) + else: + if index >= len(self.program): + return None, 0, 0 + token = self.program[index] + index = index + 1 + isOperator = isinstance(token, basestring) + return token, isOperator, index + + def getBytes(self, index, nBytes): + if self.bytecode is not None: + newIndex = 
index + nBytes + bytes = self.bytecode[index:newIndex] + index = newIndex + else: + bytes = self.program[index] + index = index + 1 + assert len(bytes) == nBytes + return bytes, index + + def handle_operator(self, operator): + return operator + + def toXML(self, xmlWriter): + from fontTools.misc.textTools import num2binary + if self.bytecode is not None: + xmlWriter.dumphex(self.bytecode) + else: + index = 0 + args = [] + while True: + token, isOperator, index = self.getToken(index) + if token is None: + break + if isOperator: + args = [str(arg) for arg in args] + if token in ('hintmask', 'cntrmask'): + hintMask, isOperator, index = self.getToken(index) + bits = [] + for byte in hintMask: + bits.append(num2binary(byteord(byte), 8)) + hintMask = strjoin(bits) + line = ' '.join(args + [token, hintMask]) + else: + line = ' '.join(args + [token]) + xmlWriter.write(line) + xmlWriter.newline() + args = [] + else: + args.append(token) + + def fromXML(self, name, attrs, content): + from fontTools.misc.textTools import binary2num, readHex + if attrs.get("raw"): + self.setBytecode(readHex(content)) + return + content = strjoin(content) + content = content.split() + program = [] + end = len(content) + i = 0 + while i < end: + token = content[i] + i = i + 1 + try: + token = int(token) + except ValueError: + try: + token = float(token) + except ValueError: + program.append(token) + if token in ('hintmask', 'cntrmask'): + mask = content[i] + maskBytes = b"" + for j in range(0, len(mask), 8): + maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8])) + program.append(maskBytes) + i = i + 1 + else: + program.append(token) + else: + program.append(token) + self.setProgram(program) + + +t1Operators = [ +# opcode name + (1, 'hstem'), + (3, 'vstem'), + (4, 'vmoveto'), + (5, 'rlineto'), + (6, 'hlineto'), + (7, 'vlineto'), + (8, 'rrcurveto'), + (9, 'closepath'), + (10, 'callsubr'), + (11, 'return'), + (13, 'hsbw'), + (14, 'endchar'), + (21, 'rmoveto'), + (22, 'hmoveto'), + (30, 
'vhcurveto'), + (31, 'hvcurveto'), + ((12, 0), 'dotsection'), + ((12, 1), 'vstem3'), + ((12, 2), 'hstem3'), + ((12, 6), 'seac'), + ((12, 7), 'sbw'), + ((12, 12), 'div'), + ((12, 16), 'callothersubr'), + ((12, 17), 'pop'), + ((12, 33), 'setcurrentpoint'), +] + +class T1CharString(T2CharString): + + operandEncoding = t1OperandEncoding + operators, opcodes = buildOperatorDict(t1Operators) + + def __init__(self, bytecode=None, program=None, subrs=None): + if program is None: + program = [] + self.bytecode = bytecode + self.program = program + self.subrs = subrs + + def getIntEncoder(self): + return encodeIntT1 + + def getFixedEncoder(self): + def encodeFixed(value): + raise TypeError("Type 1 charstrings don't support floating point operands") + + def decompile(self): + if self.bytecode is None: + return + program = [] + index = 0 + while True: + token, isOperator, index = self.getToken(index) + if token is None: + break + program.append(token) + self.setProgram(program) + + def draw(self, pen): + extractor = T1OutlineExtractor(pen, self.subrs) + extractor.execute(self) + self.width = extractor.width + + +class SimpleT2Decompiler(object): + + def __init__(self, localSubrs, globalSubrs): + self.localSubrs = localSubrs + self.localBias = calcSubrBias(localSubrs) + self.globalSubrs = globalSubrs + self.globalBias = calcSubrBias(globalSubrs) + self.reset() + + def reset(self): + self.callingStack = [] + self.operandStack = [] + self.hintCount = 0 + self.hintMaskBytes = 0 + + def execute(self, charString): + self.callingStack.append(charString) + needsDecompilation = charString.needsDecompilation() + if needsDecompilation: + program = [] + pushToProgram = program.append + else: + pushToProgram = lambda x: None + pushToStack = self.operandStack.append + index = 0 + while True: + token, isOperator, index = charString.getToken(index) + if token is None: + break # we're done! 
+ pushToProgram(token) + if isOperator: + handlerName = "op_" + token + handler = getattr(self, handlerName, None) + if handler is not None: + rv = handler(index) + if rv: + hintMaskBytes, index = rv + pushToProgram(hintMaskBytes) + else: + self.popall() + else: + pushToStack(token) + if needsDecompilation: + assert program, "illegal CharString: decompiled to empty program" + assert program[-1] in ("endchar", "return", "callsubr", "callgsubr", + "seac"), "illegal CharString" + charString.setProgram(program) + del self.callingStack[-1] + + def pop(self): + value = self.operandStack[-1] + del self.operandStack[-1] + return value + + def popall(self): + stack = self.operandStack[:] + self.operandStack[:] = [] + return stack + + def push(self, value): + self.operandStack.append(value) + + def op_return(self, index): + if self.operandStack: + pass + + def op_endchar(self, index): + pass + + def op_ignore(self, index): + pass + + def op_callsubr(self, index): + subrIndex = self.pop() + subr = self.localSubrs[subrIndex+self.localBias] + self.execute(subr) + + def op_callgsubr(self, index): + subrIndex = self.pop() + subr = self.globalSubrs[subrIndex+self.globalBias] + self.execute(subr) + + def op_hstem(self, index): + self.countHints() + def op_vstem(self, index): + self.countHints() + def op_hstemhm(self, index): + self.countHints() + def op_vstemhm(self, index): + self.countHints() + + def op_hintmask(self, index): + if not self.hintMaskBytes: + self.countHints() + self.hintMaskBytes = (self.hintCount + 7) // 8 + hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes) + return hintMaskBytes, index + + op_cntrmask = op_hintmask + + def countHints(self): + args = self.popall() + self.hintCount = self.hintCount + len(args) // 2 + + # misc + def op_and(self, index): + raise NotImplementedError + def op_or(self, index): + raise NotImplementedError + def op_not(self, index): + raise NotImplementedError + def op_store(self, index): + raise 
NotImplementedError + def op_abs(self, index): + raise NotImplementedError + def op_add(self, index): + raise NotImplementedError + def op_sub(self, index): + raise NotImplementedError + def op_div(self, index): + raise NotImplementedError + def op_load(self, index): + raise NotImplementedError + def op_neg(self, index): + raise NotImplementedError + def op_eq(self, index): + raise NotImplementedError + def op_drop(self, index): + raise NotImplementedError + def op_put(self, index): + raise NotImplementedError + def op_get(self, index): + raise NotImplementedError + def op_ifelse(self, index): + raise NotImplementedError + def op_random(self, index): + raise NotImplementedError + def op_mul(self, index): + raise NotImplementedError + def op_sqrt(self, index): + raise NotImplementedError + def op_dup(self, index): + raise NotImplementedError + def op_exch(self, index): + raise NotImplementedError + def op_index(self, index): + raise NotImplementedError + def op_roll(self, index): + raise NotImplementedError + +class T2OutlineExtractor(SimpleT2Decompiler): + + def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX): + SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) + self.pen = pen + self.nominalWidthX = nominalWidthX + self.defaultWidthX = defaultWidthX + + def reset(self): + SimpleT2Decompiler.reset(self) + self.hints = [] + self.gotWidth = 0 + self.width = 0 + self.currentPoint = (0, 0) + self.sawMoveTo = 0 + + def _nextPoint(self, point): + x, y = self.currentPoint + point = x + point[0], y + point[1] + self.currentPoint = point + return point + + def rMoveTo(self, point): + self.pen.moveTo(self._nextPoint(point)) + self.sawMoveTo = 1 + + def rLineTo(self, point): + if not self.sawMoveTo: + self.rMoveTo((0, 0)) + self.pen.lineTo(self._nextPoint(point)) + + def rCurveTo(self, pt1, pt2, pt3): + if not self.sawMoveTo: + self.rMoveTo((0, 0)) + nextPoint = self._nextPoint + self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), 
nextPoint(pt3)) + + def closePath(self): + if self.sawMoveTo: + self.pen.closePath() + self.sawMoveTo = 0 + + def endPath(self): + # In T2 there are no open paths, so always do a closePath when + # finishing a sub path. + self.closePath() + + def popallWidth(self, evenOdd=0): + args = self.popall() + if not self.gotWidth: + if evenOdd ^ (len(args) % 2): + self.width = self.nominalWidthX + args[0] + args = args[1:] + else: + self.width = self.defaultWidthX + self.gotWidth = 1 + return args + + def countHints(self): + args = self.popallWidth() + self.hintCount = self.hintCount + len(args) // 2 + + # + # hint operators + # + #def op_hstem(self, index): + # self.countHints() + #def op_vstem(self, index): + # self.countHints() + #def op_hstemhm(self, index): + # self.countHints() + #def op_vstemhm(self, index): + # self.countHints() + #def op_hintmask(self, index): + # self.countHints() + #def op_cntrmask(self, index): + # self.countHints() + + # + # path constructors, moveto + # + def op_rmoveto(self, index): + self.endPath() + self.rMoveTo(self.popallWidth()) + def op_hmoveto(self, index): + self.endPath() + self.rMoveTo((self.popallWidth(1)[0], 0)) + def op_vmoveto(self, index): + self.endPath() + self.rMoveTo((0, self.popallWidth(1)[0])) + def op_endchar(self, index): + self.endPath() + args = self.popallWidth() + if args: + from fontTools.encodings.StandardEncoding import StandardEncoding + # endchar can do seac accent bulding; The T2 spec says it's deprecated, + # but recent software that shall remain nameless does output it. 
+ adx, ady, bchar, achar = args + baseGlyph = StandardEncoding[bchar] + self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) + accentGlyph = StandardEncoding[achar] + self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) + + # + # path constructors, lines + # + def op_rlineto(self, index): + args = self.popall() + for i in range(0, len(args), 2): + point = args[i:i+2] + self.rLineTo(point) + + def op_hlineto(self, index): + self.alternatingLineto(1) + def op_vlineto(self, index): + self.alternatingLineto(0) + + # + # path constructors, curves + # + def op_rrcurveto(self, index): + """{dxa dya dxb dyb dxc dyc}+ rrcurveto""" + args = self.popall() + for i in range(0, len(args), 6): + dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6] + self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc)) + + def op_rcurveline(self, index): + """{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline""" + args = self.popall() + for i in range(0, len(args)-2, 6): + dxb, dyb, dxc, dyc, dxd, dyd = args[i:i+6] + self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) + self.rLineTo(args[-2:]) + + def op_rlinecurve(self, index): + """{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve""" + args = self.popall() + lineArgs = args[:-6] + for i in range(0, len(lineArgs), 2): + self.rLineTo(lineArgs[i:i+2]) + dxb, dyb, dxc, dyc, dxd, dyd = args[-6:] + self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) + + def op_vvcurveto(self, index): + "dx1? {dya dxb dyb dyc}+ vvcurveto" + args = self.popall() + if len(args) % 2: + dx1 = args[0] + args = args[1:] + else: + dx1 = 0 + for i in range(0, len(args), 4): + dya, dxb, dyb, dyc = args[i:i+4] + self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc)) + dx1 = 0 + + def op_hhcurveto(self, index): + """dy1? 
{dxa dxb dyb dxc}+ hhcurveto""" + args = self.popall() + if len(args) % 2: + dy1 = args[0] + args = args[1:] + else: + dy1 = 0 + for i in range(0, len(args), 4): + dxa, dxb, dyb, dxc = args[i:i+4] + self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0)) + dy1 = 0 + + def op_vhcurveto(self, index): + """dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30) + {dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto + """ + args = self.popall() + while args: + args = self.vcurveto(args) + if args: + args = self.hcurveto(args) + + def op_hvcurveto(self, index): + """dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf? + {dxa dxb dyb dyc dyd dxe dye dxf}+ dyf? + """ + args = self.popall() + while args: + args = self.hcurveto(args) + if args: + args = self.vcurveto(args) + + # + # path constructors, flex + # + def op_hflex(self, index): + dx1, dx2, dy2, dx3, dx4, dx5, dx6 = self.popall() + dy1 = dy3 = dy4 = dy6 = 0 + dy5 = -dy2 + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + def op_flex(self, index): + dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall() + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + def op_hflex1(self, index): + dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall() + dy3 = dy4 = 0 + dy6 = -(dy1 + dy2 + dy3 + dy4 + dy5) + + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + def op_flex1(self, index): + dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall() + dx = dx1 + dx2 + dx3 + dx4 + dx5 + dy = dy1 + dy2 + dy3 + dy4 + dy5 + if abs(dx) > abs(dy): + dx6 = d6 + dy6 = -dy + else: + dx6 = -dx + dy6 = d6 + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + + # + # MultipleMaster. Well... 
+ # + def op_blend(self, index): + self.popall() + + # misc + def op_and(self, index): + raise NotImplementedError + def op_or(self, index): + raise NotImplementedError + def op_not(self, index): + raise NotImplementedError + def op_store(self, index): + raise NotImplementedError + def op_abs(self, index): + raise NotImplementedError + def op_add(self, index): + raise NotImplementedError + def op_sub(self, index): + raise NotImplementedError + def op_div(self, index): + num2 = self.pop() + num1 = self.pop() + d1 = num1//num2 + d2 = num1/num2 + if d1 == d2: + self.push(d1) + else: + self.push(d2) + def op_load(self, index): + raise NotImplementedError + def op_neg(self, index): + raise NotImplementedError + def op_eq(self, index): + raise NotImplementedError + def op_drop(self, index): + raise NotImplementedError + def op_put(self, index): + raise NotImplementedError + def op_get(self, index): + raise NotImplementedError + def op_ifelse(self, index): + raise NotImplementedError + def op_random(self, index): + raise NotImplementedError + def op_mul(self, index): + raise NotImplementedError + def op_sqrt(self, index): + raise NotImplementedError + def op_dup(self, index): + raise NotImplementedError + def op_exch(self, index): + raise NotImplementedError + def op_index(self, index): + raise NotImplementedError + def op_roll(self, index): + raise NotImplementedError + + # + # miscellaneous helpers + # + def alternatingLineto(self, isHorizontal): + args = self.popall() + for arg in args: + if isHorizontal: + point = (arg, 0) + else: + point = (0, arg) + self.rLineTo(point) + isHorizontal = not isHorizontal + + def vcurveto(self, args): + dya, dxb, dyb, dxc = args[:4] + args = args[4:] + if len(args) == 1: + dyc = args[0] + args = [] + else: + dyc = 0 + self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc)) + return args + + def hcurveto(self, args): + dxa, dxb, dyb, dyc = args[:4] + args = args[4:] + if len(args) == 1: + dxc = args[0] + args = [] + else: + dxc = 0 + 
self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc)) + return args + + +class T1OutlineExtractor(T2OutlineExtractor): + + def __init__(self, pen, subrs): + self.pen = pen + self.subrs = subrs + self.reset() + + def reset(self): + self.flexing = 0 + self.width = 0 + self.sbx = 0 + T2OutlineExtractor.reset(self) + + def endPath(self): + if self.sawMoveTo: + self.pen.endPath() + self.sawMoveTo = 0 + + def popallWidth(self, evenOdd=0): + return self.popall() + + def exch(self): + stack = self.operandStack + stack[-1], stack[-2] = stack[-2], stack[-1] + + # + # path constructors + # + def op_rmoveto(self, index): + if self.flexing: + return + self.endPath() + self.rMoveTo(self.popall()) + def op_hmoveto(self, index): + if self.flexing: + # We must add a parameter to the stack if we are flexing + self.push(0) + return + self.endPath() + self.rMoveTo((self.popall()[0], 0)) + def op_vmoveto(self, index): + if self.flexing: + # We must add a parameter to the stack if we are flexing + self.push(0) + self.exch() + return + self.endPath() + self.rMoveTo((0, self.popall()[0])) + def op_closepath(self, index): + self.closePath() + def op_setcurrentpoint(self, index): + args = self.popall() + x, y = args + self.currentPoint = x, y + + def op_endchar(self, index): + self.endPath() + + def op_hsbw(self, index): + sbx, wx = self.popall() + self.width = wx + self.sbx = sbx + self.currentPoint = sbx, self.currentPoint[1] + def op_sbw(self, index): + self.popall() # XXX + + # + def op_callsubr(self, index): + subrIndex = self.pop() + subr = self.subrs[subrIndex] + self.execute(subr) + def op_callothersubr(self, index): + subrIndex = self.pop() + nArgs = self.pop() + #print nArgs, subrIndex, "callothersubr" + if subrIndex == 0 and nArgs == 3: + self.doFlex() + self.flexing = 0 + elif subrIndex == 1 and nArgs == 0: + self.flexing = 1 + # ignore... + def op_pop(self, index): + pass # ignore... 
+ + def doFlex(self): + finaly = self.pop() + finalx = self.pop() + self.pop() # flex height is unused + + p3y = self.pop() + p3x = self.pop() + bcp4y = self.pop() + bcp4x = self.pop() + bcp3y = self.pop() + bcp3x = self.pop() + p2y = self.pop() + p2x = self.pop() + bcp2y = self.pop() + bcp2x = self.pop() + bcp1y = self.pop() + bcp1x = self.pop() + rpy = self.pop() + rpx = self.pop() + + # call rrcurveto + self.push(bcp1x+rpx) + self.push(bcp1y+rpy) + self.push(bcp2x) + self.push(bcp2y) + self.push(p2x) + self.push(p2y) + self.op_rrcurveto(None) + + # call rrcurveto + self.push(bcp3x) + self.push(bcp3y) + self.push(bcp4x) + self.push(bcp4y) + self.push(p3x) + self.push(p3y) + self.op_rrcurveto(None) + + # Push back final coords so subr 0 can find them + self.push(finalx) + self.push(finaly) + + def op_dotsection(self, index): + self.popall() # XXX + def op_hstem3(self, index): + self.popall() # XXX + def op_seac(self, index): + "asb adx ady bchar achar seac" + from fontTools.encodings.StandardEncoding import StandardEncoding + asb, adx, ady, bchar, achar = self.popall() + baseGlyph = StandardEncoding[bchar] + self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) + accentGlyph = StandardEncoding[achar] + adx = adx + self.sbx - asb # seac weirdness + self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) + def op_vstem3(self, index): + self.popall() # XXX + + +class DictDecompiler(ByteCodeBase): + + operandEncoding = cffDictOperandEncoding + + def __init__(self, strings): + self.stack = [] + self.strings = strings + self.dict = {} + + def getDict(self): + assert len(self.stack) == 0, "non-empty stack" + return self.dict + + def decompile(self, data): + index = 0 + lenData = len(data) + push = self.stack.append + while index < lenData: + b0 = byteord(data[index]) + index = index + 1 + handler = self.operandEncoding[b0] + value, index = handler(self, b0, data, index) + if value is not None: + push(value) + + def pop(self): + value = self.stack[-1] + del 
self.stack[-1] + return value + + def popall(self): + args = self.stack[:] + del self.stack[:] + return args + + def handle_operator(self, operator): + operator, argType = operator + if isinstance(argType, type(())): + value = () + for i in range(len(argType)-1, -1, -1): + arg = argType[i] + arghandler = getattr(self, "arg_" + arg) + value = (arghandler(operator),) + value + else: + arghandler = getattr(self, "arg_" + argType) + value = arghandler(operator) + self.dict[operator] = value + + def arg_number(self, name): + return self.pop() + def arg_SID(self, name): + return self.strings[self.pop()] + def arg_array(self, name): + return self.popall() + def arg_delta(self, name): + out = [] + current = 0 + for v in self.popall(): + current = current + v + out.append(current) + return out + + +def calcSubrBias(subrs): + nSubrs = len(subrs) + if nSubrs < 1240: + bias = 107 + elif nSubrs < 33900: + bias = 1131 + else: + bias = 32768 + return bias diff -Nru fonttools-2.4/Tools/fontTools/misc/psLib.py fonttools-3.0/Tools/fontTools/misc/psLib.py --- fonttools-2.4/Tools/fontTools/misc/psLib.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/psLib.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,350 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import eexec +from .psOperators import * +import re +import collections +from string import whitespace + + +ps_special = '()<>[]{}%' # / is one too, but we take care of that one differently + +skipwhiteRE = re.compile("[%s]*" % whitespace) +endofthingPat = "[^][(){}<>/%%%s]*" % whitespace +endofthingRE = re.compile(endofthingPat) +commentRE = re.compile("%[^\n\r]*") + +# XXX This not entirely correct as it doesn't allow *nested* embedded parens: +stringPat = r""" + \( + ( + ( + [^()]* \ [()] + ) + | + ( + [^()]* \( [^()]* \) + ) + )* + [^()]* + \) +""" +stringPat = "".join(stringPat.split()) +stringRE = re.compile(stringPat) + 
+hexstringRE = re.compile("<[%s0-9A-Fa-f]*>" % whitespace) + +class PSTokenError(Exception): pass +class PSError(Exception): pass + + +class PSTokenizer(BytesIO): + + def getnexttoken(self, + # localize some stuff, for performance + len=len, + ps_special=ps_special, + stringmatch=stringRE.match, + hexstringmatch=hexstringRE.match, + commentmatch=commentRE.match, + endmatch=endofthingRE.match, + whitematch=skipwhiteRE.match): + + _, nextpos = whitematch(self.buf, self.pos).span() + self.pos = nextpos + if self.pos >= self.len: + return None, None + pos = self.pos + buf = self.buf + char = buf[pos] + if char in ps_special: + if char in '{}[]': + tokentype = 'do_special' + token = char + elif char == '%': + tokentype = 'do_comment' + _, nextpos = commentmatch(buf, pos).span() + token = buf[pos:nextpos] + elif char == '(': + tokentype = 'do_string' + m = stringmatch(buf, pos) + if m is None: + raise PSTokenError('bad string at character %d' % pos) + _, nextpos = m.span() + token = buf[pos:nextpos] + elif char == '<': + tokentype = 'do_hexstring' + m = hexstringmatch(buf, pos) + if m is None: + raise PSTokenError('bad hexstring at character %d' % pos) + _, nextpos = m.span() + token = buf[pos:nextpos] + else: + raise PSTokenError('bad token at character %d' % pos) + else: + if char == '/': + tokentype = 'do_literal' + m = endmatch(buf, pos+1) + else: + tokentype = '' + m = endmatch(buf, pos) + if m is None: + raise PSTokenError('bad token at character %d' % pos) + _, nextpos = m.span() + token = buf[pos:nextpos] + self.pos = pos + len(token) + return tokentype, token + + def skipwhite(self, whitematch=skipwhiteRE.match): + _, nextpos = whitematch(self.buf, self.pos).span() + self.pos = nextpos + + def starteexec(self): + self.pos = self.pos + 1 + #self.skipwhite() + self.dirtybuf = self.buf[self.pos:] + self.buf, R = eexec.decrypt(self.dirtybuf, 55665) + self.len = len(self.buf) + self.pos = 4 + + def stopeexec(self): + if not hasattr(self, 'dirtybuf'): + return + 
self.buf = self.dirtybuf + del self.dirtybuf + + def flush(self): + if self.buflist: + self.buf = self.buf + "".join(self.buflist) + self.buflist = [] + + +class PSInterpreter(PSOperators): + + def __init__(self): + systemdict = {} + userdict = {} + self.dictstack = [systemdict, userdict] + self.stack = [] + self.proclevel = 0 + self.procmark = ps_procmark() + self.fillsystemdict() + + def fillsystemdict(self): + systemdict = self.dictstack[0] + systemdict['['] = systemdict['mark'] = self.mark = ps_mark() + systemdict[']'] = ps_operator(']', self.do_makearray) + systemdict['true'] = ps_boolean(1) + systemdict['false'] = ps_boolean(0) + systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding) + systemdict['FontDirectory'] = ps_dict({}) + self.suckoperators(systemdict, self.__class__) + + def suckoperators(self, systemdict, klass): + for name in dir(klass): + attr = getattr(self, name) + if isinstance(attr, collections.Callable) and name[:3] == 'ps_': + name = name[3:] + systemdict[name] = ps_operator(name, attr) + for baseclass in klass.__bases__: + self.suckoperators(systemdict, baseclass) + + def interpret(self, data, getattr=getattr): + tokenizer = self.tokenizer = PSTokenizer(data) + getnexttoken = tokenizer.getnexttoken + do_token = self.do_token + handle_object = self.handle_object + try: + while 1: + tokentype, token = getnexttoken() + #print token + if not token: + break + if tokentype: + handler = getattr(self, tokentype) + object = handler(token) + else: + object = do_token(token) + if object is not None: + handle_object(object) + tokenizer.close() + self.tokenizer = None + finally: + if self.tokenizer is not None: + if 0: + print('ps error:\n- - - - - - -') + print(self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos]) + print('>>>') + print(self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50]) + print('- - - - - - -') + + def handle_object(self, object): + if not (self.proclevel or object.literal or object.type == 'proceduretype'): + 
if object.type != 'operatortype': + object = self.resolve_name(object.value) + if object.literal: + self.push(object) + else: + if object.type == 'proceduretype': + self.call_procedure(object) + else: + object.function() + else: + self.push(object) + + def call_procedure(self, proc): + handle_object = self.handle_object + for item in proc.value: + handle_object(item) + + def resolve_name(self, name): + dictstack = self.dictstack + for i in range(len(dictstack)-1, -1, -1): + if name in dictstack[i]: + return dictstack[i][name] + raise PSError('name error: ' + str(name)) + + def do_token(self, token, + int=int, + float=float, + ps_name=ps_name, + ps_integer=ps_integer, + ps_real=ps_real): + try: + num = int(token) + except (ValueError, OverflowError): + try: + num = float(token) + except (ValueError, OverflowError): + if '#' in token: + hashpos = token.find('#') + try: + base = int(token[:hashpos]) + num = int(token[hashpos+1:], base) + except (ValueError, OverflowError): + return ps_name(token) + else: + return ps_integer(num) + else: + return ps_name(token) + else: + return ps_real(num) + else: + return ps_integer(num) + + def do_comment(self, token): + pass + + def do_literal(self, token): + return ps_literal(token[1:]) + + def do_string(self, token): + return ps_string(token[1:-1]) + + def do_hexstring(self, token): + hexStr = "".join(token[1:-1].split()) + if len(hexStr) % 2: + hexStr = hexStr + '0' + cleanstr = [] + for i in range(0, len(hexStr), 2): + cleanstr.append(chr(int(hexStr[i:i+2], 16))) + cleanstr = "".join(cleanstr) + return ps_string(cleanstr) + + def do_special(self, token): + if token == '{': + self.proclevel = self.proclevel + 1 + return self.procmark + elif token == '}': + proc = [] + while 1: + topobject = self.pop() + if topobject == self.procmark: + break + proc.append(topobject) + self.proclevel = self.proclevel - 1 + proc.reverse() + return ps_procedure(proc) + elif token == '[': + return self.mark + elif token == ']': + return ps_name(']') 
+ else: + raise PSTokenError('huh?') + + def push(self, object): + self.stack.append(object) + + def pop(self, *types): + stack = self.stack + if not stack: + raise PSError('stack underflow') + object = stack[-1] + if types: + if object.type not in types: + raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type)) + del stack[-1] + return object + + def do_makearray(self): + array = [] + while 1: + topobject = self.pop() + if topobject == self.mark: + break + array.append(topobject) + array.reverse() + self.push(ps_array(array)) + + def close(self): + """Remove circular references.""" + del self.stack + del self.dictstack + + +def unpack_item(item): + tp = type(item.value) + if tp == dict: + newitem = {} + for key, value in item.value.items(): + newitem[key] = unpack_item(value) + elif tp == list: + newitem = [None] * len(item.value) + for i in range(len(item.value)): + newitem[i] = unpack_item(item.value[i]) + if item.type == 'proceduretype': + newitem = tuple(newitem) + else: + newitem = item.value + return newitem + +def suckfont(data): + m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data) + if m: + fontName = m.group(1) + else: + fontName = None + interpreter = PSInterpreter() + interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop") + interpreter.interpret(data) + fontdir = interpreter.dictstack[0]['FontDirectory'].value + if fontName in fontdir: + rawfont = fontdir[fontName] + else: + # fall back, in case fontName wasn't found + fontNames = list(fontdir.keys()) + if len(fontNames) > 1: + fontNames.remove("Helvetica") + fontNames.sort() + rawfont = fontdir[fontNames[0]] + interpreter.close() + return unpack_item(rawfont) + + +if __name__ == "__main__": + import EasyDialogs + path = EasyDialogs.AskFileForOpen() + if path: + from fontTools import t1Lib + data, kind = t1Lib.read(path) + font = suckfont(data) diff -Nru fonttools-2.4/Tools/fontTools/misc/psOperators.py 
fonttools-3.0/Tools/fontTools/misc/psOperators.py --- fonttools-2.4/Tools/fontTools/misc/psOperators.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/psOperators.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,540 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +_accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"} + + +class ps_object(object): + + literal = 1 + access = 0 + value = None + + def __init__(self, value): + self.value = value + self.type = self.__class__.__name__[3:] + "type" + + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__[3:], repr(self.value)) + + +class ps_operator(ps_object): + + literal = 0 + + def __init__(self, name, function): + self.name = name + self.function = function + self.type = self.__class__.__name__[3:] + "type" + def __repr__(self): + return "<operator %s>" % self.name + +class ps_procedure(ps_object): + literal = 0 + def __repr__(self): + return "<procedure>" + def __str__(self): + psstring = '{' + for i in range(len(self.value)): + if i: + psstring = psstring + ' ' + str(self.value[i]) + else: + psstring = psstring + str(self.value[i]) + return psstring + '}' + +class ps_name(ps_object): + literal = 0 + def __str__(self): + if self.literal: + return '/' + self.value + else: + return self.value + +class ps_literal(ps_object): + def __str__(self): + return '/' + self.value + +class ps_array(ps_object): + def __str__(self): + psstring = '[' + for i in range(len(self.value)): + item = self.value[i] + access = _accessstrings[item.access] + if access: + access = ' ' + access + if i: + psstring = psstring + ' ' + str(item) + access + else: + psstring = psstring + str(item) + access + return psstring + ']' + def __repr__(self): + return "<array>" + +_type1_pre_eexec_order = [ + "FontInfo", + "FontName", + "Encoding", + "PaintType", + "FontType", + "FontMatrix", + "FontBBox", + "UniqueID", + "Metrics", + 
"StrokeWidth" + ] + +_type1_fontinfo_order = [ + "version", + "Notice", + "FullName", + "FamilyName", + "Weight", + "ItalicAngle", + "isFixedPitch", + "UnderlinePosition", + "UnderlineThickness" + ] + +_type1_post_eexec_order = [ + "Private", + "CharStrings", + "FID" + ] + +def _type1_item_repr(key, value): + psstring = "" + access = _accessstrings[value.access] + if access: + access = access + ' ' + if key == 'CharStrings': + psstring = psstring + "/%s %s def\n" % (key, _type1_CharString_repr(value.value)) + elif key == 'Encoding': + psstring = psstring + _type1_Encoding_repr(value, access) + else: + psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access) + return psstring + +def _type1_Encoding_repr(encoding, access): + encoding = encoding.value + psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n" + for i in range(256): + name = encoding[i].value + if name != '.notdef': + psstring = psstring + "dup %d /%s put\n" % (i, name) + return psstring + access + "def\n" + +def _type1_CharString_repr(charstrings): + items = sorted(charstrings.items()) + return 'xxx' + +class ps_font(ps_object): + def __str__(self): + psstring = "%d dict dup begin\n" % len(self.value) + for key in _type1_pre_eexec_order: + try: + value = self.value[key] + except KeyError: + pass + else: + psstring = psstring + _type1_item_repr(key, value) + items = sorted(self.value.items()) + for key, value in items: + if key not in _type1_pre_eexec_order + _type1_post_eexec_order: + psstring = psstring + _type1_item_repr(key, value) + psstring = psstring + "currentdict end\ncurrentfile eexec\ndup " + for key in _type1_post_eexec_order: + try: + value = self.value[key] + except KeyError: + pass + else: + psstring = psstring + _type1_item_repr(key, value) + return psstring + 'dup/FontName get exch definefont pop\nmark currentfile closefile\n' + \ + 8 * (64 * '0' + '\n') + 'cleartomark' + '\n' + def __repr__(self): + return '<font>' + +class ps_file(ps_object): + pass 
+ +class ps_dict(ps_object): + def __str__(self): + psstring = "%d dict dup begin\n" % len(self.value) + items = sorted(self.value.items()) + for key, value in items: + access = _accessstrings[value.access] + if access: + access = access + ' ' + psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access) + return psstring + 'end ' + def __repr__(self): + return "<dict>" + +class ps_mark(ps_object): + def __init__(self): + self.value = 'mark' + self.type = self.__class__.__name__[3:] + "type" + +class ps_procmark(ps_object): + def __init__(self): + self.value = 'procmark' + self.type = self.__class__.__name__[3:] + "type" + +class ps_null(ps_object): + def __init__(self): + self.type = self.__class__.__name__[3:] + "type" + +class ps_boolean(ps_object): + def __str__(self): + if self.value: + return 'true' + else: + return 'false' + +class ps_string(ps_object): + def __str__(self): + return "(%s)" % repr(self.value)[1:-1] + +class ps_integer(ps_object): + def __str__(self): + return repr(self.value) + +class ps_real(ps_object): + def __str__(self): + return repr(self.value) + + +class PSOperators(object): + + def ps_def(self): + obj = self.pop() + name = self.pop() + self.dictstack[-1][name.value] = obj + + def ps_bind(self): + proc = self.pop('proceduretype') + self.proc_bind(proc) + self.push(proc) + + def proc_bind(self, proc): + for i in range(len(proc.value)): + item = proc.value[i] + if item.type == 'proceduretype': + self.proc_bind(item) + else: + if not item.literal: + try: + obj = self.resolve_name(item.value) + except: + pass + else: + if obj.type == 'operatortype': + proc.value[i] = obj + + def ps_exch(self): + if len(self.stack) < 2: + raise RuntimeError('stack underflow') + obj1 = self.pop() + obj2 = self.pop() + self.push(obj1) + self.push(obj2) + + def ps_dup(self): + if not self.stack: + raise RuntimeError('stack underflow') + self.push(self.stack[-1]) + + def ps_exec(self): + obj = self.pop() + if obj.type == 'proceduretype': + 
self.call_procedure(obj) + else: + self.handle_object(obj) + + def ps_count(self): + self.push(ps_integer(len(self.stack))) + + def ps_eq(self): + any1 = self.pop() + any2 = self.pop() + self.push(ps_boolean(any1.value == any2.value)) + + def ps_ne(self): + any1 = self.pop() + any2 = self.pop() + self.push(ps_boolean(any1.value != any2.value)) + + def ps_cvx(self): + obj = self.pop() + obj.literal = 0 + self.push(obj) + + def ps_matrix(self): + matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)] + self.push(ps_array(matrix)) + + def ps_string(self): + num = self.pop('integertype').value + self.push(ps_string('\0' * num)) + + def ps_type(self): + obj = self.pop() + self.push(ps_string(obj.type)) + + def ps_store(self): + value = self.pop() + key = self.pop() + name = key.value + for i in range(len(self.dictstack)-1, -1, -1): + if name in self.dictstack[i]: + self.dictstack[i][name] = value + break + self.dictstack[-1][name] = value + + def ps_where(self): + name = self.pop() + # XXX + self.push(ps_boolean(0)) + + def ps_systemdict(self): + self.push(ps_dict(self.dictstack[0])) + + def ps_userdict(self): + self.push(ps_dict(self.dictstack[1])) + + def ps_currentdict(self): + self.push(ps_dict(self.dictstack[-1])) + + def ps_currentfile(self): + self.push(ps_file(self.tokenizer)) + + def ps_eexec(self): + f = self.pop('filetype').value + f.starteexec() + + def ps_closefile(self): + f = self.pop('filetype').value + f.skipwhite() + f.stopeexec() + + def ps_cleartomark(self): + obj = self.pop() + while obj != self.mark: + obj = self.pop() + + def ps_readstring(self, + ps_boolean=ps_boolean, + len=len): + s = self.pop('stringtype') + oldstr = s.value + f = self.pop('filetype') + #pad = file.value.read(1) + # for StringIO, this is faster + f.value.pos = f.value.pos + 1 + newstr = f.value.read(len(oldstr)) + s.value = newstr + self.push(s) + self.push(ps_boolean(len(oldstr) == len(newstr))) + + def ps_known(self): + key = 
self.pop() + d = self.pop('dicttype', 'fonttype') + self.push(ps_boolean(key.value in d.value)) + + def ps_if(self): + proc = self.pop('proceduretype') + if self.pop('booleantype').value: + self.call_procedure(proc) + + def ps_ifelse(self): + proc2 = self.pop('proceduretype') + proc1 = self.pop('proceduretype') + if self.pop('booleantype').value: + self.call_procedure(proc1) + else: + self.call_procedure(proc2) + + def ps_readonly(self): + obj = self.pop() + if obj.access < 1: + obj.access = 1 + self.push(obj) + + def ps_executeonly(self): + obj = self.pop() + if obj.access < 2: + obj.access = 2 + self.push(obj) + + def ps_noaccess(self): + obj = self.pop() + if obj.access < 3: + obj.access = 3 + self.push(obj) + + def ps_not(self): + obj = self.pop('booleantype', 'integertype') + if obj.type == 'booleantype': + self.push(ps_boolean(not obj.value)) + else: + self.push(ps_integer(~obj.value)) + + def ps_print(self): + str = self.pop('stringtype') + print('PS output --->', str.value) + + def ps_anchorsearch(self): + seek = self.pop('stringtype') + s = self.pop('stringtype') + seeklen = len(seek.value) + if s.value[:seeklen] == seek.value: + self.push(ps_string(s.value[seeklen:])) + self.push(seek) + self.push(ps_boolean(1)) + else: + self.push(s) + self.push(ps_boolean(0)) + + def ps_array(self): + num = self.pop('integertype') + array = ps_array([None] * num.value) + self.push(array) + + def ps_astore(self): + array = self.pop('arraytype') + for i in range(len(array.value)-1, -1, -1): + array.value[i] = self.pop() + self.push(array) + + def ps_load(self): + name = self.pop() + self.push(self.resolve_name(name.value)) + + def ps_put(self): + obj1 = self.pop() + obj2 = self.pop() + obj3 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype') + tp = obj3.type + if tp == 'arraytype' or tp == 'proceduretype': + obj3.value[obj2.value] = obj1 + elif tp == 'dicttype': + obj3.value[obj2.value] = obj1 + elif tp == 'stringtype': + index = obj2.value + obj3.value = 
obj3.value[:index] + chr(obj1.value) + obj3.value[index+1:] + + def ps_get(self): + obj1 = self.pop() + if obj1.value == "Encoding": + pass + obj2 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype', 'fonttype') + tp = obj2.type + if tp in ('arraytype', 'proceduretype'): + self.push(obj2.value[obj1.value]) + elif tp in ('dicttype', 'fonttype'): + self.push(obj2.value[obj1.value]) + elif tp == 'stringtype': + self.push(ps_integer(ord(obj2.value[obj1.value]))) + else: + assert False, "shouldn't get here" + + def ps_getinterval(self): + obj1 = self.pop('integertype') + obj2 = self.pop('integertype') + obj3 = self.pop('arraytype', 'stringtype') + tp = obj3.type + if tp == 'arraytype': + self.push(ps_array(obj3.value[obj2.value:obj2.value + obj1.value])) + elif tp == 'stringtype': + self.push(ps_string(obj3.value[obj2.value:obj2.value + obj1.value])) + + def ps_putinterval(self): + obj1 = self.pop('arraytype', 'stringtype') + obj2 = self.pop('integertype') + obj3 = self.pop('arraytype', 'stringtype') + tp = obj3.type + if tp == 'arraytype': + obj3.value[obj2.value:obj2.value + len(obj1.value)] = obj1.value + elif tp == 'stringtype': + newstr = obj3.value[:obj2.value] + newstr = newstr + obj1.value + newstr = newstr + obj3.value[obj2.value + len(obj1.value):] + obj3.value = newstr + + def ps_cvn(self): + self.push(ps_name(self.pop('stringtype').value)) + + def ps_index(self): + n = self.pop('integertype').value + if n < 0: + raise RuntimeError('index may not be negative') + self.push(self.stack[-1-n]) + + def ps_for(self): + proc = self.pop('proceduretype') + limit = self.pop('integertype', 'realtype').value + increment = self.pop('integertype', 'realtype').value + i = self.pop('integertype', 'realtype').value + while 1: + if increment > 0: + if i > limit: + break + else: + if i < limit: + break + if type(i) == type(0.0): + self.push(ps_real(i)) + else: + self.push(ps_integer(i)) + self.call_procedure(proc) + i = i + increment + + def ps_forall(self): + 
proc = self.pop('proceduretype') + obj = self.pop('arraytype', 'stringtype', 'dicttype') + tp = obj.type + if tp == 'arraytype': + for item in obj.value: + self.push(item) + self.call_procedure(proc) + elif tp == 'stringtype': + for item in obj.value: + self.push(ps_integer(ord(item))) + self.call_procedure(proc) + elif tp == 'dicttype': + for key, value in obj.value.items(): + self.push(ps_name(key)) + self.push(value) + self.call_procedure(proc) + + def ps_definefont(self): + font = self.pop('dicttype') + name = self.pop() + font = ps_font(font.value) + self.dictstack[0]['FontDirectory'].value[name.value] = font + self.push(font) + + def ps_findfont(self): + name = self.pop() + font = self.dictstack[0]['FontDirectory'].value[name.value] + self.push(font) + + def ps_pop(self): + self.pop() + + def ps_dict(self): + self.pop('integertype') + self.push(ps_dict({})) + + def ps_begin(self): + self.dictstack.append(self.pop('dicttype').value) + + def ps_end(self): + if len(self.dictstack) > 2: + del self.dictstack[-1] + else: + raise RuntimeError('dictstack underflow') + +notdef = '.notdef' +from fontTools.encodings.StandardEncoding import StandardEncoding +ps_StandardEncoding = list(map(ps_name, StandardEncoding)) diff -Nru fonttools-2.4/Tools/fontTools/misc/py23.py fonttools-3.0/Tools/fontTools/misc/py23.py --- fonttools-2.4/Tools/fontTools/misc/py23.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/py23.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,161 @@ +"""Python 2/3 compat layer.""" + +from __future__ import print_function, division, absolute_import +import sys + +try: + basestring +except NameError: + basestring = str + +try: + unicode +except NameError: + unicode = str + +try: + unichr + + if sys.maxunicode < 0x10FFFF: + # workarounds for Python 2 "narrow" builds with UCS2-only support. + + _narrow_unichr = unichr + + def unichr(i): + """ + Return the unicode character whose Unicode code is the integer 'i'. 
+ The valid range is 0 to 0x10FFFF inclusive. + + >>> _narrow_unichr(0xFFFF + 1) + Traceback (most recent call last): + File "<stdin>", line 1, in ? + ValueError: unichr() arg not in range(0x10000) (narrow Python build) + >>> unichr(0xFFFF + 1) == u'\U00010000' + True + >>> unichr(1114111) == u'\U0010FFFF' + True + >>> unichr(0x10FFFF + 1) + Traceback (most recent call last): + File "<stdin>", line 1, in ? + ValueError: unichr() arg not in range(0x110000) + """ + try: + return _narrow_unichr(i) + except ValueError: + try: + padded_hex_str = hex(i)[2:].zfill(8) + escape_str = "\\U" + padded_hex_str + return escape_str.decode("unicode-escape") + except UnicodeDecodeError: + raise ValueError('unichr() arg not in range(0x110000)') + + import re + _unicode_escape_RE = re.compile(r'\\U[A-Fa-f0-9]{8}') + + def byteord(c): + """ + Given a 8-bit or unicode character, return an integer representing the + Unicode code point of the character. If a unicode argument is given, the + character's code point must be in the range 0 to 0x10FFFF inclusive. + + >>> ord(u'\U00010000') + Traceback (most recent call last): + File "<stdin>", line 1, in ? + TypeError: ord() expected a character, but string of length 2 found + >>> byteord(u'\U00010000') == 0xFFFF + 1 + True + >>> byteord(u'\U0010FFFF') == 1114111 + True + """ + try: + return ord(c) + except TypeError as e: + try: + escape_str = c.encode('unicode-escape') + if not _unicode_escape_RE.match(escape_str): + raise + hex_str = escape_str[3:] + return int(hex_str, 16) + except: + raise TypeError(e) + + else: + byteord = ord + bytechr = chr + +except NameError: + unichr = chr + def bytechr(n): + return bytes([n]) + def byteord(c): + return c if isinstance(c, int) else ord(c) + + +# the 'io' module provides the same I/O interface on both 2 and 3. +# here we define an alias of io.StringIO to disambiguate it eternally... 
+from io import BytesIO +from io import StringIO as UnicodeIO +try: + # in python 2, by 'StringIO' we still mean a stream of *byte* strings + from StringIO import StringIO +except ImportError: + # in Python 3, we mean instead a stream of *unicode* strings + StringIO = UnicodeIO + + +def strjoin(iterable, joiner=''): + return tostr(joiner).join(iterable) + +def tobytes(s, encoding='ascii', errors='strict'): + if not isinstance(s, bytes): + return s.encode(encoding, errors) + else: + return s +def tounicode(s, encoding='ascii', errors='strict'): + if not isinstance(s, unicode): + return s.decode(encoding, errors) + else: + return s + +if str == bytes: + class Tag(str): + def tobytes(self): + if isinstance(self, bytes): + return self + else: + return self.encode('latin1') + + tostr = tobytes + + bytesjoin = strjoin +else: + class Tag(str): + + @staticmethod + def transcode(blob): + if not isinstance(blob, str): + blob = blob.decode('latin-1') + return blob + + def __new__(self, content): + return str.__new__(self, self.transcode(content)) + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + return str.__eq__(self, self.transcode(other)) + + def __hash__(self): + return str.__hash__(self) + + def tobytes(self): + return self.encode('latin-1') + + tostr = tounicode + + def bytesjoin(iterable, joiner=b''): + return tobytes(joiner).join(tobytes(item) for item in iterable) + + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Tools/fontTools/misc/sstruct.py fonttools-3.0/Tools/fontTools/misc/sstruct.py --- fonttools-2.4/Tools/fontTools/misc/sstruct.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/sstruct.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,211 @@ +"""sstruct.py -- SuperStruct + +Higher level layer on top of the struct module, enabling to +bind names to struct elements. 
The interface is similar to +struct, except the objects passed and returned are not tuples +(or argument lists), but dictionaries or instances. + +Just like struct, we use fmt strings to describe a data +structure, except we use one line per element. Lines are +separated by newlines or semi-colons. Each line contains +either one of the special struct characters ('@', '=', '<', +'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f'). +Repetitions, like the struct module offers them are not useful +in this context, except for fixed length strings (eg. 'myInt:5h' +is not allowed but 'myString:5s' is). The 'x' fmt character +(pad byte) is treated as 'special', since it is by definition +anonymous. Extra whitespace is allowed everywhere. + +The sstruct module offers one feature that the "normal" struct +module doesn't: support for fixed point numbers. These are spelled +as "n.mF", where n is the number of bits before the point, and m +the number of bits after the point. Fixed point numbers get +converted to floats. + +pack(fmt, object): + 'object' is either a dictionary or an instance (or actually + anything that has a __dict__ attribute). If it is a dictionary, + its keys are used for names. If it is an instance, it's + attributes are used to grab struct elements from. Returns + a string containing the data. + +unpack(fmt, data, object=None) + If 'object' is omitted (or None), a new dictionary will be + returned. If 'object' is a dictionary, it will be used to add + struct elements to. If it is an instance (or in fact anything + that has a __dict__ attribute), an attribute will be added for + each struct element. In the latter two cases, 'object' itself + is returned. + +unpack2(fmt, data, object=None) + Convenience function. Same as unpack, except data may be longer + than needed. The returned value is a tuple: (object, leftoverdata). + +calcsize(fmt) + like struct.calcsize(), but uses our own fmt strings: + it returns the size of the data in bytes. 
+""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +import struct +import re + +__version__ = "1.2" +__copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>" + + +class Error(Exception): + pass + +def pack(fmt, obj): + formatstring, names, fixes = getformat(fmt) + elements = [] + if not isinstance(obj, dict): + obj = obj.__dict__ + for name in names: + value = obj[name] + if name in fixes: + # fixed point conversion + value = fl2fi(value, fixes[name]) + elif isinstance(value, basestring): + value = tobytes(value) + elements.append(value) + data = struct.pack(*(formatstring,) + tuple(elements)) + return data + +def unpack(fmt, data, obj=None): + if obj is None: + obj = {} + data = tobytes(data) + formatstring, names, fixes = getformat(fmt) + if isinstance(obj, dict): + d = obj + else: + d = obj.__dict__ + elements = struct.unpack(formatstring, data) + for i in range(len(names)): + name = names[i] + value = elements[i] + if name in fixes: + # fixed point conversion + value = fi2fl(value, fixes[name]) + elif isinstance(value, bytes): + try: + value = tostr(value) + except UnicodeDecodeError: + pass + d[name] = value + return obj + +def unpack2(fmt, data, obj=None): + length = calcsize(fmt) + return unpack(fmt, data[:length], obj), data[length:] + +def calcsize(fmt): + formatstring, names, fixes = getformat(fmt) + return struct.calcsize(formatstring) + + +# matches "name:formatchar" (whitespace is allowed) +_elementRE = re.compile( + "\s*" # whitespace + "([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier) + "\s*:\s*" # whitespace : whitespace + "([cbBhHiIlLqQfd]|[0-9]+[ps]|" # formatchar... 
+ "([0-9]+)\.([0-9]+)(F))" # ...formatchar + "\s*" # whitespace + "(#.*)?$" # [comment] + end of string + ) + +# matches the special struct fmt chars and 'x' (pad byte) +_extraRE = re.compile("\s*([x@=<>!])\s*(#.*)?$") + +# matches an "empty" string, possibly containing whitespace and/or a comment +_emptyRE = re.compile("\s*(#.*)?$") + +_fixedpointmappings = { + 8: "b", + 16: "h", + 32: "l"} + +_formatcache = {} + +def getformat(fmt): + try: + formatstring, names, fixes = _formatcache[fmt] + except KeyError: + lines = re.split("[\n;]", fmt) + formatstring = "" + names = [] + fixes = {} + for line in lines: + if _emptyRE.match(line): + continue + m = _extraRE.match(line) + if m: + formatchar = m.group(1) + if formatchar != 'x' and formatstring: + raise Error("a special fmt char must be first") + else: + m = _elementRE.match(line) + if not m: + raise Error("syntax error in fmt: '%s'" % line) + name = m.group(1) + names.append(name) + formatchar = m.group(2) + if m.group(3): + # fixed point + before = int(m.group(3)) + after = int(m.group(4)) + bits = before + after + if bits not in [8, 16, 32]: + raise Error("fixed point must be 8, 16 or 32 bits long") + formatchar = _fixedpointmappings[bits] + assert m.group(5) == "F" + fixes[name] = after + formatstring = formatstring + formatchar + _formatcache[fmt] = formatstring, names, fixes + return formatstring, names, fixes + +def _test(): + fmt = """ + # comments are allowed + > # big endian (see documentation for struct) + # empty lines are allowed: + + ashort: h + along: l + abyte: b # a byte + achar: c + astr: 5s + afloat: f; adouble: d # multiple "statements" are allowed + afixed: 16.16F + """ + + print('size:', calcsize(fmt)) + + class foo(object): + pass + + i = foo() + + i.ashort = 0x7fff + i.along = 0x7fffffff + i.abyte = 0x7f + i.achar = "a" + i.astr = "12345" + i.afloat = 0.5 + i.adouble = 0.5 + i.afixed = 1.5 + + data = pack(fmt, i) + print('data:', repr(data)) + print(unpack(fmt, data)) + i2 = foo() + 
unpack(fmt, data, i2) + print(vars(i2)) + +if __name__ == "__main__": + _test() diff -Nru fonttools-2.4/Tools/fontTools/misc/textTools.py fonttools-3.0/Tools/fontTools/misc/textTools.py --- fonttools-2.4/Tools/fontTools/misc/textTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/textTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,101 @@ +"""fontTools.misc.textTools.py -- miscellaneous routines.""" + + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import string + + +def safeEval(data, eval=eval): + """A (kindof) safe replacement for eval.""" + return eval(data, {"__builtins__":{"True":True,"False":False}}) + + +def readHex(content): + """Convert a list of hex strings to binary data.""" + return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, basestring))) + +def deHexStr(hexdata): + """Convert a hex string to binary data.""" + hexdata = strjoin(hexdata.split()) + if len(hexdata) % 2: + hexdata = hexdata + "0" + data = [] + for i in range(0, len(hexdata), 2): + data.append(bytechr(int(hexdata[i:i+2], 16))) + return bytesjoin(data) + + +def hexStr(data): + """Convert binary data to a hex string.""" + h = string.hexdigits + r = '' + for c in data: + i = byteord(c) + r = r + h[(i >> 4) & 0xF] + h[i & 0xF] + return r + + +def num2binary(l, bits=32): + items = [] + binary = "" + for i in range(bits): + if l & 0x1: + binary = "1" + binary + else: + binary = "0" + binary + l = l >> 1 + if not ((i+1) % 8): + items.append(binary) + binary = "" + if binary: + items.append(binary) + items.reverse() + assert l in (0, -1), "number doesn't fit in number of bits" + return ' '.join(items) + + +def binary2num(bin): + bin = strjoin(bin.split()) + l = 0 + for digit in bin: + l = l << 1 + if digit != "0": + l = l | 0x1 + return l + + +def caselessSort(alist): + """Return a sorted copy of a list. If there are only strings + in the list, it will not consider case. 
+ """ + + try: + return sorted(alist, key=lambda a: (a.lower(), a)) + except TypeError: + return sorted(alist) + + +def pad(data, size): + r""" Pad byte string 'data' with null bytes until its length is a + multiple of 'size'. + + >>> len(pad(b'abcd', 4)) + 4 + >>> len(pad(b'abcde', 2)) + 6 + >>> len(pad(b'abcde', 4)) + 8 + >>> pad(b'abcdef', 4) == b'abcdef\x00\x00' + True + """ + data = tobytes(data) + if size > 1: + while len(data) % size != 0: + data += b"\0" + return data + + +if __name__ == "__main__": + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Tools/fontTools/misc/timeTools.py fonttools-3.0/Tools/fontTools/misc/timeTools.py --- fonttools-2.4/Tools/fontTools/misc/timeTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/timeTools.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,22 @@ +"""fontTools.misc.timeTools.py -- tools for working with OpenType timestamps. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import time +import calendar + + +epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0)) + +def timestampToString(value): + return time.asctime(time.gmtime(max(0, value + epoch_diff))) + +def timestampFromString(value): + return calendar.timegm(time.strptime(value)) - epoch_diff + +def timestampNow(): + return int(time.time() - epoch_diff) + +def timestampSinceEpoch(value): + return int(value - epoch_diff) diff -Nru fonttools-2.4/Tools/fontTools/misc/transform.py fonttools-3.0/Tools/fontTools/misc/transform.py --- fonttools-2.4/Tools/fontTools/misc/transform.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/transform.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,357 @@ +"""Affine 2D transformation matrix class. + +The Transform class implements various transformation matrix operations, +both on the matrix itself, as well as on 2D coordinates. 
+ +Transform instances are effectively immutable: all methods that operate on the +transformation itself always return a new instance. This has as the +interesting side effect that Transform instances are hashable, ie. they can be +used as dictionary keys. + +This module exports the following symbols: + + Transform -- this is the main class + Identity -- Transform instance set to the identity transformation + Offset -- Convenience function that returns a translating transformation + Scale -- Convenience function that returns a scaling transformation + +Examples: + + >>> t = Transform(2, 0, 0, 3, 0, 0) + >>> t.transformPoint((100, 100)) + (200, 300) + >>> t = Scale(2, 3) + >>> t.transformPoint((100, 100)) + (200, 300) + >>> t.transformPoint((0, 0)) + (0, 0) + >>> t = Offset(2, 3) + >>> t.transformPoint((100, 100)) + (102, 103) + >>> t.transformPoint((0, 0)) + (2, 3) + >>> t2 = t.scale(0.5) + >>> t2.transformPoint((100, 100)) + (52.0, 53.0) + >>> import math + >>> t3 = t2.rotate(math.pi / 2) + >>> t3.transformPoint((0, 0)) + (2.0, 3.0) + >>> t3.transformPoint((100, 100)) + (-48.0, 53.0) + >>> t = Identity.scale(0.5).translate(100, 200).skew(0.1, 0.2) + >>> t.transformPoints([(0, 0), (1, 1), (100, 100)]) + [(50.0, 100.0), (50.550167336042726, 100.60135501775433), (105.01673360427253, 160.13550177543362)] + >>> +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = ["Transform", "Identity", "Offset", "Scale"] + + +_EPSILON = 1e-15 +_ONE_EPSILON = 1 - _EPSILON +_MINUS_ONE_EPSILON = -1 + _EPSILON + + +def _normSinCos(v): + if abs(v) < _EPSILON: + v = 0 + elif v > _ONE_EPSILON: + v = 1 + elif v < _MINUS_ONE_EPSILON: + v = -1 + return v + + +class Transform(object): + + """2x2 transformation matrix plus offset, a.k.a. Affine transform. + Transform instances are immutable: all transforming methods, eg. + rotate(), return a new Transform instance. 
+ + Examples: + >>> t = Transform() + >>> t + <Transform [1 0 0 1 0 0]> + >>> t.scale(2) + <Transform [2 0 0 2 0 0]> + >>> t.scale(2.5, 5.5) + <Transform [2.5 0 0 5.5 0 0]> + >>> + >>> t.scale(2, 3).transformPoint((100, 100)) + (200, 300) + """ + + def __init__(self, xx=1, xy=0, yx=0, yy=1, dx=0, dy=0): + """Transform's constructor takes six arguments, all of which are + optional, and can be used as keyword arguments: + >>> Transform(12) + <Transform [12 0 0 1 0 0]> + >>> Transform(dx=12) + <Transform [1 0 0 1 12 0]> + >>> Transform(yx=12) + <Transform [1 0 12 1 0 0]> + >>> + """ + self.__affine = xx, xy, yx, yy, dx, dy + + def transformPoint(self, p): + """Transform a point. + + Example: + >>> t = Transform() + >>> t = t.scale(2.5, 5.5) + >>> t.transformPoint((100, 100)) + (250.0, 550.0) + """ + (x, y) = p + xx, xy, yx, yy, dx, dy = self.__affine + return (xx*x + yx*y + dx, xy*x + yy*y + dy) + + def transformPoints(self, points): + """Transform a list of points. + + Example: + >>> t = Scale(2, 3) + >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)]) + [(0, 0), (0, 300), (200, 300), (200, 0)] + >>> + """ + xx, xy, yx, yy, dx, dy = self.__affine + return [(xx*x + yx*y + dx, xy*x + yy*y + dy) for x, y in points] + + def translate(self, x=0, y=0): + """Return a new transformation, translated (offset) by x, y. + + Example: + >>> t = Transform() + >>> t.translate(20, 30) + <Transform [1 0 0 1 20 30]> + >>> + """ + return self.transform((1, 0, 0, 1, x, y)) + + def scale(self, x=1, y=None): + """Return a new transformation, scaled by x, y. The 'y' argument + may be None, which implies to use the x value for y as well. + + Example: + >>> t = Transform() + >>> t.scale(5) + <Transform [5 0 0 5 0 0]> + >>> t.scale(5, 6) + <Transform [5 0 0 6 0 0]> + >>> + """ + if y is None: + y = x + return self.transform((x, 0, 0, y, 0, 0)) + + def rotate(self, angle): + """Return a new transformation, rotated by 'angle' (radians). 
+ + Example: + >>> import math + >>> t = Transform() + >>> t.rotate(math.pi / 2) + <Transform [0 1 -1 0 0 0]> + >>> + """ + import math + c = _normSinCos(math.cos(angle)) + s = _normSinCos(math.sin(angle)) + return self.transform((c, s, -s, c, 0, 0)) + + def skew(self, x=0, y=0): + """Return a new transformation, skewed by x and y. + + Example: + >>> import math + >>> t = Transform() + >>> t.skew(math.pi / 4) + <Transform [1 0 1 1 0 0]> + >>> + """ + import math + return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0)) + + def transform(self, other): + """Return a new transformation, transformed by another + transformation. + + Example: + >>> t = Transform(2, 0, 0, 3, 1, 6) + >>> t.transform((4, 3, 2, 1, 5, 6)) + <Transform [8 9 4 3 11 24]> + >>> + """ + xx1, xy1, yx1, yy1, dx1, dy1 = other + xx2, xy2, yx2, yy2, dx2, dy2 = self.__affine + return self.__class__( + xx1*xx2 + xy1*yx2, + xx1*xy2 + xy1*yy2, + yx1*xx2 + yy1*yx2, + yx1*xy2 + yy1*yy2, + xx2*dx1 + yx2*dy1 + dx2, + xy2*dx1 + yy2*dy1 + dy2) + + def reverseTransform(self, other): + """Return a new transformation, which is the other transformation + transformed by self. self.reverseTransform(other) is equivalent to + other.transform(self). + + Example: + >>> t = Transform(2, 0, 0, 3, 1, 6) + >>> t.reverseTransform((4, 3, 2, 1, 5, 6)) + <Transform [8 6 6 3 21 15]> + >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6)) + <Transform [8 6 6 3 21 15]> + >>> + """ + xx1, xy1, yx1, yy1, dx1, dy1 = self.__affine + xx2, xy2, yx2, yy2, dx2, dy2 = other + return self.__class__( + xx1*xx2 + xy1*yx2, + xx1*xy2 + xy1*yy2, + yx1*xx2 + yy1*yx2, + yx1*xy2 + yy1*yy2, + xx2*dx1 + yx2*dy1 + dx2, + xy2*dx1 + yy2*dy1 + dy2) + + def inverse(self): + """Return the inverse transformation. 
+ + Example: + >>> t = Identity.translate(2, 3).scale(4, 5) + >>> t.transformPoint((10, 20)) + (42, 103) + >>> it = t.inverse() + >>> it.transformPoint((42, 103)) + (10.0, 20.0) + >>> + """ + if self.__affine == (1, 0, 0, 1, 0, 0): + return self + xx, xy, yx, yy, dx, dy = self.__affine + det = xx*yy - yx*xy + xx, xy, yx, yy = yy/det, -xy/det, -yx/det, xx/det + dx, dy = -xx*dx - yx*dy, -xy*dx - yy*dy + return self.__class__(xx, xy, yx, yy, dx, dy) + + def toPS(self): + """Return a PostScript representation: + >>> t = Identity.scale(2, 3).translate(4, 5) + >>> t.toPS() + '[2 0 0 3 8 15]' + >>> + """ + return "[%s %s %s %s %s %s]" % self.__affine + + def __len__(self): + """Transform instances also behave like sequences of length 6: + >>> len(Identity) + 6 + >>> + """ + return 6 + + def __getitem__(self, index): + """Transform instances also behave like sequences of length 6: + >>> list(Identity) + [1, 0, 0, 1, 0, 0] + >>> tuple(Identity) + (1, 0, 0, 1, 0, 0) + >>> + """ + return self.__affine[index] + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + """Transform instances are comparable: + >>> t1 = Identity.scale(2, 3).translate(4, 6) + >>> t2 = Identity.translate(8, 18).scale(2, 3) + >>> t1 == t2 + 1 + >>> + + But beware of floating point rounding errors: + >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) + >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) + >>> t1 + <Transform [0.2 0 0 0.3 0.08 0.18]> + >>> t2 + <Transform [0.2 0 0 0.3 0.08 0.18]> + >>> t1 == t2 + 0 + >>> + """ + xx1, xy1, yx1, yy1, dx1, dy1 = self.__affine + xx2, xy2, yx2, yy2, dx2, dy2 = other + return (xx1, xy1, yx1, yy1, dx1, dy1) == \ + (xx2, xy2, yx2, yy2, dx2, dy2) + + def __hash__(self): + """Transform instances are hashable, meaning you can use them as + keys in dictionaries: + >>> d = {Scale(12, 13): None} + >>> d + {<Transform [12 0 0 13 0 0]>: None} + >>> + + But again, beware of floating point rounding errors: + >>> t1 = 
Identity.scale(0.2, 0.3).translate(0.4, 0.6) + >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) + >>> t1 + <Transform [0.2 0 0 0.3 0.08 0.18]> + >>> t2 + <Transform [0.2 0 0 0.3 0.08 0.18]> + >>> d = {t1: None} + >>> d + {<Transform [0.2 0 0 0.3 0.08 0.18]>: None} + >>> d[t2] + Traceback (most recent call last): + File "<stdin>", line 1, in ? + KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]> + >>> + """ + return hash(self.__affine) + + def __repr__(self): + return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) \ + + self.__affine) + + +Identity = Transform() + +def Offset(x=0, y=0): + """Return the identity transformation offset by x, y. + + Example: + >>> Offset(2, 3) + <Transform [1 0 0 1 2 3]> + >>> + """ + return Transform(1, 0, 0, 1, x, y) + +def Scale(x, y=None): + """Return the identity transformation scaled by x, y. The 'y' argument + may be None, which implies to use the x value for y as well. + + Example: + >>> Scale(2, 3) + <Transform [2 0 0 3 0 0]> + >>> + """ + if y is None: + y = x + return Transform(x, 0, 0, y, 0, 0) + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Tools/fontTools/misc/xmlReader.py fonttools-3.0/Tools/fontTools/misc/xmlReader.py --- fonttools-2.4/Tools/fontTools/misc/xmlReader.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/xmlReader.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,131 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.misc.textTools import safeEval +from fontTools.ttLib.tables.DefaultTable import DefaultTable +import os + + +class TTXParseError(Exception): pass + +BUFSIZE = 0x4000 + + +class XMLReader(object): + + def __init__(self, fileName, ttFont, progress=None, quiet=False): + self.ttFont = ttFont + self.fileName = fileName + self.progress = progress + self.quiet = quiet + self.root = None + 
self.contentStack = [] + self.stackSize = 0 + + def read(self): + if self.progress: + import stat + self.progress.set(0, os.stat(self.fileName)[stat.ST_SIZE] // 100 or 1) + file = open(self.fileName, 'rb') + self._parseFile(file) + file.close() + + def _parseFile(self, file): + from xml.parsers.expat import ParserCreate + parser = ParserCreate() + parser.StartElementHandler = self._startElementHandler + parser.EndElementHandler = self._endElementHandler + parser.CharacterDataHandler = self._characterDataHandler + + pos = 0 + while True: + chunk = file.read(BUFSIZE) + if not chunk: + parser.Parse(chunk, 1) + break + pos = pos + len(chunk) + if self.progress: + self.progress.set(pos // 100) + parser.Parse(chunk, 0) + + def _startElementHandler(self, name, attrs): + stackSize = self.stackSize + self.stackSize = stackSize + 1 + if not stackSize: + if name != "ttFont": + raise TTXParseError("illegal root tag: %s" % name) + sfntVersion = attrs.get("sfntVersion") + if sfntVersion is not None: + if len(sfntVersion) != 4: + sfntVersion = safeEval('"' + sfntVersion + '"') + self.ttFont.sfntVersion = sfntVersion + self.contentStack.append([]) + elif stackSize == 1: + subFile = attrs.get("src") + if subFile is not None: + subFile = os.path.join(os.path.dirname(self.fileName), subFile) + subReader = XMLReader(subFile, self.ttFont, self.progress, self.quiet) + subReader.read() + self.contentStack.append([]) + return + tag = ttLib.xmlToTag(name) + msg = "Parsing '%s' table..." 
% tag + if self.progress: + self.progress.setlabel(msg) + elif self.ttFont.verbose: + ttLib.debugmsg(msg) + else: + if not self.quiet: + print(msg) + if tag == "GlyphOrder": + tableClass = ttLib.GlyphOrder + elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])): + tableClass = DefaultTable + else: + tableClass = ttLib.getTableClass(tag) + if tableClass is None: + tableClass = DefaultTable + if tag == 'loca' and tag in self.ttFont: + # Special-case the 'loca' table as we need the + # original if the 'glyf' table isn't recompiled. + self.currentTable = self.ttFont[tag] + else: + self.currentTable = tableClass(tag) + self.ttFont[tag] = self.currentTable + self.contentStack.append([]) + elif stackSize == 2: + self.contentStack.append([]) + self.root = (name, attrs, self.contentStack[-1]) + else: + l = [] + self.contentStack[-1].append((name, attrs, l)) + self.contentStack.append(l) + + def _characterDataHandler(self, data): + if self.stackSize > 1: + self.contentStack[-1].append(data) + + def _endElementHandler(self, name): + self.stackSize = self.stackSize - 1 + del self.contentStack[-1] + if self.stackSize == 1: + self.root = None + elif self.stackSize == 2: + name, attrs, content = self.root + self.currentTable.fromXML(name, attrs, content, self.ttFont) + self.root = None + + +class ProgressPrinter(object): + + def __init__(self, title, maxval=100): + print(title) + + def set(self, val, maxval=None): + pass + + def increment(self, val=1): + pass + + def setLabel(self, text): + print(text) diff -Nru fonttools-2.4/Tools/fontTools/misc/xmlReader_test.py fonttools-3.0/Tools/fontTools/misc/xmlReader_test.py --- fonttools-2.4/Tools/fontTools/misc/xmlReader_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/xmlReader_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- + +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * 
+import os +import unittest +from fontTools.ttLib import TTFont +from .xmlReader import XMLReader +import tempfile + + +class TestXMLReader(unittest.TestCase): + + def test_decode_utf8(self): + + class DebugXMLReader(XMLReader): + + def __init__(self, fileName, ttFont, progress=None, quiet=False): + super(DebugXMLReader, self).__init__( + fileName, ttFont, progress, quiet) + self.contents = [] + + def _endElementHandler(self, name): + if self.stackSize == 3: + name, attrs, content = self.root + self.contents.append(content) + super(DebugXMLReader, self)._endElementHandler(name) + + expected = 'fôôbär' + data = '''\ +<?xml version="1.0" encoding="UTF-8"?> +<ttFont> + <name> + <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409"> + %s + </namerecord> + </name> +</ttFont> +''' % expected + + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.write(data.encode('utf-8')) + reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) + reader.read() + os.remove(tmp.name) + content = strjoin(reader.contents[0]).strip() + self.assertEqual(expected, content) + + def test_normalise_newlines(self): + + class DebugXMLReader(XMLReader): + + def __init__(self, fileName, ttFont, progress=None, quiet=False): + super(DebugXMLReader, self).__init__( + fileName, ttFont, progress, quiet) + self.newlines = [] + + def _characterDataHandler(self, data): + self.newlines.extend([c for c in data if c in ('\r', '\n')]) + + # notice how when CR is escaped, it is not normalised by the XML parser + data = ( + '<ttFont>\r' # \r -> \n + ' <test>\r\n' # \r\n -> \n + ' a line of text\n' # \n + ' escaped CR and unix newline &#13;\n' # &#13;\n -> \r\n + ' escaped CR and macintosh newline &#13;\r' # &#13;\r -> \r\n + ' escaped CR and windows newline &#13;\r\n' # &#13;\r\n -> \r\n + ' </test>\n' # \n + '</ttFont>') + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.write(data.encode('utf-8')) + reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) + reader.read() + 
os.remove(tmp.name) + expected = ['\n'] * 3 + ['\r', '\n'] * 3 + ['\n'] + self.assertEqual(expected, reader.newlines) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/misc/xmlWriter.py fonttools-3.0/Tools/fontTools/misc/xmlWriter.py --- fonttools-2.4/Tools/fontTools/misc/xmlWriter.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/xmlWriter.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,180 @@ +"""xmlWriter.py -- Simple XML authoring class""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +import os +import string + +INDENT = " " + + +class XMLWriter(object): + + def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8"): + if encoding.lower().replace('-','').replace('_','') != 'utf8': + raise Exception('Only UTF-8 encoding is supported.') + if fileOrPath == '-': + fileOrPath = sys.stdout + if not hasattr(fileOrPath, "write"): + self.file = open(fileOrPath, "wb") + else: + # assume writable file object + self.file = fileOrPath + + # Figure out if writer expects bytes or unicodes + try: + # The bytes check should be first. See: + # https://github.com/behdad/fonttools/pull/233 + self.file.write(b'') + self.totype = tobytes + except TypeError: + # This better not fail. 
+ self.file.write(tounicode('')) + self.totype = tounicode + self.indentwhite = self.totype(indentwhite) + self.newlinestr = self.totype(os.linesep) + self.indentlevel = 0 + self.stack = [] + self.needindent = 1 + self.idlefunc = idlefunc + self.idlecounter = 0 + self._writeraw('<?xml version="1.0" encoding="UTF-8"?>') + self.newline() + + def close(self): + self.file.close() + + def write(self, string, indent=True): + """Writes text.""" + self._writeraw(escape(string), indent=indent) + + def writecdata(self, string): + """Writes text in a CDATA section.""" + self._writeraw("<![CDATA[" + string + "") + + def write8bit(self, data, strip=False): + """Writes a bytes() sequence into the XML, escaping + non-ASCII bytes. When this is read in xmlReader, + the original bytes can be recovered by encoding to + 'latin-1'.""" + self._writeraw(escape8bit(data), strip=strip) + + def write_noindent(self, string): + """Writes text without indentation.""" + self._writeraw(escape(string), indent=False) + + def _writeraw(self, data, indent=True, strip=False): + """Writes bytes, possibly indented.""" + if indent and self.needindent: + self.file.write(self.indentlevel * self.indentwhite) + self.needindent = 0 + s = self.totype(data, encoding="utf_8") + if (strip): + s = s.strip() + self.file.write(s) + + def newline(self): + self.file.write(self.newlinestr) + self.needindent = 1 + idlecounter = self.idlecounter + if not idlecounter % 100 and self.idlefunc is not None: + self.idlefunc() + self.idlecounter = idlecounter + 1 + + def comment(self, data): + data = escape(data) + lines = data.split("\n") + self._writeraw("") + + def simpletag(self, _TAG_, *args, **kwargs): + attrdata = self.stringifyattrs(*args, **kwargs) + data = "<%s%s/>" % (_TAG_, attrdata) + self._writeraw(data) + + def begintag(self, _TAG_, *args, **kwargs): + attrdata = self.stringifyattrs(*args, **kwargs) + data = "<%s%s>" % (_TAG_, attrdata) + self._writeraw(data) + self.stack.append(_TAG_) + self.indent() + + def 
endtag(self, _TAG_): + assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag" + del self.stack[-1] + self.dedent() + data = "" % _TAG_ + self._writeraw(data) + + def dumphex(self, data): + linelength = 16 + hexlinelength = linelength * 2 + chunksize = 8 + for i in range(0, len(data), linelength): + hexline = hexStr(data[i:i+linelength]) + line = "" + white = "" + for j in range(0, hexlinelength, chunksize): + line = line + white + hexline[j:j+chunksize] + white = " " + self._writeraw(line) + self.newline() + + def indent(self): + self.indentlevel = self.indentlevel + 1 + + def dedent(self): + assert self.indentlevel > 0 + self.indentlevel = self.indentlevel - 1 + + def stringifyattrs(self, *args, **kwargs): + if kwargs: + assert not args + attributes = sorted(kwargs.items()) + elif args: + assert len(args) == 1 + attributes = args[0] + else: + return "" + data = "" + for attr, value in attributes: + if not isinstance(value, (bytes, unicode)): + value = str(value) + data = data + ' %s="%s"' % (attr, escapeattr(value)) + return data + + +def escape(data): + data = tostr(data, 'utf_8') + data = data.replace("&", "&") + data = data.replace("<", "<") + data = data.replace(">", ">") + data = data.replace("\r", " ") + return data + +def escapeattr(data): + data = escape(data) + data = data.replace('"', """) + return data + +def escape8bit(data): + """Input is Unicode string.""" + def escapechar(c): + n = ord(c) + if 32 <= n <= 127 and c not in "<&>": + return c + else: + return "&#" + repr(n) + ";" + return strjoin(map(escapechar, data.decode('latin-1'))) + +def hexStr(s): + h = string.hexdigits + r = '' + for c in s: + i = byteord(c) + r = r + h[(i >> 4) & 0xF] + h[i & 0xF] + return r diff -Nru fonttools-2.4/Tools/fontTools/misc/xmlWriter_test.py fonttools-3.0/Tools/fontTools/misc/xmlWriter_test.py --- fonttools-2.4/Tools/fontTools/misc/xmlWriter_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/misc/xmlWriter_test.py 2015-08-31 
17:57:15.000000000 +0000 @@ -0,0 +1,111 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import os +import unittest +from .xmlWriter import XMLWriter + +linesep = tobytes(os.linesep) +HEADER = b'' + linesep + +class TestXMLWriter(unittest.TestCase): + + def test_comment_escaped(self): + writer = XMLWriter(BytesIO()) + writer.comment("This&that are ") + self.assertEqual(HEADER + b"", writer.file.getvalue()) + + def test_comment_multiline(self): + writer = XMLWriter(BytesIO()) + writer.comment("Hello world\nHow are you?") + self.assertEqual(HEADER + b"", + writer.file.getvalue()) + + def test_encoding_default(self): + writer = XMLWriter(BytesIO()) + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_utf8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="utf8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_UTF_8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="UTF-8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_UTF8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="UTF8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_other(self): + self.assertRaises(Exception, XMLWriter, BytesIO(), + encoding="iso-8859-1") + + def test_write(self): + writer = XMLWriter(BytesIO()) + writer.write("foo&bar") + self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue()) + + def test_indent_dedent(self): + writer = XMLWriter(BytesIO()) + writer.write("foo") + writer.newline() + writer.indent() + writer.write("bar") + writer.newline() + writer.dedent() + writer.write("baz") + self.assertEqual(HEADER + bytesjoin(["foo", " bar", "baz"], linesep), + writer.file.getvalue()) + + def test_writecdata(self): + writer = XMLWriter(BytesIO()) + 
writer.writecdata("foo&bar") + self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue()) + + def test_simpletag(self): + writer = XMLWriter(BytesIO()) + writer.simpletag("tag", a="1", b="2") + self.assertEqual(HEADER + b'', writer.file.getvalue()) + + def test_begintag_endtag(self): + writer = XMLWriter(BytesIO()) + writer.begintag("tag", attr="value") + writer.write("content") + writer.endtag("tag") + self.assertEqual(HEADER + b'content', writer.file.getvalue()) + + def test_dumphex(self): + writer = XMLWriter(BytesIO()) + writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.") + self.assertEqual(HEADER + bytesjoin([ + "54797065 20697320 61206265 61757469", + "66756c20 67726f75 70206f66 206c6574", + "74657273 2c206e6f 74206120 67726f75", + "70206f66 20626561 75746966 756c206c", + "65747465 72732e ", ""], joiner=linesep), writer.file.getvalue()) + + def test_stringifyattrs(self): + writer = XMLWriter(BytesIO()) + expected = ' attr="0"' + self.assertEqual(expected, writer.stringifyattrs(attr=0)) + self.assertEqual(expected, writer.stringifyattrs(attr=b'0')) + self.assertEqual(expected, writer.stringifyattrs(attr='0')) + self.assertEqual(expected, writer.stringifyattrs(attr=u'0')) + + def test_carriage_return_escaped(self): + writer = XMLWriter(BytesIO()) + writer.write("two lines\r\nseparated by Windows line endings") + self.assertEqual( + HEADER + b'two lines \nseparated by Windows line endings', + writer.file.getvalue()) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/pens/basePen.py fonttools-3.0/Tools/fontTools/pens/basePen.py --- fonttools-2.4/Tools/fontTools/pens/basePen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/basePen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,363 @@ +"""fontTools.pens.basePen.py -- Tools and base classes to build pen objects. 
+ +The Pen Protocol + +A Pen is a kind of object that standardizes the way how to "draw" outlines: +it is a middle man between an outline and a drawing. In other words: +it is an abstraction for drawing outlines, making sure that outline objects +don't need to know the details about how and where they're being drawn, and +that drawings don't need to know the details of how outlines are stored. + +The most basic pattern is this: + + outline.draw(pen) # 'outline' draws itself onto 'pen' + +Pens can be used to render outlines to the screen, but also to construct +new outlines. Eg. an outline object can be both a drawable object (it has a +draw() method) as well as a pen itself: you *build* an outline using pen +methods. + +The AbstractPen class defines the Pen protocol. It implements almost +nothing (only no-op closePath() and endPath() methods), but is useful +for documentation purposes. Subclassing it basically tells the reader: +"this class implements the Pen protocol.". An examples of an AbstractPen +subclass is fontTools.pens.transformPen.TransformPen. + +The BasePen class is a base implementation useful for pens that actually +draw (for example a pen renders outlines using a native graphics engine). +BasePen contains a lot of base functionality, making it very easy to build +a pen that fully conforms to the pen protocol. Note that if you subclass +BasePen, you _don't_ override moveTo(), lineTo(), etc., but _moveTo(), +_lineTo(), etc. See the BasePen doc string for details. Examples of +BasePen subclasses are fontTools.pens.boundsPen.BoundsPen and +fontTools.pens.cocoaPen.CocoaPen. + +Coordinates are usually expressed as (x, y) tuples, but generally any +sequence of length 2 will do. 
+""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = ["AbstractPen", "NullPen", "BasePen", + "decomposeSuperBezierSegment", "decomposeQuadraticSegment"] + + +class AbstractPen(object): + + def moveTo(self, pt): + """Begin a new sub path, set the current point to 'pt'. You must + end each sub path with a call to pen.closePath() or pen.endPath(). + """ + raise NotImplementedError + + def lineTo(self, pt): + """Draw a straight line from the current point to 'pt'.""" + raise NotImplementedError + + def curveTo(self, *points): + """Draw a cubic bezier with an arbitrary number of control points. + + The last point specified is on-curve, all others are off-curve + (control) points. If the number of control points is > 2, the + segment is split into multiple bezier segments. This works + like this: + + Let n be the number of control points (which is the number of + arguments to this call minus 1). If n==2, a plain vanilla cubic + bezier is drawn. If n==1, we fall back to a quadratic segment and + if n==0 we draw a straight line. It gets interesting when n>2: + n-1 PostScript-style cubic segments will be drawn as if it were + one curve. See decomposeSuperBezierSegment(). + + The conversion algorithm used for n>2 is inspired by NURB + splines, and is conceptually equivalent to the TrueType "implied + points" principle. See also decomposeQuadraticSegment(). + """ + raise NotImplementedError + + def qCurveTo(self, *points): + """Draw a whole string of quadratic curve segments. + + The last point specified is on-curve, all others are off-curve + points. + + This method implements TrueType-style curves, breaking up curves + using 'implied points': between each two consequtive off-curve points, + there is one implied point exactly in the middle between them. See + also decomposeQuadraticSegment(). + + The last argument (normally the on-curve point) may be None. 
+ This is to support contours that have NO on-curve points (a rarely + seen feature of TrueType outlines). + """ + raise NotImplementedError + + def closePath(self): + """Close the current sub path. You must call either pen.closePath() + or pen.endPath() after each sub path. + """ + pass + + def endPath(self): + """End the current sub path, but don't close it. You must call + either pen.closePath() or pen.endPath() after each sub path. + """ + pass + + def addComponent(self, glyphName, transformation): + """Add a sub glyph. The 'transformation' argument must be a 6-tuple + containing an affine transformation, or a Transform object from the + fontTools.misc.transform module. More precisely: it should be a + sequence containing 6 numbers. + """ + raise NotImplementedError + + +class NullPen(object): + + """A pen that does nothing. + """ + + def moveTo(self, pt): + pass + + def lineTo(self, pt): + pass + + def curveTo(self, *points): + pass + + def qCurveTo(self, *points): + pass + + def closePath(self): + pass + + def endPath(self): + pass + + def addComponent(self, glyphName, transformation): + pass + + +class BasePen(AbstractPen): + + """Base class for drawing pens. You must override _moveTo, _lineTo and + _curveToOne. You may additionally override _closePath, _endPath, + addComponent and/or _qCurveToOne. You should not override any other + methods. + """ + + def __init__(self, glyphSet): + self.glyphSet = glyphSet + self.__currentPoint = None + + # must override + + def _moveTo(self, pt): + raise NotImplementedError + + def _lineTo(self, pt): + raise NotImplementedError + + def _curveToOne(self, pt1, pt2, pt3): + raise NotImplementedError + + # may override + + def _closePath(self): + pass + + def _endPath(self): + pass + + def _qCurveToOne(self, pt1, pt2): + """This method implements the basic quadratic curve type. The + default implementation delegates the work to the cubic curve + function. Optionally override with a native implementation. 
+ """ + pt0x, pt0y = self.__currentPoint + pt1x, pt1y = pt1 + pt2x, pt2y = pt2 + mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x) + mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y) + mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x) + mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y) + self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2) + + def addComponent(self, glyphName, transformation): + """This default implementation simply transforms the points + of the base glyph and draws it onto self. + """ + from fontTools.pens.transformPen import TransformPen + try: + glyph = self.glyphSet[glyphName] + except KeyError: + pass + else: + tPen = TransformPen(self, transformation) + glyph.draw(tPen) + + # don't override + + def _getCurrentPoint(self): + """Return the current point. This is not part of the public + interface, yet is useful for subclasses. + """ + return self.__currentPoint + + def closePath(self): + self._closePath() + self.__currentPoint = None + + def endPath(self): + self._endPath() + self.__currentPoint = None + + def moveTo(self, pt): + self._moveTo(pt) + self.__currentPoint = pt + + def lineTo(self, pt): + self._lineTo(pt) + self.__currentPoint = pt + + def curveTo(self, *points): + n = len(points) - 1 # 'n' is the number of control points + assert n >= 0 + if n == 2: + # The common case, we have exactly two BCP's, so this is a standard + # cubic bezier. Even though decomposeSuperBezierSegment() handles + # this case just fine, we special-case it anyway since it's so + # common. + self._curveToOne(*points) + self.__currentPoint = points[-1] + elif n > 2: + # n is the number of control points; split curve into n-1 cubic + # bezier segments. The algorithm used here is inspired by NURB + # splines and the TrueType "implied point" principle, and ensures + # the smoothest possible connection between two curve segments, + # with no disruption in the curvature. 
It is practical since it + # allows one to construct multiple bezier segments with a much + # smaller amount of points. + _curveToOne = self._curveToOne + for pt1, pt2, pt3 in decomposeSuperBezierSegment(points): + _curveToOne(pt1, pt2, pt3) + self.__currentPoint = pt3 + elif n == 1: + self.qCurveTo(*points) + elif n == 0: + self.lineTo(points[0]) + else: + raise AssertionError("can't get there from here") + + def qCurveTo(self, *points): + n = len(points) - 1 # 'n' is the number of control points + assert n >= 0 + if points[-1] is None: + # Special case for TrueType quadratics: it is possible to + # define a contour with NO on-curve points. BasePen supports + # this by allowing the final argument (the expected on-curve + # point) to be None. We simulate the feature by making the implied + # on-curve point between the last and the first off-curve points + # explicit. + x, y = points[-2] # last off-curve point + nx, ny = points[0] # first off-curve point + impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny)) + self.__currentPoint = impliedStartPoint + self._moveTo(impliedStartPoint) + points = points[:-1] + (impliedStartPoint,) + if n > 0: + # Split the string of points into discrete quadratic curve + # segments. Between any two consecutive off-curve points + # there's an implied on-curve point exactly in the middle. + # This is where the segment splits. + _qCurveToOne = self._qCurveToOne + for pt1, pt2 in decomposeQuadraticSegment(points): + _qCurveToOne(pt1, pt2) + self.__currentPoint = pt2 + else: + self.lineTo(points[0]) + + +def decomposeSuperBezierSegment(points): + """Split the SuperBezier described by 'points' into a list of regular + bezier segments. The 'points' argument must be a sequence with length + 3 or greater, containing (x, y) coordinates. The last point is the + destination on-curve point, the rest of the points are off-curve points. + The start point should not be supplied. 
+ + This function returns a list of (pt1, pt2, pt3) tuples, which each + specify a regular curveto-style bezier segment. + """ + n = len(points) - 1 + assert n > 1 + bezierSegments = [] + pt1, pt2, pt3 = points[0], None, None + for i in range(2, n+1): + # calculate points in between control points. + nDivisions = min(i, 3, n-i+2) + for j in range(1, nDivisions): + factor = j / nDivisions + temp1 = points[i-1] + temp2 = points[i-2] + temp = (temp2[0] + factor * (temp1[0] - temp2[0]), + temp2[1] + factor * (temp1[1] - temp2[1])) + if pt2 is None: + pt2 = temp + else: + pt3 = (0.5 * (pt2[0] + temp[0]), + 0.5 * (pt2[1] + temp[1])) + bezierSegments.append((pt1, pt2, pt3)) + pt1, pt2, pt3 = temp, None, None + bezierSegments.append((pt1, points[-2], points[-1])) + return bezierSegments + + +def decomposeQuadraticSegment(points): + """Split the quadratic curve segment described by 'points' into a list + of "atomic" quadratic segments. The 'points' argument must be a sequence + with length 2 or greater, containing (x, y) coordinates. The last point + is the destination on-curve point, the rest of the points are off-curve + points. The start point should not be supplied. + + This function returns a list of (pt1, pt2) tuples, which each specify a + plain quadratic bezier segment. 
+ """ + n = len(points) - 1 + assert n > 0 + quadSegments = [] + for i in range(n - 1): + x, y = points[i] + nx, ny = points[i+1] + impliedPt = (0.5 * (x + nx), 0.5 * (y + ny)) + quadSegments.append((points[i], impliedPt)) + quadSegments.append((points[-2], points[-1])) + return quadSegments + + +class _TestPen(BasePen): + """Test class that prints PostScript to stdout.""" + def _moveTo(self, pt): + print("%s %s moveto" % (pt[0], pt[1])) + def _lineTo(self, pt): + print("%s %s lineto" % (pt[0], pt[1])) + def _curveToOne(self, bcp1, bcp2, pt): + print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1], + bcp2[0], bcp2[1], pt[0], pt[1])) + def _closePath(self): + print("closepath") + + +if __name__ == "__main__": + pen = _TestPen(None) + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) + pen.closePath() + + pen = _TestPen(None) + # testing the "no on-curve point" scenario + pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None) + pen.closePath() diff -Nru fonttools-2.4/Tools/fontTools/pens/basePen_test.py fonttools-3.0/Tools/fontTools/pens/basePen_test.py --- fonttools-2.4/Tools/fontTools/pens/basePen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/basePen_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,171 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import \ + BasePen, decomposeSuperBezierSegment, decomposeQuadraticSegment +import unittest + + +class _TestPen(BasePen): + def __init__(self): + BasePen.__init__(self, glyphSet={}) + self._commands = [] + + def __repr__(self): + return " ".join(self._commands) + + def getCurrentPoint(self): + return self._getCurrentPoint() + + def _moveTo(self, pt): + self._commands.append("%s %s moveto" % (pt[0], pt[1])) + + def _lineTo(self, pt): + self._commands.append("%s %s lineto" % (pt[0], pt[1])) + + def _curveToOne(self, bcp1, bcp2, pt): + 
self._commands.append("%s %s %s %s %s %s curveto" % + (bcp1[0], bcp1[1], + bcp2[0], bcp2[1], + pt[0], pt[1])) + + def _closePath(self): + self._commands.append("closepath") + + def _endPath(self): + self._commands.append("endpath") + + +class _TestGlyph: + def draw(self, pen): + pen.moveTo((0.0, 0.0)) + pen.lineTo((0.0, 100.0)) + pen.curveTo((50.0, 75.0), (60.0, 50.0), (50.0, 25.0), (0.0, 0.0)) + pen.closePath() + + +class BasePenTest(unittest.TestCase): + def test_moveTo(self): + pen = _TestPen() + pen.moveTo((0.5, -4.3)) + self.assertEqual("0.5 -4.3 moveto", repr(pen)) + self.assertEqual((0.5, -4.3), pen.getCurrentPoint()) + + def test_lineTo(self): + pen = _TestPen() + pen.moveTo((4, 5)) + pen.lineTo((7, 8)) + self.assertEqual("4 5 moveto 7 8 lineto", repr(pen)) + self.assertEqual((7, 8), pen.getCurrentPoint()) + + def test_curveTo_zeroPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + self.assertRaises(AssertionError, pen.curveTo) + + def test_curveTo_onePoint(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((1.0, 1.1)) + self.assertEqual("0.0 0.0 moveto 1.0 1.1 lineto", repr(pen)) + self.assertEqual((1.0, 1.1), pen.getCurrentPoint()) + + def test_curveTo_twoPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((6.0, 3.0), (3.0, 6.0)) + self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", + repr(pen)) + self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) + + def test_curveTo_manyPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((1.0, 1.1), (2.0, 2.1), (3.0, 3.1), (4.0, 4.1)) + self.assertEqual("0.0 0.0 moveto " + "1.0 1.1 1.5 1.6 2.0 2.1 curveto " + "2.5 2.6 3.0 3.1 4.0 4.1 curveto", repr(pen)) + self.assertEqual((4.0, 4.1), pen.getCurrentPoint()) + + def test_qCurveTo_zeroPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + self.assertRaises(AssertionError, pen.qCurveTo) + + def test_qCurveTo_onePoint(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((77.7, 
99.9)) + self.assertEqual("0.0 0.0 moveto 77.7 99.9 lineto", repr(pen)) + self.assertEqual((77.7, 99.9), pen.getCurrentPoint()) + + def test_qCurveTo_manyPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((6.0, 3.0), (3.0, 6.0)) + self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", + repr(pen)) + self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) + + def test_qCurveTo_onlyOffCurvePoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((6.0, -6.0), (12.0, 12.0), (18.0, -18.0), None) + self.assertEqual("0.0 0.0 moveto " + "12.0 -12.0 moveto " + "8.0 -8.0 7.0 -3.0 9.0 3.0 curveto " + "11.0 9.0 13.0 7.0 15.0 -3.0 curveto " + "17.0 -13.0 16.0 -16.0 12.0 -12.0 curveto", repr(pen)) + self.assertEqual((12.0, -12.0), pen.getCurrentPoint()) + + def test_closePath(self): + pen = _TestPen() + pen.lineTo((3, 4)) + pen.closePath() + self.assertEqual("3 4 lineto closepath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + def test_endPath(self): + pen = _TestPen() + pen.lineTo((3, 4)) + pen.endPath() + self.assertEqual("3 4 lineto endpath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + def test_addComponent(self): + pen = _TestPen() + pen.glyphSet["oslash"] = _TestGlyph() + pen.addComponent("oslash", (2, 3, 0.5, 2, -10, 0)) + self.assertEqual("-10.0 0.0 moveto " + "40.0 200.0 lineto " + "127.5 300.0 131.25 290.0 125.0 265.0 curveto " + "118.75 240.0 102.5 200.0 -10.0 0.0 curveto " + "closepath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + +class DecomposeSegmentTest(unittest.TestCase): + def test_decomposeSuperBezierSegment(self): + decompose = decomposeSuperBezierSegment + self.assertRaises(AssertionError, decompose, []) + self.assertRaises(AssertionError, decompose, [(0, 0)]) + self.assertRaises(AssertionError, decompose, [(0, 0), (1, 1)]) + self.assertEqual([((0, 0), (1, 1), (2, 2))], + decompose([(0, 0), (1, 1), (2, 2)])) + self.assertEqual( + [((0, 0), (2, -2), (4, 0)), 
((6, 2), (8, 8), (12, -12))], + decompose([(0, 0), (4, -4), (8, 8), (12, -12)])) + + def test_decomposeQuadraticSegment(self): + decompose = decomposeQuadraticSegment + self.assertRaises(AssertionError, decompose, []) + self.assertRaises(AssertionError, decompose, [(0, 0)]) + self.assertEqual([((0,0), (4, 8))], decompose([(0, 0), (4, 8)])) + self.assertEqual([((0,0), (2, 4)), ((4, 8), (9, -9))], + decompose([(0, 0), (4, 8), (9, -9)])) + self.assertEqual( + [((0, 0), (2.0, 4.0)), ((4, 8), (6.5, -0.5)), ((9, -9), (10, 10))], + decompose([(0, 0), (4, 8), (9, -9), (10, 10)])) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/pens/boundsPen.py fonttools-3.0/Tools/fontTools/pens/boundsPen.py --- fonttools-2.4/Tools/fontTools/pens/boundsPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/boundsPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,78 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect +from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds +from fontTools.pens.basePen import BasePen + + +__all__ = ["BoundsPen", "ControlBoundsPen"] + + +class ControlBoundsPen(BasePen): + + """Pen to calculate the "control bounds" of a shape. This is the + bounding box of all control points, so may be larger than the + actual bounding box if there are curves that don't have points + on their extremes. + + When the shape has been drawn, the bounds are available as the + 'bounds' attribute of the pen object. 
It's a 4-tuple: + (xMin, yMin, xMax, yMax) + """ + + def __init__(self, glyphSet): + BasePen.__init__(self, glyphSet) + self.bounds = None + + def _moveTo(self, pt): + bounds = self.bounds + if bounds: + self.bounds = updateBounds(bounds, pt) + else: + x, y = pt + self.bounds = (x, y, x, y) + + def _lineTo(self, pt): + self.bounds = updateBounds(self.bounds, pt) + + def _curveToOne(self, bcp1, bcp2, pt): + bounds = self.bounds + bounds = updateBounds(bounds, bcp1) + bounds = updateBounds(bounds, bcp2) + bounds = updateBounds(bounds, pt) + self.bounds = bounds + + def _qCurveToOne(self, bcp, pt): + bounds = self.bounds + bounds = updateBounds(bounds, bcp) + bounds = updateBounds(bounds, pt) + self.bounds = bounds + + +class BoundsPen(ControlBoundsPen): + + """Pen to calculate the bounds of a shape. It calculates the + correct bounds even when the shape contains curves that don't + have points on their extremes. This is somewhat slower to compute + than the "control bounds". + + When the shape has been drawn, the bounds are available as the + 'bounds' attribute of the pen object. 
It's a 4-tuple: + (xMin, yMin, xMax, yMax) + """ + + def _curveToOne(self, bcp1, bcp2, pt): + bounds = self.bounds + bounds = updateBounds(bounds, pt) + if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds): + bounds = unionRect(bounds, calcCubicBounds( + self._getCurrentPoint(), bcp1, bcp2, pt)) + self.bounds = bounds + + def _qCurveToOne(self, bcp, pt): + bounds = self.bounds + bounds = updateBounds(bounds, pt) + if not pointInRect(bcp, bounds): + bounds = unionRect(bounds, calcQuadraticBounds( + self._getCurrentPoint(), bcp, pt)) + self.bounds = bounds diff -Nru fonttools-2.4/Tools/fontTools/pens/boundsPen_test.py fonttools-3.0/Tools/fontTools/pens/boundsPen_test.py --- fonttools-2.4/Tools/fontTools/pens/boundsPen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/boundsPen_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,66 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen +import unittest + + +def draw_(pen): + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.qCurveTo((50, 75), (60, 50), (50, 25), (0, 0)) + pen.curveTo((-50, 25), (-60, 50), (-50, 75), (0, 100)) + pen.closePath() + + +def bounds_(pen): + return " ".join(["%.0f" % c for c in pen.bounds]) + + +class BoundsPenTest(unittest.TestCase): + def test_draw(self): + pen = BoundsPen(None) + draw_(pen) + self.assertEqual("-55 0 58 100", bounds_(pen)) + + def test_empty(self): + pen = BoundsPen(None) + self.assertEqual(None, pen.bounds) + + def test_curve(self): + pen = BoundsPen(None) + pen.moveTo((0, 0)) + pen.curveTo((20, 10), (90, 40), (0, 0)) + self.assertEqual("0 0 45 20", bounds_(pen)) + + def test_quadraticCurve(self): + pen = BoundsPen(None) + pen.moveTo((0, 0)) + pen.qCurveTo((6, 6), (10, 0)) + self.assertEqual("0 0 10 3", bounds_(pen)) + + +class ControlBoundsPenTest(unittest.TestCase): + def test_draw(self): + pen = 
ControlBoundsPen(None) + draw_(pen) + self.assertEqual("-55 0 60 100", bounds_(pen)) + + def test_empty(self): + pen = ControlBoundsPen(None) + self.assertEqual(None, pen.bounds) + + def test_curve(self): + pen = ControlBoundsPen(None) + pen.moveTo((0, 0)) + pen.curveTo((20, 10), (90, 40), (0, 0)) + self.assertEqual("0 0 90 40", bounds_(pen)) + + def test_quadraticCurve(self): + pen = ControlBoundsPen(None) + pen.moveTo((0, 0)) + pen.qCurveTo((6, 6), (10, 0)) + self.assertEqual("0 0 10 6", bounds_(pen)) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/pens/cocoaPen.py fonttools-3.0/Tools/fontTools/pens/cocoaPen.py --- fonttools-2.4/Tools/fontTools/pens/cocoaPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/cocoaPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,28 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["CocoaPen"] + + +class CocoaPen(BasePen): + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + from AppKit import NSBezierPath + path = NSBezierPath.bezierPath() + self.path = path + + def _moveTo(self, p): + self.path.moveToPoint_(p) + + def _lineTo(self, p): + self.path.lineToPoint_(p) + + def _curveToOne(self, p1, p2, p3): + self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2) + + def _closePath(self): + self.path.closePath() diff -Nru fonttools-2.4/Tools/fontTools/pens/__init__.py fonttools-3.0/Tools/fontTools/pens/__init__.py --- fonttools-2.4/Tools/fontTools/pens/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,4 @@ +"""Empty __init__.py file to signal Python this directory is a package.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * diff -Nru 
fonttools-2.4/Tools/fontTools/pens/pointInsidePen.py fonttools-3.0/Tools/fontTools/pens/pointInsidePen.py --- fonttools-2.4/Tools/fontTools/pens/pointInsidePen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/pointInsidePen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,191 @@ +"""fontTools.pens.pointInsidePen -- Pen implementing "point inside" testing +for shapes. +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen +from fontTools.misc.bezierTools import solveQuadratic, solveCubic + + +__all__ = ["PointInsidePen"] + + +# working around floating point errors +EPSILON = 1e-10 +ONE_PLUS_EPSILON = 1 + EPSILON +ZERO_MINUS_EPSILON = 0 - EPSILON + + +class PointInsidePen(BasePen): + + """This pen implements "point inside" testing: to test whether + a given point lies inside the shape (black) or outside (white). + Instances of this class can be recycled, as long as the + setTestPoint() method is used to set the new point to test. + + Typical usage: + + pen = PointInsidePen(glyphSet, (100, 200)) + outline.draw(pen) + isInside = pen.getResult() + + Both the even-odd algorithm and the non-zero-winding-rule + algorithm are implemented. The latter is the default, specify + True for the evenOdd argument of __init__ or setTestPoint + to use the even-odd algorithm. + """ + + # This class implements the classical "shoot a ray from the test point + # to infinity and count how many times it intersects the outline" (as well + # as the non-zero variant, where the counter is incremented if the outline + # intersects the ray in one direction and decremented if it intersects in + # the other direction). 
+ # I found an amazingly clear explanation of the subtleties involved in + # implementing this correctly for polygons here: + # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html + # I extended the principles outlined on that page to curves. + + def __init__(self, glyphSet, testPoint, evenOdd=0): + BasePen.__init__(self, glyphSet) + self.setTestPoint(testPoint, evenOdd) + + def setTestPoint(self, testPoint, evenOdd=0): + """Set the point to test. Call this _before_ the outline gets drawn.""" + self.testPoint = testPoint + self.evenOdd = evenOdd + self.firstPoint = None + self.intersectionCount = 0 + + def getResult(self): + """After the shape has been drawn, getResult() returns True if the test + point lies within the (black) shape, and False if it doesn't. + """ + if self.firstPoint is not None: + # always make sure the sub paths are closed; the algorithm only works + # for closed paths. + self.closePath() + if self.evenOdd: + result = self.intersectionCount % 2 + else: + result = self.intersectionCount + return not not result + + def _addIntersection(self, goingUp): + if self.evenOdd or goingUp: + self.intersectionCount += 1 + else: + self.intersectionCount -= 1 + + def _moveTo(self, point): + if self.firstPoint is not None: + # always make sure the sub paths are closed; the algorithm only works + # for closed paths. 
+ self.closePath() + self.firstPoint = point + + def _lineTo(self, point): + x, y = self.testPoint + x1, y1 = self._getCurrentPoint() + x2, y2 = point + + if x1 < x and x2 < x: + return + if y1 < y and y2 < y: + return + if y1 >= y and y2 >= y: + return + + dx = x2 - x1 + dy = y2 - y1 + t = (y - y1) / dy + ix = dx * t + x1 + if ix < x: + return + self._addIntersection(y2 > y1) + + def _curveToOne(self, bcp1, bcp2, point): + x, y = self.testPoint + x1, y1 = self._getCurrentPoint() + x2, y2 = bcp1 + x3, y3 = bcp2 + x4, y4 = point + + if x1 < x and x2 < x and x3 < x and x4 < x: + return + if y1 < y and y2 < y and y3 < y and y4 < y: + return + if y1 >= y and y2 >= y and y3 >= y and y4 >= y: + return + + dy = y1 + cy = (y2 - dy) * 3.0 + by = (y3 - y2) * 3.0 - cy + ay = y4 - dy - cy - by + solutions = sorted(solveCubic(ay, by, cy, dy - y)) + solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] + if not solutions: + return + + dx = x1 + cx = (x2 - dx) * 3.0 + bx = (x3 - x2) * 3.0 - cx + ax = x4 - dx - cx - bx + + above = y1 >= y + lastT = None + for t in solutions: + if t == lastT: + continue + lastT = t + t2 = t * t + t3 = t2 * t + + direction = 3*ay*t2 + 2*by*t + cy + if direction == 0.0: + direction = 6*ay*t + 2*by + if direction == 0.0: + direction = ay + goingUp = direction > 0.0 + + xt = ax*t3 + bx*t2 + cx*t + dx + if xt < x: + above = goingUp + continue + + if t == 0.0: + if not goingUp: + self._addIntersection(goingUp) + elif t == 1.0: + if not above: + self._addIntersection(goingUp) + else: + if above != goingUp: + self._addIntersection(goingUp) + #else: + # we're not really intersecting, merely touching the 'top' + above = goingUp + + def _qCurveToOne_unfinished(self, bcp, point): + # XXX need to finish this, for now doing it through a cubic + # (BasePen implements _qCurveTo in terms of a cubic) will + # have to do. 
+ x, y = self.testPoint + x1, y1 = self._getCurrentPoint() + x2, y2 = bcp + x3, y3 = point + c = y1 + b = (y2 - c) * 2.0 + a = y3 - c - b + solutions = sorted(solveQuadratic(a, b, c - y)) + solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] + if not solutions: + return + # XXX + + def _closePath(self): + if self._getCurrentPoint() != self.firstPoint: + self.lineTo(self.firstPoint) + self.firstPoint = None + + _endPath = _closePath diff -Nru fonttools-2.4/Tools/fontTools/pens/pointInsidePen_test.py fonttools-3.0/Tools/fontTools/pens/pointInsidePen_test.py --- fonttools-2.4/Tools/fontTools/pens/pointInsidePen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/pointInsidePen_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,90 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.pointInsidePen import PointInsidePen +import unittest + + +class PointInsidePenTest(unittest.TestCase): + def test_line(self): + def draw_triangles(pen): + pen.moveTo((0,0)); pen.lineTo((10,5)); pen.lineTo((10,0)) + pen.moveTo((9,1)); pen.lineTo((4,1)); pen.lineTo((9,4)) + pen.closePath() + + self.assertEqual( + " *********" + " ** *" + " ** *" + " * *" + " *", + self.render(draw_triangles, even_odd=True)) + + self.assertEqual( + " *********" + " *******" + " *****" + " ***" + " *", + self.render(draw_triangles, even_odd=False)) + + def test_curve(self): + def draw_curves(pen): + pen.moveTo((0,0)); pen.curveTo((9,1), (9,4), (0,5)) + pen.moveTo((10,5)); pen.curveTo((1,4), (1,1), (10,0)) + pen.closePath() + + self.assertEqual( + "*** ***" + "**** ****" + "*** ***" + "**** ****" + "*** ***", + self.render(draw_curves, even_odd=True)) + + self.assertEqual( + "*** ***" + "**********" + "**********" + "**********" + "*** ***", + self.render(draw_curves, even_odd=False)) + + def test_qCurve(self): + def draw_qCurves(pen): + pen.moveTo((0,0)); pen.qCurveTo((15,2), 
(0,5)) + pen.moveTo((10,5)); pen.qCurveTo((-5,3), (10,0)) + pen.closePath() + + self.assertEqual( + "*** **" + "**** ***" + "*** ***" + "*** ****" + "** ***", + self.render(draw_qCurves, even_odd=True)) + + self.assertEqual( + "*** **" + "**********" + "**********" + "**********" + "** ***", + self.render(draw_qCurves, even_odd=False)) + + @staticmethod + def render(draw_function, even_odd): + result = BytesIO() + for y in range(5): + for x in range(10): + pen = PointInsidePen(None, (x + 0.5, y + 0.5), even_odd) + draw_function(pen) + if pen.getResult(): + result.write(b"*") + else: + result.write(b" ") + return tounicode(result.getvalue()) + + +if __name__ == "__main__": + unittest.main() + diff -Nru fonttools-2.4/Tools/fontTools/pens/qtPen.py fonttools-3.0/Tools/fontTools/pens/qtPen.py --- fonttools-2.4/Tools/fontTools/pens/qtPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/qtPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,28 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["QtPen"] + + +class QtPen(BasePen): + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + from PyQt5.QtGui import QPainterPath + path = QPainterPath() + self.path = path + + def _moveTo(self, p): + self.path.moveTo(*p) + + def _lineTo(self, p): + self.path.lineTo(*p) + + def _curveToOne(self, p1, p2, p3): + self.path.cubicTo(*p1+p2+p3) + + def _closePath(self): + self.path.closeSubpath() diff -Nru fonttools-2.4/Tools/fontTools/pens/reportLabPen.py fonttools-3.0/Tools/fontTools/pens/reportLabPen.py --- fonttools-2.4/Tools/fontTools/pens/reportLabPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/reportLabPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,72 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * 
+from fontTools.pens.basePen import BasePen +from reportlab.graphics.shapes import Path + + +class ReportLabPen(BasePen): + + """A pen for drawing onto a reportlab.graphics.shapes.Path object.""" + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + path = Path() + self.path = path + + def _moveTo(self, p): + (x,y) = p + self.path.moveTo(x,y) + + def _lineTo(self, p): + (x,y) = p + self.path.lineTo(x,y) + + def _curveToOne(self, p1, p2, p3): + (x1,y1) = p1 + (x2,y2) = p2 + (x3,y3) = p3 + self.path.curveTo(x1, y1, x2, y2, x3, y3) + + def _closePath(self): + self.path.closePath() + + +if __name__=="__main__": + import sys + if len(sys.argv) < 3: + print("Usage: reportLabPen.py []") + print(" If no image file name is created, by default .png is created.") + print(" example: reportLabPen.py Arial.TTF R test.png") + print(" (The file format will be PNG, regardless of the image file name supplied)") + sys.exit(0) + + from fontTools.ttLib import TTFont + from reportlab.lib import colors + + path = sys.argv[1] + glyphName = sys.argv[2] + if (len(sys.argv) > 3): + imageFile = sys.argv[3] + else: + imageFile = "%s.png" % glyphName + + font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font + gs = font.getGlyphSet() + pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5)) + g = gs[glyphName] + g.draw(pen) + + w, h = g.width, 1000 + from reportlab.graphics import renderPM + from reportlab.graphics.shapes import Group, Drawing, scale + + # Everything is wrapped in a group to allow transformations. 
+ g = Group(pen.path) + g.translate(0, 200) + g.scale(0.3, 0.3) + + d = Drawing(w, h) + d.add(g) + + renderPM.drawToFile(d, imageFile, fmt="PNG") diff -Nru fonttools-2.4/Tools/fontTools/pens/transformPen.py fonttools-3.0/Tools/fontTools/pens/transformPen.py --- fonttools-2.4/Tools/fontTools/pens/transformPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/pens/transformPen.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,65 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import AbstractPen + + +__all__ = ["TransformPen"] + + +class TransformPen(AbstractPen): + + """Pen that transforms all coordinates using a Affine transformation, + and passes them to another pen. + """ + + def __init__(self, outPen, transformation): + """The 'outPen' argument is another pen object. It will receive the + transformed coordinates. The 'transformation' argument can either + be a six-tuple, or a fontTools.misc.transform.Transform object. 
+ """ + if not hasattr(transformation, "transformPoint"): + from fontTools.misc.transform import Transform + transformation = Transform(*transformation) + self._transformation = transformation + self._transformPoint = transformation.transformPoint + self._outPen = outPen + self._stack = [] + + def moveTo(self, pt): + self._outPen.moveTo(self._transformPoint(pt)) + + def lineTo(self, pt): + self._outPen.lineTo(self._transformPoint(pt)) + + def curveTo(self, *points): + self._outPen.curveTo(*self._transformPoints(points)) + + def qCurveTo(self, *points): + if points[-1] is None: + points = self._transformPoints(points[:-1]) + [None] + else: + points = self._transformPoints(points) + self._outPen.qCurveTo(*points) + + def _transformPoints(self, points): + new = [] + transformPoint = self._transformPoint + for pt in points: + new.append(transformPoint(pt)) + return new + + def closePath(self): + self._outPen.closePath() + + def addComponent(self, glyphName, transformation): + transformation = self._transformation.transform(transformation) + self._outPen.addComponent(glyphName, transformation) + + +if __name__ == "__main__": + from fontTools.pens.basePen import _TestPen + pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0)) + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) + pen.closePath() diff -Nru fonttools-2.4/Tools/fontTools/subset.py fonttools-3.0/Tools/fontTools/subset.py --- fonttools-2.4/Tools/fontTools/subset.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/subset.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,2742 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.ttLib.tables import otTables +from fontTools.misc import psCharStrings +import sys +import struct +import time +import array + +__usage__ = "pyftsubset font-file [glyph...] [--option=value]..." + +__doc__="""\ +pyftsubset -- OpenType font subsetter and optimizer + + pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. + It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) + font file. The subsetted glyph set is based on the specified glyphs + or characters, and specified OpenType layout features. + + The tool also performs some size-reducing optimizations, aimed for using + subset fonts as webfonts. Individual optimizations can be enabled or + disabled, and are enabled by default when they are safe. + +Usage: + """+__usage__+""" + + At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, + --text, --text-file, --unicodes, or --unicodes-file, must be specified. + +Arguments: + font-file + The input font file. + glyph + Specify one or more glyph identifiers to include in the subset. Must be + PS glyph names, or the special string '*' to keep the entire glyph set. + +Initial glyph set specification: + These options populate the initial glyph set. Same option can appear + multiple times, and the results are accummulated. + --gids=[,...] + Specify comma/whitespace-separated list of glyph IDs or ranges as + decimal numbers. For example, --gids=10-12,14 adds glyphs with + numbers 10, 11, 12, and 14. + --gids-file= + Like --gids but reads from a file. Anything after a '#' on any line + is ignored as comments. + --glyphs=[,...] + Specify comma/whitespace-separated PS glyph names to add to the subset. + Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc + that are accepted on the command line. 
The special string '*' wil keep + the entire glyph set. + --glyphs-file= + Like --glyphs but reads from a file. Anything after a '#' on any line + is ignored as comments. + --text= + Specify characters to include in the subset, as UTF-8 string. + --text-file= + Like --text but reads from a file. Newline character are not added to + the subset. + --unicodes=[,...] + Specify comma/whitespace-separated list of Unicode codepoints or + ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. + For example, --unicodes=41-5a,61-7a adds ASCII letters, so does + the more verbose --unicodes=U+0041-005A,U+0061-007A. + The special strings '*' will choose all Unicode characters mapped + by the font. + --unicodes-file= + Like --unicodes, but reads from a file. Anything after a '#' on any + line in the file is ignored as comments. + --ignore-missing-glyphs + Do not fail if some requested glyphs or gids are not available in + the font. + --no-ignore-missing-glyphs + Stop and fail if some requested glyphs or gids are not available + in the font. [default] + --ignore-missing-unicodes [default] + Do not fail if some requested Unicode characters (including those + indirectly specified using --text or --text-file) are not available + in the font. + --no-ignore-missing-unicodes + Stop and fail if some requested Unicode characters are not available + in the font. + Note the default discrepancy between ignoring missing glyphs versus + unicodes. This is for historical reasons and in the future + --no-ignore-missing-unicodes might become default. + +Other options: + For the other options listed below, to see the current value of the option, + pass a value of '?' to it, with or without a '='. + Examples: + $ pyftsubset --glyph-names? + Current setting for 'glyph-names' is: False + $ ./pyftsubset --name-IDs=? + Current setting for 'name-IDs' is: [1, 2] + $ ./pyftsubset --hinting? --no-hinting --hinting? 
+ Current setting for 'hinting' is: True + Current setting for 'hinting' is: False + +Output options: + --output-file= + The output font file. If not specified, the subsetted font + will be saved in as font-file.subset. + --flavor= + Specify flavor of output font file. May be 'woff' or 'woff2'. + Note that WOFF2 requires the Brotli Python extension, available + at https://github.com/google/brotli + +Glyph set expansion: + These options control how additional glyphs are added to the subset. + --notdef-glyph + Add the '.notdef' glyph to the subset (ie, keep it). [default] + --no-notdef-glyph + Drop the '.notdef' glyph unless specified in the glyph set. This + saves a few bytes, but is not possible for Postscript-flavored + fonts, as those require '.notdef'. For TrueType-flavored fonts, + this works fine as long as no unsupported glyphs are requested + from the font. + --notdef-outline + Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is + used when glyphs not supported by the font are to be shown. It is not + needed otherwise. + --no-notdef-outline + When including a '.notdef' glyph, remove its outline. This saves + a few bytes. [default] + --recommended-glyphs + Add glyphs 0, 1, 2, and 3 to the subset, as recommended for + TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. + Some legacy software might require this, but no modern system does. + --no-recommended-glyphs + Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in + glyph set. [default] + --layout-features[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of OpenType layout feature tags that will be preserved. + Glyph variants used by the preserved features are added to the + specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', + 'kern', 'liga', 'locl', 'mark', 'mkmk', 'rclt', 'rlig' and all features + required for script shaping are preserved. To see the full list, try + '--layout-features=?'. 
Use '*' to keep all features. + Multiple --layout-features options can be provided if necessary. + Examples: + --layout-features+=onum,pnum,ss01 + * Keep the default set of features and 'onum', 'pnum', 'ss01'. + --layout-features-='mark','mkmk' + * Keep the default set of features but drop 'mark' and 'mkmk'. + --layout-features='kern' + * Only keep the 'kern' feature, drop all others. + --layout-features='' + * Drop all features. + --layout-features='*' + * Keep all features. + --layout-features+=aalt --layout-features-=vrt2 + * Keep default set of features plus 'aalt', but drop 'vrt2'. + +Hinting options: + --hinting + Keep hinting [default] + --no-hinting + Drop glyph-specific hinting and font-wide hinting tables, as well + as remove hinting-related bits and pieces from other tables (eg. GPOS). + See --hinting-tables for list of tables that are dropped by default. + Instructions and hints are stripped from 'glyf' and 'CFF ' tables + respectively. This produces (sometimes up to 30%) smaller fonts that + are suitable for extremely high-resolution systems, like high-end + mobile devices and retina displays. + XXX Note: Currently there is a known bug in 'CFF ' hint stripping that + might make the font unusable as a webfont as they will be rejected by + OpenType Sanitizer used in common browsers. For more information see: + https://github.com/behdad/fonttools/issues/144 + The --desubroutinize options works around that bug. + +Optimization options: + --desubroutinize + Remove CFF use of subroutinizes. Subroutinization is a way to make CFF + fonts smaller. For small subsets however, desubroutinizing might make + the font smaller. It has even been reported that desubroutinized CFF + fonts compress better (produce smaller output) WOFF and WOFF2 fonts. + Also see note under --no-hinting. + --no-desubroutinize [default] + Leave CFF subroutinizes as is, only throw away unused subroutinizes. + +Font table options: + --drop-tables[+|-]=
[,
...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of tables that will be be dropped. + By default, the following tables are dropped: + 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH' + and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill' + and color tables: 'CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'. + The tool will attempt to subset the remaining tables. + Examples: + --drop-tables-='SVG ' + * Drop the default set of tables but keep 'SVG '. + --drop-tables+=GSUB + * Drop the default set of tables and 'GSUB'. + --drop-tables=DSIG + * Only drop the 'DSIG' table, keep all others. + --drop-tables= + * Keep all tables. + --no-subset-tables+=
[,
...] + Add to the set of tables that will not be subsetted. + By default, the following tables are included in this list, as + they do not need subsetting (ignore the fact that 'loca' is listed + here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', + 'name', 'cvt ', 'fpgm', 'prep', 'VMDX', and 'DSIG'. Tables that the tool + does not know how to subset and are not specified here will be dropped + from the font. + Example: + --no-subset-tables+=FFTM + * Keep 'FFTM' table in the font by preventing subsetting. + --hinting-tables[-]=
[,
...] + Specify (=), add to (+=) or exclude from (-=) the list of font-wide + hinting tables that will be dropped if --no-hinting is specified, + Examples: + --hinting-tables-='VDMX' + * Drop font-wide hinting tables except 'VDMX'. + --hinting-tables='' + * Keep all font-wide hinting tables (but strip hints from glyphs). + --legacy-kern + Keep TrueType 'kern' table even when OpenType 'GPOS' is available. + --no-legacy-kern + Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default] + +Font naming options: + These options control what is retained in the 'name' table. For numerical + codes, see: http://www.microsoft.com/typography/otspec/name.htm + --name-IDs[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the set of 'name' table + entry nameIDs that will be preserved. By default only nameID 1 (Family) + and nameID 2 (Style) are preserved. Use '*' to keep all entries. + Examples: + --name-IDs+=0,4,6 + * Also keep Copyright, Full name and PostScript name entry. + --name-IDs='' + * Drop all 'name' table entries. + --name-IDs='*' + * keep all 'name' table entries + --name-legacy + Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.). + XXX Note: This might be needed for some fonts that have no Unicode name + entires for English. See: https://github.com/behdad/fonttools/issues/146 + --no-name-legacy + Drop legacy (non-Unicode) 'name' table entries [default] + --name-languages[+|-]=[,] + Specify (=), add to (+=) or exclude from (-=) the set of 'name' table + langIDs that will be preserved. By default only records with langID + 0x0409 (English) are preserved. Use '*' to keep all langIDs. + --obfuscate-names + Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4, + and 6 with dummy strings (it is still fully functional as webfont). + +Glyph naming and encoding options: + --glyph-names + Keep PS glyph names in TT-flavored fonts. In general glyph names are + not needed for correct use of the font. 
However, some PDF generators + and PDF viewers might rely on glyph names to extract Unicode text + from PDF documents. + --no-glyph-names + Drop PS glyph names in TT-flavored fonts, by using 'post' table + version 3.0. [default] + --legacy-cmap + Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.). + --no-legacy-cmap + Drop the legacy 'cmap' subtables. [default] + --symbol-cmap + Keep the 3.0 symbol 'cmap'. + --no-symbol-cmap + Drop the 3.0 symbol 'cmap'. [default] + +Other font-specific options: + --recalc-bounds + Recalculate font bounding boxes. + --no-recalc-bounds + Keep original font bounding boxes. This is faster and still safe + for all practical purposes. [default] + --recalc-timestamp + Set font 'modified' timestamp to current time. + --no-recalc-timestamp + Do not modify font 'modified' timestamp. [default] + --canonical-order + Order tables as recommended in the OpenType standard. This is not + required by the standard, nor by any known implementation. + --no-canonical-order + Keep original order of font tables. This is faster. [default] + +Application options: + --verbose + Display verbose information of the subsetting process. + --timing + Display detailed timing information of the subsetting process. + --xml + Display the TTX XML representation of subsetted font. + +Example: + Produce a subset containing the characters ' !"#$%' without performing + size-reducing optimizations: + + $ pyftsubset font.ttf --unicodes="U+0020-0025" \\ + --layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\ + --notdef-glyph --notdef-outline --recommended-glyphs \\ + --name-IDs='*' --name-legacy --name-languages='*' +""" + + +def _add_method(*clazzes): + """Returns a decorator function that adds a new method to one or + more classes.""" + def wrapper(method): + for clazz in clazzes: + assert clazz.__name__ != 'DefaultTable', \ + 'Oops, table class not found.' + assert not hasattr(clazz, method.__name__), \ + "Oops, class '%s' has method '%s'." 
% (clazz.__name__, + method.__name__) + setattr(clazz, method.__name__, method) + return None + return wrapper + +def _uniq_sort(l): + return sorted(set(l)) + +def _set_update(s, *others): + # Jython's set.update only takes one other argument. + # Emulate real set.update... + for other in others: + s.update(other) + +def _dict_subset(d, glyphs): + return {g:d[g] for g in glyphs} + + +@_add_method(otTables.Coverage) +def intersect(self, glyphs): + """Returns ascending list of matching coverage values.""" + return [i for i,g in enumerate(self.glyphs) if g in glyphs] + +@_add_method(otTables.Coverage) +def intersect_glyphs(self, glyphs): + """Returns set of intersecting glyphs.""" + return set(g for g in self.glyphs if g in glyphs) + +@_add_method(otTables.Coverage) +def subset(self, glyphs): + """Returns ascending list of remaining coverage values.""" + indices = self.intersect(glyphs) + self.glyphs = [g for g in self.glyphs if g in glyphs] + return indices + +@_add_method(otTables.Coverage) +def remap(self, coverage_map): + """Remaps coverage.""" + self.glyphs = [self.glyphs[i] for i in coverage_map] + +@_add_method(otTables.ClassDef) +def intersect(self, glyphs): + """Returns ascending list of matching class values.""" + return _uniq_sort( + ([0] if any(g not in self.classDefs for g in glyphs) else []) + + [v for g,v in self.classDefs.items() if g in glyphs]) + +@_add_method(otTables.ClassDef) +def intersect_class(self, glyphs, klass): + """Returns set of glyphs matching class.""" + if klass == 0: + return set(g for g in glyphs if g not in self.classDefs) + return set(g for g,v in self.classDefs.items() + if v == klass and g in glyphs) + +@_add_method(otTables.ClassDef) +def subset(self, glyphs, remap=False): + """Returns ascending list of remaining classes.""" + self.classDefs = {g:v for g,v in self.classDefs.items() if g in glyphs} + # Note: while class 0 has the special meaning of "not matched", + # if no glyph will ever /not match/, we can optimize class 0 out 
too. + indices = _uniq_sort( + ([0] if any(g not in self.classDefs for g in glyphs) else []) + + list(self.classDefs.values())) + if remap: + self.remap(indices) + return indices + +@_add_method(otTables.ClassDef) +def remap(self, class_map): + """Remaps classes.""" + self.classDefs = {g:class_map.index(v) for g,v in self.classDefs.items()} + +@_add_method(otTables.SingleSubst) +def closure_glyphs(self, s, cur_glyphs): + s.glyphs.update(v for g,v in self.mapping.items() if g in cur_glyphs) + +@_add_method(otTables.SingleSubst) +def subset_glyphs(self, s): + self.mapping = {g:v for g,v in self.mapping.items() + if g in s.glyphs and v in s.glyphs} + return bool(self.mapping) + +@_add_method(otTables.MultipleSubst) +def closure_glyphs(self, s, cur_glyphs): + indices = self.Coverage.intersect(cur_glyphs) + _set_update(s.glyphs, *(self.Sequence[i].Substitute for i in indices)) + +@_add_method(otTables.MultipleSubst) +def subset_glyphs(self, s): + indices = self.Coverage.subset(s.glyphs) + self.Sequence = [self.Sequence[i] for i in indices] + # Now drop rules generating glyphs we don't want + indices = [i for i,seq in enumerate(self.Sequence) + if all(sub in s.glyphs for sub in seq.Substitute)] + self.Sequence = [self.Sequence[i] for i in indices] + self.Coverage.remap(indices) + self.SequenceCount = len(self.Sequence) + return bool(self.SequenceCount) + +@_add_method(otTables.AlternateSubst) +def closure_glyphs(self, s, cur_glyphs): + _set_update(s.glyphs, *(vlist for g,vlist in self.alternates.items() + if g in cur_glyphs)) + +@_add_method(otTables.AlternateSubst) +def subset_glyphs(self, s): + self.alternates = {g:vlist + for g,vlist in self.alternates.items() + if g in s.glyphs and + all(v in s.glyphs for v in vlist)} + return bool(self.alternates) + +@_add_method(otTables.LigatureSubst) +def closure_glyphs(self, s, cur_glyphs): + _set_update(s.glyphs, *([seq.LigGlyph for seq in seqs + if all(c in s.glyphs for c in seq.Component)] + for g,seqs in 
self.ligatures.items() + if g in cur_glyphs)) + +@_add_method(otTables.LigatureSubst) +def subset_glyphs(self, s): + self.ligatures = {g:v for g,v in self.ligatures.items() + if g in s.glyphs} + self.ligatures = {g:[seq for seq in seqs + if seq.LigGlyph in s.glyphs and + all(c in s.glyphs for c in seq.Component)] + for g,seqs in self.ligatures.items()} + self.ligatures = {g:v for g,v in self.ligatures.items() if v} + return bool(self.ligatures) + +@_add_method(otTables.ReverseChainSingleSubst) +def closure_glyphs(self, s, cur_glyphs): + if self.Format == 1: + indices = self.Coverage.intersect(cur_glyphs) + if(not indices or + not all(c.intersect(s.glyphs) + for c in self.LookAheadCoverage + self.BacktrackCoverage)): + return + s.glyphs.update(self.Substitute[i] for i in indices) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ReverseChainSingleSubst) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.Substitute = [self.Substitute[i] for i in indices] + # Now drop rules generating glyphs we don't want + indices = [i for i,sub in enumerate(self.Substitute) + if sub in s.glyphs] + self.Substitute = [self.Substitute[i] for i in indices] + self.Coverage.remap(indices) + self.GlyphCount = len(self.Substitute) + return bool(self.GlyphCount and + all(c.subset(s.glyphs) + for c in self.LookAheadCoverage+self.BacktrackCoverage)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.SinglePos) +def subset_glyphs(self, s): + if self.Format == 1: + return len(self.Coverage.subset(s.glyphs)) + elif self.Format == 2: + indices = self.Coverage.subset(s.glyphs) + self.Value = [self.Value[i] for i in indices] + self.ValueCount = len(self.Value) + return bool(self.ValueCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.SinglePos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables + self.ValueFormat &= 
~0x00F0 + return True + +@_add_method(otTables.PairPos) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.PairSet = [self.PairSet[i] for i in indices] + for p in self.PairSet: + p.PairValueRecord = [r for r in p.PairValueRecord + if r.SecondGlyph in s.glyphs] + p.PairValueCount = len(p.PairValueRecord) + # Remove empty pairsets + indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount] + self.Coverage.remap(indices) + self.PairSet = [self.PairSet[i] for i in indices] + self.PairSetCount = len(self.PairSet) + return bool(self.PairSetCount) + elif self.Format == 2: + class1_map = self.ClassDef1.subset(s.glyphs, remap=True) + class2_map = self.ClassDef2.subset(s.glyphs, remap=True) + self.Class1Record = [self.Class1Record[i] for i in class1_map] + for c in self.Class1Record: + c.Class2Record = [c.Class2Record[i] for i in class2_map] + self.Class1Count = len(class1_map) + self.Class2Count = len(class2_map) + return bool(self.Class1Count and + self.Class2Count and + self.Coverage.subset(s.glyphs)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.PairPos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables + self.ValueFormat1 &= ~0x00F0 + self.ValueFormat2 &= ~0x00F0 + return True + +@_add_method(otTables.CursivePos) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.EntryExitRecord = [self.EntryExitRecord[i] for i in indices] + self.EntryExitCount = len(self.EntryExitRecord) + return bool(self.EntryExitCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Anchor) +def prune_hints(self): + # Drop device tables / contour anchor point + self.ensureDecompiled() + self.Format = 1 + +@_add_method(otTables.CursivePos) +def prune_post_subset(self, options): + if not options.hinting: + for rec in self.EntryExitRecord: + if rec.EntryAnchor: 
rec.EntryAnchor.prune_hints() + if rec.ExitAnchor: rec.ExitAnchor.prune_hints() + return True + +@_add_method(otTables.MarkBasePos) +def subset_glyphs(self, s): + if self.Format == 1: + mark_indices = self.MarkCoverage.subset(s.glyphs) + self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] + for i in mark_indices] + self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) + base_indices = self.BaseCoverage.subset(s.glyphs) + self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i] + for i in base_indices] + self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) + self.ClassCount = len(class_indices) + for m in self.MarkArray.MarkRecord: + m.Class = class_indices.index(m.Class) + for b in self.BaseArray.BaseRecord: + b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices] + return bool(self.ClassCount and + self.MarkArray.MarkCount and + self.BaseArray.BaseCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkBasePos) +def prune_post_subset(self, options): + if not options.hinting: + for m in self.MarkArray.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for b in self.BaseArray.BaseRecord: + for a in b.BaseAnchor: + if a: + a.prune_hints() + return True + +@_add_method(otTables.MarkLigPos) +def subset_glyphs(self, s): + if self.Format == 1: + mark_indices = self.MarkCoverage.subset(s.glyphs) + self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] + for i in mark_indices] + self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) + ligature_indices = self.LigatureCoverage.subset(s.glyphs) + self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i] + for i in ligature_indices] + self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) + self.ClassCount = len(class_indices) + for m 
in self.MarkArray.MarkRecord: + m.Class = class_indices.index(m.Class) + for l in self.LigatureArray.LigatureAttach: + for c in l.ComponentRecord: + c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices] + return bool(self.ClassCount and + self.MarkArray.MarkCount and + self.LigatureArray.LigatureCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkLigPos) +def prune_post_subset(self, options): + if not options.hinting: + for m in self.MarkArray.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for l in self.LigatureArray.LigatureAttach: + for c in l.ComponentRecord: + for a in c.LigatureAnchor: + if a: + a.prune_hints() + return True + +@_add_method(otTables.MarkMarkPos) +def subset_glyphs(self, s): + if self.Format == 1: + mark1_indices = self.Mark1Coverage.subset(s.glyphs) + self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i] + for i in mark1_indices] + self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord) + mark2_indices = self.Mark2Coverage.subset(s.glyphs) + self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i] + for i in mark2_indices] + self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord) + self.ClassCount = len(class_indices) + for m in self.Mark1Array.MarkRecord: + m.Class = class_indices.index(m.Class) + for b in self.Mark2Array.Mark2Record: + b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices] + return bool(self.ClassCount and + self.Mark1Array.MarkCount and + self.Mark2Array.MarkCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkMarkPos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables or contour anchor point + for m in self.Mark1Array.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for b in self.Mark2Array.Mark2Record: + for m in b.Mark2Anchor: + if m: + m.prune_hints() 
+ return True + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def subset_lookups(self, lookup_indices): + pass + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def collect_lookups(self): + return [] + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def prune_post_subset(self, options): + return True + +@_add_method(otTables.SingleSubst, + otTables.AlternateSubst, + otTables.ReverseChainSingleSubst) +def may_have_non_1to1(self): + return False + +@_add_method(otTables.MultipleSubst, + otTables.LigatureSubst, + otTables.ContextSubst, + otTables.ChainContextSubst) +def may_have_non_1to1(self): + return True + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def __subset_classify_context(self): + + class ContextHelper(object): + def __init__(self, klass, Format): + if klass.__name__.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klass.__name__.startswith('Chain'): + Chain = 'Chain' + else: + Chain = '' + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(None,) 
+ ChainContextData = lambda r:(None, None, None) + RuleData = lambda r:(r.Input,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + SetRuleData = None + ChainSetRuleData = None + elif Format == 2: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(r.ClassDef,) + ChainContextData = lambda r:(r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) + RuleData = lambda r:(r.Class,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d):(r.Class,) = d + def ChainSetRuleData(r, d):(r.Backtrack, r.Input, r.LookAhead) = d + elif Format == 3: + Coverage = lambda r: r.Coverage[0] + ChainCoverage = lambda r: r.InputCoverage[0] + ContextData = None + ChainContextData = None + RuleData = lambda r: r.Coverage + ChainRuleData = lambda r:(r.BacktrackCoverage + + r.InputCoverage + + r.LookAheadCoverage) + SetRuleData = None + ChainSetRuleData = None + else: + assert 0, "unknown format: %s" % Format + + if Chain: + self.Coverage = ChainCoverage + self.ContextData = ChainContextData + self.RuleData = ChainRuleData + self.SetRuleData = ChainSetRuleData + else: + self.Coverage = Coverage + self.ContextData = ContextData + self.RuleData = RuleData + self.SetRuleData = SetRuleData + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleCount = ChainTyp+'RuleCount' + self.RuleSet = ChainTyp+'RuleSet' + self.RuleSetCount = ChainTyp+'RuleSetCount' + self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleCount = ChainTyp+'ClassRuleCount' + self.RuleSet = ChainTyp+'ClassSet' + self.RuleSetCount = ChainTyp+'ClassSetCount' + self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c + else (set(glyphs) if r == 0 else set())) + + self.ClassDef = 'InputClassDef' if Chain else 'ClassDef' + self.ClassDefIndex = 1 if Chain else 0 + self.Input = 'Input' if Chain else 'Class' + + if self.Format not in [1, 2, 
3]: + return None # Don't shoot the messenger; let it go + if not hasattr(self.__class__, "__ContextHelpers"): + self.__class__.__ContextHelpers = {} + if self.Format not in self.__class__.__ContextHelpers: + helper = ContextHelper(self.__class__, self.Format) + self.__class__.__ContextHelpers[self.Format] = helper + return self.__class__.__ContextHelpers[self.Format] + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst) +def closure_glyphs(self, s, cur_glyphs): + c = self.__subset_classify_context() + + indices = c.Coverage(self).intersect(cur_glyphs) + if not indices: + return [] + cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs) + + if self.Format == 1: + ContextData = c.ContextData(self) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + for i in indices: + if i >= rssCount or not rss[i]: continue + for r in getattr(rss[i], c.Rule): + if not r: continue + if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) + for cd,klist in zip(ContextData, c.RuleData(r))): + continue + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? 
+ pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset([c.Coverage(self).glyphs[i]]) + else: + pos_glyphs = frozenset([r.Input[seqi - 1]]) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(r.Input)+2)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + elif self.Format == 2: + ClassDef = getattr(self, c.ClassDef) + indices = ClassDef.intersect(cur_glyphs) + ContextData = c.ContextData(self) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + for i in indices: + if i >= rssCount or not rss[i]: continue + for r in getattr(rss[i], c.Rule): + if not r: continue + if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) + for cd,klist in zip(ContextData, c.RuleData(r))): + continue + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? + pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i)) + else: + pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1])) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(getattr(r, c.Input))+2)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + elif self.Format == 3: + if not all(x.intersect(s.glyphs) for x in c.RuleData(self)): + return [] + r = self + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? 
+ pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset(cur_glyphs) + else: + pos_glyphs = frozenset(r.InputCoverage[seqi].intersect_glyphs(s.glyphs)) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(r.InputCoverage)+1)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ContextPos, + otTables.ChainContextSubst, + otTables.ChainContextPos) +def subset_glyphs(self, s): + c = self.__subset_classify_context() + + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + rss = [rss[i] for i in indices if i < rssCount] + for rs in rss: + if not rs: continue + ss = getattr(rs, c.Rule) + ss = [r for r in ss + if r and all(all(g in s.glyphs for g in glist) + for glist in c.RuleData(r))] + setattr(rs, c.Rule, ss) + setattr(rs, c.RuleCount, len(ss)) + # Prune empty rulesets + indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)] + self.Coverage.remap(indices) + rss = [rss[i] for i in indices] + setattr(self, c.RuleSet, rss) + setattr(self, c.RuleSetCount, len(rss)) + return bool(rss) + elif self.Format == 2: + if not self.Coverage.subset(s.glyphs): + return False + ContextData = c.ContextData(self) + klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData] + + # Keep rulesets for class numbers that survived. + indices = klass_maps[c.ClassDefIndex] + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + rss = [rss[i] for i in indices if i < rssCount] + del rssCount + # Delete, but not renumber, unreachable rulesets. 
+ indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs) + rss = [rss if i in indices else None for i,rss in enumerate(rss)] + + for rs in rss: + if not rs: continue + ss = getattr(rs, c.Rule) + ss = [r for r in ss + if r and all(all(k in klass_map for k in klist) + for klass_map,klist in zip(klass_maps, c.RuleData(r)))] + setattr(rs, c.Rule, ss) + setattr(rs, c.RuleCount, len(ss)) + + # Remap rule classes + for r in ss: + c.SetRuleData(r, [[klass_map.index(k) for k in klist] + for klass_map,klist in zip(klass_maps, c.RuleData(r))]) + + # Prune empty rulesets + rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss] + while rss and rss[-1] is None: + del rss[-1] + setattr(self, c.RuleSet, rss) + setattr(self, c.RuleSetCount, len(rss)) + + # TODO: We can do a second round of remapping class values based + # on classes that are actually used in at least one rule. Right + # now we subset classes to c.glyphs only. Or better, rewrite + # the above to do that. + + return bool(rss) + elif self.Format == 3: + return all(x.subset(s.glyphs) for x in c.RuleData(self)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def subset_lookups(self, lookup_indices): + c = self.__subset_classify_context() + + if self.Format in [1, 2]: + for rs in getattr(self, c.RuleSet): + if not rs: continue + for r in getattr(rs, c.Rule): + if not r: continue + setattr(r, c.LookupRecord, + [ll for ll in getattr(r, c.LookupRecord) + if ll and ll.LookupListIndex in lookup_indices]) + for ll in getattr(r, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex) + elif self.Format == 3: + setattr(self, c.LookupRecord, + [ll for ll in getattr(self, c.LookupRecord) + if ll and ll.LookupListIndex in lookup_indices]) + for ll in getattr(self, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = 
lookup_indices.index(ll.LookupListIndex) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def collect_lookups(self): + c = self.__subset_classify_context() + + if self.Format in [1, 2]: + return [ll.LookupListIndex + for rs in getattr(self, c.RuleSet) if rs + for r in getattr(rs, c.Rule) if r + for ll in getattr(r, c.LookupRecord) if ll] + elif self.Format == 3: + return [ll.LookupListIndex + for ll in getattr(self, c.LookupRecord) if ll] + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst) +def closure_glyphs(self, s, cur_glyphs): + if self.Format == 1: + self.ExtSubTable.closure_glyphs(s, cur_glyphs) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst) +def may_have_non_1to1(self): + if self.Format == 1: + return self.ExtSubTable.may_have_non_1to1() + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def subset_glyphs(self, s): + if self.Format == 1: + return self.ExtSubTable.subset_glyphs(s) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def prune_post_subset(self, options): + if self.Format == 1: + return self.ExtSubTable.prune_post_subset(options) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def subset_lookups(self, lookup_indices): + if self.Format == 1: + return self.ExtSubTable.subset_lookups(lookup_indices) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def collect_lookups(self): + if self.Format == 1: + return self.ExtSubTable.collect_lookups() + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Lookup) +def 
closure_glyphs(self, s, cur_glyphs=None): + if cur_glyphs is None: + cur_glyphs = frozenset(s.glyphs) + + # Memoize + if (id(self), cur_glyphs) in s._doneLookups: + return + s._doneLookups.add((id(self), cur_glyphs)) + + if self in s._activeLookups: + raise Exception("Circular loop in lookup recursion") + s._activeLookups.append(self) + for st in self.SubTable: + if not st: continue + st.closure_glyphs(s, cur_glyphs) + assert(s._activeLookups[-1] == self) + del s._activeLookups[-1] + +@_add_method(otTables.Lookup) +def subset_glyphs(self, s): + self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)] + self.SubTableCount = len(self.SubTable) + return bool(self.SubTableCount) + +@_add_method(otTables.Lookup) +def prune_post_subset(self, options): + ret = False + for st in self.SubTable: + if not st: continue + if st.prune_post_subset(options): ret = True + return ret + +@_add_method(otTables.Lookup) +def subset_lookups(self, lookup_indices): + for s in self.SubTable: + s.subset_lookups(lookup_indices) + +@_add_method(otTables.Lookup) +def collect_lookups(self): + return _uniq_sort(sum((st.collect_lookups() for st in self.SubTable + if st), [])) + +@_add_method(otTables.Lookup) +def may_have_non_1to1(self): + return any(st.may_have_non_1to1() for st in self.SubTable if st) + +@_add_method(otTables.LookupList) +def subset_glyphs(self, s): + """Returns the indices of nonempty lookups.""" + return [i for i,l in enumerate(self.Lookup) if l and l.subset_glyphs(s)] + +@_add_method(otTables.LookupList) +def prune_post_subset(self, options): + ret = False + for l in self.Lookup: + if not l: continue + if l.prune_post_subset(options): ret = True + return ret + +@_add_method(otTables.LookupList) +def subset_lookups(self, lookup_indices): + self.ensureDecompiled() + self.Lookup = [self.Lookup[i] for i in lookup_indices + if i < self.LookupCount] + self.LookupCount = len(self.Lookup) + for l in self.Lookup: + l.subset_lookups(lookup_indices) + 
+@_add_method(otTables.LookupList) +def neuter_lookups(self, lookup_indices): + """Sets lookups not in lookup_indices to None.""" + self.ensureDecompiled() + self.Lookup = [l if i in lookup_indices else None for i,l in enumerate(self.Lookup)] + +@_add_method(otTables.LookupList) +def closure_lookups(self, lookup_indices): + lookup_indices = _uniq_sort(lookup_indices) + recurse = lookup_indices + while True: + recurse_lookups = sum((self.Lookup[i].collect_lookups() + for i in recurse if i < self.LookupCount), []) + recurse_lookups = [l for l in recurse_lookups + if l not in lookup_indices and l < self.LookupCount] + if not recurse_lookups: + return _uniq_sort(lookup_indices) + recurse_lookups = _uniq_sort(recurse_lookups) + lookup_indices.extend(recurse_lookups) + recurse = recurse_lookups + +@_add_method(otTables.Feature) +def subset_lookups(self, lookup_indices): + self.LookupListIndex = [l for l in self.LookupListIndex + if l in lookup_indices] + # Now map them. + self.LookupListIndex = [lookup_indices.index(l) + for l in self.LookupListIndex] + self.LookupCount = len(self.LookupListIndex) + return self.LookupCount or self.FeatureParams + +@_add_method(otTables.Feature) +def collect_lookups(self): + return self.LookupListIndex[:] + +@_add_method(otTables.FeatureList) +def subset_lookups(self, lookup_indices): + """Returns the indices of nonempty features.""" + # Note: Never ever drop feature 'pref', even if it's empty. + # HarfBuzz chooses shaper for Khmer based on presence of this + # feature. 
See thread at: + # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html + feature_indices = [i for i,f in enumerate(self.FeatureRecord) + if (f.Feature.subset_lookups(lookup_indices) or + f.FeatureTag == 'pref')] + self.subset_features(feature_indices) + return feature_indices + +@_add_method(otTables.FeatureList) +def collect_lookups(self, feature_indices): + return _uniq_sort(sum((self.FeatureRecord[i].Feature.collect_lookups() + for i in feature_indices + if i < self.FeatureCount), [])) + +@_add_method(otTables.FeatureList) +def subset_features(self, feature_indices): + self.ensureDecompiled() + self.FeatureRecord = [self.FeatureRecord[i] for i in feature_indices] + self.FeatureCount = len(self.FeatureRecord) + return bool(self.FeatureCount) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def subset_features(self, feature_indices): + if self.ReqFeatureIndex in feature_indices: + self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex) + else: + self.ReqFeatureIndex = 65535 + self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices] + # Now map them. 
+ self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex + if f in feature_indices] + self.FeatureCount = len(self.FeatureIndex) + return bool(self.FeatureCount or self.ReqFeatureIndex != 65535) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def collect_features(self): + feature_indices = self.FeatureIndex[:] + if self.ReqFeatureIndex != 65535: + feature_indices.append(self.ReqFeatureIndex) + return _uniq_sort(feature_indices) + +@_add_method(otTables.Script) +def subset_features(self, feature_indices): + if(self.DefaultLangSys and + not self.DefaultLangSys.subset_features(feature_indices)): + self.DefaultLangSys = None + self.LangSysRecord = [l for l in self.LangSysRecord + if l.LangSys.subset_features(feature_indices)] + self.LangSysCount = len(self.LangSysRecord) + return bool(self.LangSysCount or self.DefaultLangSys) + +@_add_method(otTables.Script) +def collect_features(self): + feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord] + if self.DefaultLangSys: + feature_indices.append(self.DefaultLangSys.collect_features()) + return _uniq_sort(sum(feature_indices, [])) + +@_add_method(otTables.ScriptList) +def subset_features(self, feature_indices): + self.ScriptRecord = [s for s in self.ScriptRecord + if s.Script.subset_features(feature_indices)] + self.ScriptCount = len(self.ScriptRecord) + return bool(self.ScriptCount) + +@_add_method(otTables.ScriptList) +def collect_features(self): + return _uniq_sort(sum((s.Script.collect_features() + for s in self.ScriptRecord), [])) + +@_add_method(ttLib.getTableClass('GSUB')) +def closure_glyphs(self, s): + s.table = self.table + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) + else: + lookup_indices = [] + if self.table.LookupList: + while True: + orig_glyphs = frozenset(s.glyphs) + 
s._activeLookups = [] + s._doneLookups = set() + for i in lookup_indices: + if i >= self.table.LookupList.LookupCount: continue + if not self.table.LookupList.Lookup[i]: continue + self.table.LookupList.Lookup[i].closure_glyphs(s) + del s._activeLookups, s._doneLookups + if orig_glyphs == s.glyphs: + break + del s.table + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_glyphs(self, s): + s.glyphs = s.glyphs_gsubed + if self.table.LookupList: + lookup_indices = self.table.LookupList.subset_glyphs(s) + else: + lookup_indices = [] + self.subset_lookups(lookup_indices) + return True + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_lookups(self, lookup_indices): + """Retains specified lookups, then removes empty features, language + systems, and scripts.""" + if self.table.LookupList: + self.table.LookupList.subset_lookups(lookup_indices) + if self.table.FeatureList: + feature_indices = self.table.FeatureList.subset_lookups(lookup_indices) + else: + feature_indices = [] + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def neuter_lookups(self, lookup_indices): + """Sets lookups not in lookup_indices to None.""" + if self.table.LookupList: + self.table.LookupList.neuter_lookups(lookup_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_lookups(self, remap=True): + """Remove (default) or neuter unreferenced lookups""" + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) + else: + lookup_indices = [] + if self.table.LookupList: + lookup_indices = self.table.LookupList.closure_lookups(lookup_indices) + else: + lookup_indices = [] + if remap: + 
self.subset_lookups(lookup_indices) + else: + self.neuter_lookups(lookup_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_feature_tags(self, feature_tags): + if self.table.FeatureList: + feature_indices = \ + [i for i,f in enumerate(self.table.FeatureList.FeatureRecord) + if f.FeatureTag in feature_tags] + self.table.FeatureList.subset_features(feature_indices) + else: + feature_indices = [] + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_features(self): + """Remove unreferenced features""" + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + self.table.FeatureList.subset_features(feature_indices) + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_pre_subset(self, options): + # Drop undesired features + if '*' not in options.layout_features: + self.subset_feature_tags(options.layout_features) + # Neuter unreferenced lookups + self.prune_lookups(remap=False) + return True + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def remove_redundant_langsys(self): + table = self.table + if not table.ScriptList or not table.FeatureList: + return + + features = table.FeatureList.FeatureRecord + + for s in table.ScriptList.ScriptRecord: + d = s.Script.DefaultLangSys + if not d: + continue + for lr in s.Script.LangSysRecord[:]: + l = lr.LangSys + # Compare d and l + if len(d.FeatureIndex) != len(l.FeatureIndex): + continue + if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535): + continue + + if d.ReqFeatureIndex != 65535: + if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]: + continue + + for i in range(len(d.FeatureIndex)): + if 
features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]: + break + else: + # LangSys and default are equal; delete LangSys + s.Script.LangSysRecord.remove(lr) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_post_subset(self, options): + table = self.table + + self.prune_lookups() # XXX Is this actually needed?! + + if table.LookupList: + table.LookupList.prune_post_subset(options) + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. + #if not table.LookupList.Lookup: + # table.LookupList = None + + if not table.LookupList: + table.FeatureList = None + + if table.FeatureList: + self.remove_redundant_langsys() + # Remove unreferenced features + self.prune_features() + + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. + #if table.FeatureList and not table.FeatureList.FeatureRecord: + # table.FeatureList = None + + # Never drop scripts themselves as them just being available + # holds semantic significance. + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. 
+ #if table.ScriptList and not table.ScriptList.ScriptRecord: + # table.ScriptList = None + + return True + +@_add_method(ttLib.getTableClass('GDEF')) +def subset_glyphs(self, s): + glyphs = s.glyphs_gsubed + table = self.table + if table.LigCaretList: + indices = table.LigCaretList.Coverage.subset(glyphs) + table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i] + for i in indices] + table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph) + if table.MarkAttachClassDef: + table.MarkAttachClassDef.classDefs = \ + {g:v for g,v in table.MarkAttachClassDef.classDefs.items() + if g in glyphs} + if table.GlyphClassDef: + table.GlyphClassDef.classDefs = \ + {g:v for g,v in table.GlyphClassDef.classDefs.items() + if g in glyphs} + if table.AttachList: + indices = table.AttachList.Coverage.subset(glyphs) + GlyphCount = table.AttachList.GlyphCount + table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i] + for i in indices + if i < GlyphCount] + table.AttachList.GlyphCount = len(table.AttachList.AttachPoint) + if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef: + for coverage in table.MarkGlyphSetsDef.Coverage: + coverage.subset(glyphs) + # TODO: The following is disabled. If enabling, we need to go fixup all + # lookups that use MarkFilteringSet and map their set. 
+ # indices = table.MarkGlyphSetsDef.Coverage = \ + # [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs] + return True + +@_add_method(ttLib.getTableClass('GDEF')) +def prune_post_subset(self, options): + table = self.table + # XXX check these against OTS + if table.LigCaretList and not table.LigCaretList.LigGlyphCount: + table.LigCaretList = None + if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs: + table.MarkAttachClassDef = None + if table.GlyphClassDef and not table.GlyphClassDef.classDefs: + table.GlyphClassDef = None + if table.AttachList and not table.AttachList.GlyphCount: + table.AttachList = None + if (hasattr(table, "MarkGlyphSetsDef") and + table.MarkGlyphSetsDef and + not table.MarkGlyphSetsDef.Coverage): + table.MarkGlyphSetsDef = None + if table.Version == 0x00010002/0x10000: + table.Version = 1.0 + return bool(table.LigCaretList or + table.MarkAttachClassDef or + table.GlyphClassDef or + table.AttachList or + (table.Version >= 0x00010002/0x10000 and table.MarkGlyphSetsDef)) + +@_add_method(ttLib.getTableClass('kern')) +def prune_pre_subset(self, options): + # Prune unknown kern table types + self.kernTables = [t for t in self.kernTables if hasattr(t, 'kernTable')] + return bool(self.kernTables) + +@_add_method(ttLib.getTableClass('kern')) +def subset_glyphs(self, s): + glyphs = s.glyphs_gsubed + for t in self.kernTables: + t.kernTable = {(a,b):v for (a,b),v in t.kernTable.items() + if a in glyphs and b in glyphs} + self.kernTables = [t for t in self.kernTables if t.kernTable] + return bool(self.kernTables) + +@_add_method(ttLib.getTableClass('vmtx')) +def subset_glyphs(self, s): + self.metrics = _dict_subset(self.metrics, s.glyphs) + return bool(self.metrics) + +@_add_method(ttLib.getTableClass('hmtx')) +def subset_glyphs(self, s): + self.metrics = _dict_subset(self.metrics, s.glyphs) + return True # Required table + +@_add_method(ttLib.getTableClass('hdmx')) +def subset_glyphs(self, s): + self.hdmx = {sz:_dict_subset(l, 
s.glyphs) for sz,l in self.hdmx.items()} + return bool(self.hdmx) + +@_add_method(ttLib.getTableClass('VORG')) +def subset_glyphs(self, s): + self.VOriginRecords = {g:v for g,v in self.VOriginRecords.items() + if g in s.glyphs} + self.numVertOriginYMetrics = len(self.VOriginRecords) + return True # Never drop; has default metrics + +@_add_method(ttLib.getTableClass('post')) +def prune_pre_subset(self, options): + if not options.glyph_names: + self.formatType = 3.0 + return True # Required table + +@_add_method(ttLib.getTableClass('post')) +def subset_glyphs(self, s): + self.extraNames = [] # This seems to do it + return True # Required table + +@_add_method(ttLib.getTableModule('glyf').Glyph) +def remapComponentsFast(self, indices): + if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: + return # Not composite + data = array.array("B", self.data) + i = 10 + more = 1 + while more: + flags =(data[i] << 8) | data[i+1] + glyphID =(data[i+2] << 8) | data[i+3] + # Remap + glyphID = indices.index(glyphID) + data[i+2] = glyphID >> 8 + data[i+3] = glyphID & 0xFF + i += 4 + flags = int(flags) + + if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS + else: i += 2 + if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE + elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE + elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO + more = flags & 0x0020 # MORE_COMPONENTS + + self.data = data.tostring() + +@_add_method(ttLib.getTableClass('glyf')) +def closure_glyphs(self, s): + decompose = s.glyphs + while True: + components = set() + for g in decompose: + if g not in self.glyphs: + continue + gl = self.glyphs[g] + for c in gl.getComponentNames(self): + if c not in s.glyphs: + components.add(c) + components = set(c for c in components if c not in s.glyphs) + if not components: + break + decompose = components + s.glyphs.update(components) + +@_add_method(ttLib.getTableClass('glyf')) +def prune_pre_subset(self, options): + if options.notdef_glyph and not options.notdef_outline: + 
g = self[self.glyphOrder[0]] + # Yay, easy! + g.__dict__.clear() + g.data = "" + return True + +@_add_method(ttLib.getTableClass('glyf')) +def subset_glyphs(self, s): + self.glyphs = _dict_subset(self.glyphs, s.glyphs) + indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs] + for v in self.glyphs.values(): + if hasattr(v, "data"): + v.remapComponentsFast(indices) + else: + pass # No need + self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs] + # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset. + return True + +@_add_method(ttLib.getTableClass('glyf')) +def prune_post_subset(self, options): + remove_hinting = not options.hinting + for v in self.glyphs.values(): + v.trim(remove_hinting=remove_hinting) + return True + +@_add_method(ttLib.getTableClass('CFF ')) +def prune_pre_subset(self, options): + cff = self.cff + # CFF table must have one font only + cff.fontNames = cff.fontNames[:1] + + if options.notdef_glyph and not options.notdef_outline: + for fontname in cff.keys(): + font = cff[fontname] + c,_ = font.CharStrings.getItemAndSelector('.notdef') + # XXX we should preserve the glyph width + c.bytecode = '\x0e' # endchar + c.program = None + + return True # bool(cff.fontNames) + +@_add_method(ttLib.getTableClass('CFF ')) +def subset_glyphs(self, s): + cff = self.cff + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + + # Load all glyphs + for g in font.charset: + if g not in s.glyphs: continue + c,sel = cs.getItemAndSelector(g) + + if cs.charStringsAreIndexed: + indices = [i for i,g in enumerate(font.charset) if g in s.glyphs] + csi = cs.charStringsIndex + csi.items = [csi.items[i] for i in indices] + del csi.file, csi.offsets + if hasattr(font, "FDSelect"): + sel = font.FDSelect + # XXX We want to set sel.format to None, such that the + # most compact format is selected. However, OTS was + # broken and couldn't parse a FDSelect format 0 that + # happened before CharStrings. 
As such, always force + # format 3 until we fix cffLib to always generate + # FDSelect after CharStrings. + # https://github.com/khaledhosny/ots/pull/31 + #sel.format = None + sel.format = 3 + sel.gidArray = [sel.gidArray[i] for i in indices] + cs.charStrings = {g:indices.index(v) + for g,v in cs.charStrings.items() + if g in s.glyphs} + else: + cs.charStrings = {g:v + for g,v in cs.charStrings.items() + if g in s.glyphs} + font.charset = [g for g in font.charset if g in s.glyphs] + font.numGlyphs = len(font.charset) + + return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) + +@_add_method(psCharStrings.T2CharString) +def subset_subroutines(self, subrs, gsubrs): + p = self.program + assert len(p) + for i in range(1, len(p)): + if p[i] == 'callsubr': + assert isinstance(p[i-1], int) + p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias + elif p[i] == 'callgsubr': + assert isinstance(p[i-1], int) + p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias + +@_add_method(psCharStrings.T2CharString) +def drop_hints(self): + hints = self._hints + + if hints.has_hint: + self.program = self.program[hints.last_hint:] + if hasattr(self, 'width'): + # Insert width back if needed + if self.width != self.private.defaultWidthX: + self.program.insert(0, self.width - self.private.nominalWidthX) + + if hints.has_hintmask: + i = 0 + p = self.program + while i < len(p): + if p[i] in ['hintmask', 'cntrmask']: + assert i + 1 <= len(p) + del p[i:i+2] + continue + i += 1 + + # TODO: we currently don't drop calls to "empty" subroutines. 
+ + assert len(self.program) + + del self._hints + +class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs): + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + for subrs in [localSubrs, globalSubrs]: + if subrs and not hasattr(subrs, "_used"): + subrs._used = set() + + def op_callsubr(self, index): + self.localSubrs._used.add(self.operandStack[-1]+self.localBias) + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + + def op_callgsubr(self, index): + self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias) + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + +class _DehintingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + class Hints(object): + def __init__(self): + # Whether calling this charstring produces any hint stems + self.has_hint = False + # Index to start at to drop all hints + self.last_hint = 0 + # Index up to which we know more hints are possible. + # Only relevant if status is 0 or 1. + self.last_checked = 0 + # The status means: + # 0: after dropping hints, this charstring is empty + # 1: after dropping hints, there may be more hints + # continuing after this + # 2: no more hints possible after this charstring + self.status = 0 + # Has hintmask instructions; not recursive + self.has_hintmask = False + pass + + def __init__(self, css, localSubrs, globalSubrs): + self._css = css + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + + def execute(self, charString): + old_hints = charString._hints if hasattr(charString, '_hints') else None + charString._hints = self.Hints() + + psCharStrings.SimpleT2Decompiler.execute(self, charString) + + hints = charString._hints + + if hints.has_hint or hints.has_hintmask: + self._css.add(charString) + + if hints.status != 2: + # Check from last_check, make sure we didn't have any operators. 
+ for i in range(hints.last_checked, len(charString.program) - 1): + if isinstance(charString.program[i], str): + hints.status = 2 + break + else: + hints.status = 1 # There's *something* here + hints.last_checked = len(charString.program) + + if old_hints: + assert hints.__dict__ == old_hints.__dict__ + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + self.processSubr(index, subr) + + def op_hstem(self, index): + psCharStrings.SimpleT2Decompiler.op_hstem(self, index) + self.processHint(index) + def op_vstem(self, index): + psCharStrings.SimpleT2Decompiler.op_vstem(self, index) + self.processHint(index) + def op_hstemhm(self, index): + psCharStrings.SimpleT2Decompiler.op_hstemhm(self, index) + self.processHint(index) + def op_vstemhm(self, index): + psCharStrings.SimpleT2Decompiler.op_vstemhm(self, index) + self.processHint(index) + def op_hintmask(self, index): + psCharStrings.SimpleT2Decompiler.op_hintmask(self, index) + self.processHintmask(index) + def op_cntrmask(self, index): + psCharStrings.SimpleT2Decompiler.op_cntrmask(self, index) + self.processHintmask(index) + + def processHintmask(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hintmask = True + if hints.status != 2 and hints.has_hint: + # Check from last_check, see if we may be an implicit vstem + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + if hints.status != 2: + # We are an implicit vstem + hints.last_hint = index + 1 + hints.status = 0 + hints.last_checked = index + 1 + + def processHint(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hint = True + hints.last_hint = index + 
hints.last_checked = index + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + hints = cs._hints + subr_hints = subr._hints + + if subr_hints.has_hint: + if hints.status != 2: + hints.has_hint = True + hints.last_checked = index + hints.status = subr_hints.status + # Decide where to chop off from + if subr_hints.status == 0: + hints.last_hint = index + else: + hints.last_hint = index - 2 # Leave the subr call in + else: + # In my understanding, this is a font bug. + # I.e., it has hint stems *after* path construction. + # I've seen this in widespread fonts. + # Best to ignore the hints I suppose... + pass + #assert 0 + else: + hints.status = max(hints.status, subr_hints.status) + if hints.status != 2: + # Check from last_check, make sure we didn't have + # any operators. + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + hints.last_checked = index + if hints.status != 2: + # Decide where to chop off from + if subr_hints.status == 0: + hints.last_hint = index + else: + hints.last_hint = index - 2 # Leave the subr call in + +class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs): + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + + def execute(self, charString): + # Note: Currently we recompute _desubroutinized each time. + # This is more robust in some cases, but in other places we assume + # that each subroutine always expands to the same code, so + # maybe it doesn't matter. To speed up we can just not + # recompute _desubroutinized if it's there. For now I just + # double-check that it desubroutinized to the same thing. 
+ old_desubroutinized = charString._desubroutinized if hasattr(charString, '_desubroutinized') else None + + charString._patches = [] + psCharStrings.SimpleT2Decompiler.execute(self, charString) + desubroutinized = charString.program[:] + for idx,expansion in reversed (charString._patches): + assert idx >= 2 + assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1] + assert type(desubroutinized[idx - 2]) == int + if expansion[-1] == 'return': + expansion = expansion[:-1] + desubroutinized[idx-2:idx] = expansion + if 'endchar' in desubroutinized: + # Cut off after first endchar + desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1] + else: + if not len(desubroutinized) or desubroutinized[-1] != 'return': + desubroutinized.append('return') + + charString._desubroutinized = desubroutinized + del charString._patches + + if old_desubroutinized: + assert desubroutinized == old_desubroutinized + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + self.processSubr(index, subr) + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + cs._patches.append((index, subr._desubroutinized)) + + +@_add_method(ttLib.getTableClass('CFF ')) +def prune_post_subset(self, options): + cff = self.cff + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + + # Drop unused FontDictionaries + if hasattr(font, "FDSelect"): + sel = font.FDSelect + indices = _uniq_sort(sel.gidArray) + sel.gidArray = [indices.index (ss) for ss in sel.gidArray] + arr = font.FDArray + arr.items = [arr[i] for i in indices] + del arr.file, arr.offsets + + # Desubroutinize if asked for + if options.desubroutinize: + for g in font.charset: 
+ c,sel = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs) + decompiler.execute(c) + c.program = c._desubroutinized + + # Drop hints if not needed + if not options.hinting: + + # This can be tricky, but doesn't have to. What we do is: + # + # - Run all used glyph charstrings and recurse into subroutines, + # - For each charstring (including subroutines), if it has any + # of the hint stem operators, we mark it as such. + # Upon returning, for each charstring we note all the + # subroutine calls it makes that (recursively) contain a stem, + # - Dropping hinting then consists of the following two ops: + # * Drop the piece of the program in each charstring before the + # last call to a stem op or a stem-calling subroutine, + # * Drop all hintmask operations. + # - It's trickier... A hintmask right after hints and a few numbers + # will act as an implicit vstemhm. As such, we track whether + # we have seen any non-hint operators so far and do the right + # thing, recursively... 
Good luck understanding that :( + css = set() + for g in font.charset: + c,sel = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs) + decompiler.execute(c) + for charstring in css: + charstring.drop_hints() + del css + + # Drop font-wide hinting values + all_privs = [] + if hasattr(font, 'FDSelect'): + all_privs.extend(fd.Private for fd in font.FDArray) + else: + all_privs.append(font.Private) + for priv in all_privs: + for k in ['BlueValues', 'OtherBlues', + 'FamilyBlues', 'FamilyOtherBlues', + 'BlueScale', 'BlueShift', 'BlueFuzz', + 'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW']: + if hasattr(priv, k): + setattr(priv, k, None) + + # Renumber subroutines to remove unused ones + + # Mark all used subroutines + for g in font.charset: + c,sel = cs.getItemAndSelector(g) + subrs = getattr(c.private, "Subrs", []) + decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs) + decompiler.execute(c) + + all_subrs = [font.GlobalSubrs] + if hasattr(font, 'FDSelect'): + all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs) + elif hasattr(font.Private, 'Subrs') and font.Private.Subrs: + all_subrs.append(font.Private.Subrs) + + subrs = set(subrs) # Remove duplicates + + # Prepare + for subrs in all_subrs: + if not hasattr(subrs, '_used'): + subrs._used = set() + subrs._used = _uniq_sort(subrs._used) + subrs._old_bias = psCharStrings.calcSubrBias(subrs) + subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) + + # Renumber glyph charstrings + for g in font.charset: + c,sel = cs.getItemAndSelector(g) + subrs = getattr(c.private, "Subrs", []) + c.subset_subroutines (subrs, font.GlobalSubrs) + + # Renumber subroutines themselves + for subrs in all_subrs: + if subrs == font.GlobalSubrs: + if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'): + local_subrs = font.Private.Subrs + else: + local_subrs = [] + else: + 
local_subrs = subrs + + subrs.items = [subrs.items[i] for i in subrs._used] + del subrs.file + if hasattr(subrs, 'offsets'): + del subrs.offsets + + for subr in subrs.items: + subr.subset_subroutines (local_subrs, font.GlobalSubrs) + + # Cleanup + for subrs in all_subrs: + del subrs._used, subrs._old_bias, subrs._new_bias + + return True + +@_add_method(ttLib.getTableClass('cmap')) +def closure_glyphs(self, s): + tables = [t for t in self.tables if t.isUnicode()] + + # Close glyphs + for table in tables: + if table.format == 14: + for cmap in table.uvsDict.values(): + glyphs = {g for u,g in cmap if u in s.unicodes_requested} + if None in glyphs: + glyphs.remove(None) + s.glyphs.update(glyphs) + else: + cmap = table.cmap + intersection = s.unicodes_requested.intersection(cmap.keys()) + s.glyphs.update(cmap[u] for u in intersection) + + # Calculate unicodes_missing + s.unicodes_missing = s.unicodes_requested.copy() + for table in tables: + s.unicodes_missing.difference_update(table.cmap) + +@_add_method(ttLib.getTableClass('cmap')) +def prune_pre_subset(self, options): + if not options.legacy_cmap: + # Drop non-Unicode / non-Symbol cmaps + self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()] + if not options.symbol_cmap: + self.tables = [t for t in self.tables if not t.isSymbol()] + # TODO(behdad) Only keep one subtable? + # For now, drop format=0 which can't be subset_glyphs easily? + self.tables = [t for t in self.tables if t.format != 0] + self.numSubTables = len(self.tables) + return True # Required table + +@_add_method(ttLib.getTableClass('cmap')) +def subset_glyphs(self, s): + s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only + for t in self.tables: + if t.format == 14: + # TODO(behdad) We drop all the default-UVS mappings + # for glyphs_requested. So it's the caller's responsibility to make + # sure those are included. 
+ t.uvsDict = {v:[(u,g) for u,g in l + if g in s.glyphs_requested or u in s.unicodes_requested] + for v,l in t.uvsDict.items()} + t.uvsDict = {v:l for v,l in t.uvsDict.items() if l} + elif t.isUnicode(): + t.cmap = {u:g for u,g in t.cmap.items() + if g in s.glyphs_requested or u in s.unicodes_requested} + else: + t.cmap = {u:g for u,g in t.cmap.items() + if g in s.glyphs_requested} + self.tables = [t for t in self.tables + if (t.cmap if t.format != 14 else t.uvsDict)] + self.numSubTables = len(self.tables) + # TODO(behdad) Convert formats when needed. + # In particular, if we have a format=12 without non-BMP + # characters, either drop format=12 one or convert it + # to format=4 if there's not one. + return True # Required table + +@_add_method(ttLib.getTableClass('DSIG')) +def prune_pre_subset(self, options): + # Drop all signatures since they will be invalid + self.usNumSigs = 0 + self.signatureRecords = [] + return True + +@_add_method(ttLib.getTableClass('maxp')) +def prune_pre_subset(self, options): + if not options.hinting: + if self.tableVersion == 0x00010000: + self.maxZones = 1 + self.maxTwilightPoints = 0 + self.maxFunctionDefs = 0 + self.maxInstructionDefs = 0 + self.maxStackElements = 0 + self.maxSizeOfInstructions = 0 + return True + +@_add_method(ttLib.getTableClass('name')) +def prune_pre_subset(self, options): + if '*' not in options.name_IDs: + self.names = [n for n in self.names if n.nameID in options.name_IDs] + if not options.name_legacy: + # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman + # entry for Latin and no Unicode names. + self.names = [n for n in self.names if n.isUnicode()] + # TODO(behdad) Option to keep only one platform's + if '*' not in options.name_languages: + # TODO(behdad) This is Windows-platform specific! 
+ self.names = [n for n in self.names + if n.langID in options.name_languages] + if options.obfuscate_names: + namerecs = [] + for n in self.names: + if n.nameID in [1, 4]: + n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f" + elif n.nameID in [2, 6]: + n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f" + elif n.nameID == 3: + n.string = "" + elif n.nameID in [16, 17, 18]: + continue + namerecs.append(n) + self.names = namerecs + return True # Required table + + +# TODO(behdad) OS/2 ulUnicodeRange / ulCodePageRange? +# TODO(behdad) Drop AAT tables. +# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries. +# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left +# TODO(behdad) Drop GDEF subitems if unused by lookups +# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF) +# TODO(behdad) Text direction considerations. +# TODO(behdad) Text script / language considerations. +# TODO(behdad) Optionally drop 'kern' table if GPOS available +# TODO(behdad) Implement --unicode='*' to choose all cmap'ed +# TODO(behdad) Drop old-spec Indic scripts + + +class Options(object): + + class OptionError(Exception): pass + class UnknownOptionError(OptionError): pass + + _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', + 'EBSC', 'SVG ', 'PCLT', 'LTSH'] + _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite + _drop_tables_default += ['CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'] # Color + _no_subset_tables_default = ['gasp', 'head', 'hhea', 'maxp', + 'vhea', 'OS/2', 'loca', 'name', 'cvt ', + 'fpgm', 'prep', 'VDMX', 'DSIG'] + _hinting_tables_default = ['cvt ', 'fpgm', 'prep', 'hdmx', 'VDMX'] + + # Based on HarfBuzz shapers + _layout_features_groups = { + # Default shaper + 'common': ['ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'], + 'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'], + 'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'], + 'ltr': ['ltra', 'ltrm'], + 'rtl': ['rtla', 
'rtlm'], + # Complex shapers + 'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3', + 'cswh', 'mset'], + 'hangul': ['ljmo', 'vjmo', 'tjmo'], + 'tibetan': ['abvs', 'blws', 'abvm', 'blwm'], + 'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half', + 'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres', + 'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'], + } + _layout_features_default = _uniq_sort(sum( + iter(_layout_features_groups.values()), [])) + + drop_tables = _drop_tables_default + no_subset_tables = _no_subset_tables_default + hinting_tables = _hinting_tables_default + legacy_kern = False # drop 'kern' table if GPOS available + layout_features = _layout_features_default + ignore_missing_glyphs = False + ignore_missing_unicodes = True + hinting = True + glyph_names = False + legacy_cmap = False + symbol_cmap = False + name_IDs = [1, 2] # Family and Style + name_legacy = False + name_languages = [0x0409] # English + obfuscate_names = False # to make webfont unusable as a system font + notdef_glyph = True # gid0 for TrueType / .notdef for CFF + notdef_outline = False # No need for notdef to have an outline really + recommended_glyphs = False # gid1, gid2, gid3 for TrueType + recalc_bounds = False # Recalculate font bounding boxes + recalc_timestamp = False # Recalculate font modified timestamp + canonical_order = False # Order tables as recommended + flavor = None # May be 'woff' or 'woff2' + desubroutinize = False # Desubroutinize CFF CharStrings + + def __init__(self, **kwargs): + self.set(**kwargs) + + def set(self, **kwargs): + for k,v in kwargs.items(): + if not hasattr(self, k): + raise self.UnknownOptionError("Unknown option '%s'" % k) + setattr(self, k, v) + + def parse_opts(self, argv, ignore_unknown=False): + ret = [] + for a in argv: + orig_a = a + if not a.startswith('--'): + ret.append(a) + continue + a = a[2:] + i = a.find('=') + op = '=' + if i == -1: + if a.startswith("no-"): + k = a[3:] + v = False + else: + k = a 
+ v = True + if k.endswith("?"): + k = k[:-1] + v = '?' + else: + k = a[:i] + if k[-1] in "-+": + op = k[-1]+'=' # Op is '-=' or '+=' now. + k = k[:-1] + v = a[i+1:] + ok = k + k = k.replace('-', '_') + if not hasattr(self, k): + if ignore_unknown is True or ok in ignore_unknown: + ret.append(orig_a) + continue + else: + raise self.UnknownOptionError("Unknown option '%s'" % a) + + ov = getattr(self, k) + if v == '?': + print("Current setting for '%s' is: %s" % (ok, ov)) + continue + if isinstance(ov, bool): + v = bool(v) + elif isinstance(ov, int): + v = int(v) + elif isinstance(ov, str): + v = str(v) # redundant + elif isinstance(ov, list): + if isinstance(v, bool): + raise self.OptionError("Option '%s' requires values to be specified using '='" % a) + vv = v.replace(',', ' ').split() + if vv == ['']: + vv = [] + vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] + if op == '=': + v = vv + elif op == '+=': + v = ov + v.extend(vv) + elif op == '-=': + v = ov + for x in vv: + if x in v: + v.remove(x) + else: + assert False + + setattr(self, k, v) + + return ret + + +class Subsetter(object): + + class SubsettingError(Exception): pass + class MissingGlyphsSubsettingError(SubsettingError): pass + class MissingUnicodesSubsettingError(SubsettingError): pass + + def __init__(self, options=None, log=None): + + if not log: + log = Logger() + if not options: + options = Options() + + self.options = options + self.log = log + self.unicodes_requested = set() + self.glyph_names_requested = set() + self.glyph_ids_requested = set() + + def populate(self, glyphs=[], gids=[], unicodes=[], text=""): + self.unicodes_requested.update(unicodes) + if isinstance(text, bytes): + text = text.decode("utf_8") + for u in text: + self.unicodes_requested.add(ord(u)) + self.glyph_names_requested.update(glyphs) + self.glyph_ids_requested.update(gids) + + def _prune_pre_subset(self, font): + + for tag in font.keys(): + if tag == 'GlyphOrder': continue + + if(tag in 
self.options.drop_tables or + (tag in self.options.hinting_tables and not self.options.hinting) or + (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))): + self.log(tag, "dropped") + del font[tag] + continue + + clazz = ttLib.getTableClass(tag) + + if hasattr(clazz, 'prune_pre_subset'): + table = font[tag] + self.log.lapse("load '%s'" % tag) + retain = table.prune_pre_subset(self.options) + self.log.lapse("prune '%s'" % tag) + if not retain: + self.log(tag, "pruned to empty; dropped") + del font[tag] + continue + else: + self.log(tag, "pruned") + + def _closure_glyphs(self, font): + + realGlyphs = set(font.getGlyphOrder()) + glyph_order = font.getGlyphOrder() + + self.glyphs_requested = set() + self.glyphs_requested.update(self.glyph_names_requested) + self.glyphs_requested.update(glyph_order[i] + for i in self.glyph_ids_requested + if i < len(glyph_order)) + + self.glyphs_missing = set() + self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs)) + self.glyphs_missing.update(i for i in self.glyph_ids_requested + if i >= len(glyph_order)) + if self.glyphs_missing: + self.log("Missing requested glyphs: %s" % self.glyphs_missing) + if not self.options.ignore_missing_glyphs: + raise self.MissingGlyphsSubsettingError(self.glyphs_missing) + + self.glyphs = self.glyphs_requested.copy() + + self.unicodes_missing = set() + if 'cmap' in font: + font['cmap'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.log.lapse("close glyph list over 'cmap'") + self.glyphs_cmaped = frozenset(self.glyphs) + if self.unicodes_missing: + missing = ["U+%04X" % u for u in self.unicodes_missing] + self.log("Missing glyphs for requested Unicodes: %s" % missing) + if not self.options.ignore_missing_unicodes: + raise self.MissingUnicodesSubsettingError(missing) + del missing + + if self.options.notdef_glyph: + if 'glyf' in font: + self.glyphs.add(font.getGlyphName(0)) + self.log("Added gid0 to subset") + else: + self.glyphs.add('.notdef') 
+ self.log("Added .notdef to subset") + if self.options.recommended_glyphs: + if 'glyf' in font: + for i in range(min(4, len(font.getGlyphOrder()))): + self.glyphs.add(font.getGlyphName(i)) + self.log("Added first four glyphs to subset") + + if 'GSUB' in font: + self.log("Closing glyph list over 'GSUB': %d glyphs before" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + font['GSUB'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.log("Closed glyph list over 'GSUB': %d glyphs after" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + self.log.lapse("close glyph list over 'GSUB'") + self.glyphs_gsubed = frozenset(self.glyphs) + + if 'glyf' in font: + self.log("Closing glyph list over 'glyf': %d glyphs before" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + font['glyf'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.log("Closed glyph list over 'glyf': %d glyphs after" % + len(self.glyphs)) + self.log.glyphs(self.glyphs, font=font) + self.log.lapse("close glyph list over 'glyf'") + self.glyphs_glyfed = frozenset(self.glyphs) + + self.glyphs_all = frozenset(self.glyphs) + + self.log("Retaining %d glyphs: " % len(self.glyphs_all)) + + del self.glyphs + + def _subset_glyphs(self, font): + for tag in font.keys(): + if tag == 'GlyphOrder': continue + clazz = ttLib.getTableClass(tag) + + if tag in self.options.no_subset_tables: + self.log(tag, "subsetting not needed") + elif hasattr(clazz, 'subset_glyphs'): + table = font[tag] + self.glyphs = self.glyphs_all + retain = table.subset_glyphs(self) + del self.glyphs + self.log.lapse("subset '%s'" % tag) + if not retain: + self.log(tag, "subsetted to empty; dropped") + del font[tag] + else: + self.log(tag, "subsetted") + else: + self.log(tag, "NOT subset; don't know how to subset; dropped") + del font[tag] + + glyphOrder = font.getGlyphOrder() + glyphOrder = [g for g in glyphOrder if g in self.glyphs_all] + 
font.setGlyphOrder(glyphOrder) + font._buildReverseGlyphOrderDict() + self.log.lapse("subset GlyphOrder") + + def _prune_post_subset(self, font): + for tag in font.keys(): + if tag == 'GlyphOrder': continue + clazz = ttLib.getTableClass(tag) + if hasattr(clazz, 'prune_post_subset'): + table = font[tag] + retain = table.prune_post_subset(self.options) + self.log.lapse("prune '%s'" % tag) + if not retain: + self.log(tag, "pruned to empty; dropped") + del font[tag] + else: + self.log(tag, "pruned") + + def subset(self, font): + + self._prune_pre_subset(font) + self._closure_glyphs(font) + self._subset_glyphs(font) + self._prune_post_subset(font) + + +class Logger(object): + + def __init__(self, verbose=False, xml=False, timing=False): + self.verbose = verbose + self.xml = xml + self.timing = timing + self.last_time = self.start_time = time.time() + + def parse_opts(self, argv): + argv = argv[:] + for v in ['verbose', 'xml', 'timing']: + if "--"+v in argv: + setattr(self, v, True) + argv.remove("--"+v) + return argv + + def __call__(self, *things): + if not self.verbose: + return + print(' '.join(str(x) for x in things)) + + def lapse(self, *things): + if not self.timing: + return + new_time = time.time() + print("Took %0.3fs to %s" %(new_time - self.last_time, + ' '.join(str(x) for x in things))) + self.last_time = new_time + + def glyphs(self, glyphs, font=None): + if not self.verbose: + return + self("Glyph names:", sorted(glyphs)) + if font: + reverseGlyphMap = font.getReverseGlyphMap() + self("Glyph IDs: ", sorted(reverseGlyphMap[g] for g in glyphs)) + + def font(self, font, file=sys.stdout): + if not self.xml: + return + from fontTools.misc import xmlWriter + writer = xmlWriter.XMLWriter(file) + for tag in font.keys(): + writer.begintag(tag) + writer.newline() + font[tag].toXML(writer, font) + writer.endtag(tag) + writer.newline() + + +def load_font(fontFile, + options, + allowVID=False, + checkChecksums=False, + dontLoadGlyphNames=False, + lazy=True): + + font = 
ttLib.TTFont(fontFile, + allowVID=allowVID, + checkChecksums=checkChecksums, + recalcBBoxes=options.recalc_bounds, + recalcTimestamp=options.recalc_timestamp, + lazy=lazy) + + # Hack: + # + # If we don't need glyph names, change 'post' class to not try to + # load them. It avoid lots of headache with broken fonts as well + # as loading time. + # + # Ideally ttLib should provide a way to ask it to skip loading + # glyph names. But it currently doesn't provide such a thing. + # + if dontLoadGlyphNames: + post = ttLib.getTableClass('post') + saved = post.decode_format_2_0 + post.decode_format_2_0 = post.decode_format_3_0 + f = font['post'] + if f.formatType == 2.0: + f.formatType = 3.0 + post.decode_format_2_0 = saved + + return font + +def save_font(font, outfile, options): + if options.flavor and not hasattr(font, 'flavor'): + raise Exception("fonttools version does not support flavors.") + font.flavor = options.flavor + font.save(outfile, reorderTables=options.canonical_order) + +def parse_unicodes(s): + import re + s = re.sub (r"0[xX]", " ", s) + s = re.sub (r"[<+>,;&#\\xXuU\n ]", " ", s) + l = [] + for item in s.split(): + fields = item.split('-') + if len(fields) == 1: + l.append(int(item, 16)) + else: + start,end = fields + l.extend(range(int(start, 16), int(end, 16)+1)) + return l + +def parse_gids(s): + l = [] + for item in s.replace(',', ' ').split(): + fields = item.split('-') + if len(fields) == 1: + l.append(int(fields[0])) + else: + l.extend(range(int(fields[0]), int(fields[1])+1)) + return l + +def parse_glyphs(s): + return s.replace(',', ' ').split() + +def main(args=None): + + if args is None: + args = sys.argv[1:] + + if '--help' in args: + print(__doc__) + sys.exit(0) + + log = Logger() + args = log.parse_opts(args) + + options = Options() + args = options.parse_opts(args, + ignore_unknown=['gids', 'gids-file', + 'glyphs', 'glyphs-file', + 'text', 'text-file', + 'unicodes', 'unicodes-file', + 'output-file']) + + if len(args) < 2: + print("usage:", 
__usage__, file=sys.stderr) + print("Try pyftsubset --help for more information.", file=sys.stderr) + sys.exit(1) + + fontfile = args[0] + args = args[1:] + + subsetter = Subsetter(options=options, log=log) + outfile = fontfile + '.subset' + glyphs = [] + gids = [] + unicodes = [] + wildcard_glyphs = False + wildcard_unicodes = False + text = "" + for g in args: + if g == '*': + wildcard_glyphs = True + continue + if g.startswith('--output-file='): + outfile = g[14:] + continue + if g.startswith('--text='): + text += g[7:] + continue + if g.startswith('--text-file='): + text += open(g[12:]).read().replace('\n', '') + continue + if g.startswith('--unicodes='): + if g[11:] == '*': + wildcard_unicodes = True + else: + unicodes.extend(parse_unicodes(g[11:])) + continue + if g.startswith('--unicodes-file='): + for line in open(g[16:]).readlines(): + unicodes.extend(parse_unicodes(line.split('#')[0])) + continue + if g.startswith('--gids='): + gids.extend(parse_gids(g[7:])) + continue + if g.startswith('--gids-file='): + for line in open(g[12:]).readlines(): + gids.extend(parse_gids(line.split('#')[0])) + continue + if g.startswith('--glyphs='): + if g[9:] == '*': + wildcard_glyphs = True + else: + glyphs.extend(parse_glyphs(g[9:])) + continue + if g.startswith('--glyphs-file='): + for line in open(g[14:]).readlines(): + glyphs.extend(parse_glyphs(line.split('#')[0])) + continue + glyphs.append(g) + + dontLoadGlyphNames = not options.glyph_names and not glyphs + font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames) + log.lapse("load font") + if wildcard_glyphs: + glyphs.extend(font.getGlyphOrder()) + if wildcard_unicodes: + for t in font['cmap'].tables: + if t.isUnicode(): + unicodes.extend(t.cmap.keys()) + assert '' not in glyphs + + log.lapse("compile glyph list") + log("Text: '%s'" % text) + log("Unicodes:", unicodes) + log("Glyphs:", glyphs) + log("Gids:", gids) + + subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text) + 
subsetter.subset(font) + + save_font (font, outfile, options) + log.lapse("compile and save font") + + log.last_time = log.start_time + log.lapse("make one with everything(TOTAL TIME)") + + if log.verbose: + import os + log("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile)) + log("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile)) + + log.font(font) + + font.close() + + +__all__ = [ + 'Options', + 'Subsetter', + 'Logger', + 'load_font', + 'save_font', + 'parse_gids', + 'parse_glyphs', + 'parse_unicodes', + 'main' +] + +if __name__ == '__main__': + main() diff -Nru fonttools-2.4/Tools/fontTools/t1Lib.py fonttools-3.0/Tools/fontTools/t1Lib.py --- fonttools-2.4/Tools/fontTools/t1Lib.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/t1Lib.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,371 @@ +"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts + +Functions for reading and writing raw Type 1 data: + +read(path) + reads any Type 1 font file, returns the raw data and a type indicator: + 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed + to by 'path'. + Raises an error when the file does not contain valid Type 1 data. + +write(path, data, kind='OTHER', dohex=False) + writes raw Type 1 data to the file pointed to by 'path'. + 'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'. + 'dohex' is a flag which determines whether the eexec encrypted + part should be written as hexadecimal or binary, but only if kind + is 'LWFN' or 'PFB'. 
+""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import eexec +from fontTools.misc.macCreatorType import getMacCreatorAndType +import os +import re + +__author__ = "jvr" +__version__ = "1.0b2" +DEBUG = 0 + + +try: + try: + from Carbon import Res + except ImportError: + import Res # MacPython < 2.2 +except ImportError: + haveMacSupport = 0 +else: + haveMacSupport = 1 + import MacOS + + +class T1Error(Exception): pass + + +class T1Font(object): + + """Type 1 font class. + + Uses a minimal interpeter that supports just about enough PS to parse + Type 1 fonts. + """ + + def __init__(self, path=None): + if path is not None: + self.data, type = read(path) + else: + pass # XXX + + def saveAs(self, path, type): + write(path, self.getData(), type) + + def getData(self): + # XXX Todo: if the data has been converted to Python object, + # recreate the PS stream + return self.data + + def getGlyphSet(self): + """Return a generic GlyphSet, which is a dict-like object + mapping glyph names to glyph objects. The returned glyph objects + have a .draw() method that supports the Pen protocol, and will + have an attribute named 'width', but only *after* the .draw() method + has been called. + + In the case of Type 1, the GlyphSet is simply the CharStrings dict. 
+ """ + return self["CharStrings"] + + def __getitem__(self, key): + if not hasattr(self, "font"): + self.parse() + return self.font[key] + + def parse(self): + from fontTools.misc import psLib + from fontTools.misc import psCharStrings + self.font = psLib.suckfont(self.data) + charStrings = self.font["CharStrings"] + lenIV = self.font["Private"].get("lenIV", 4) + assert lenIV >= 0 + subrs = self.font["Private"]["Subrs"] + for glyphName, charString in charStrings.items(): + charString, R = eexec.decrypt(charString, 4330) + charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:], + subrs=subrs) + for i in range(len(subrs)): + charString, R = eexec.decrypt(subrs[i], 4330) + subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs) + del self.data + + +# low level T1 data read and write functions + +def read(path, onlyHeader=False): + """reads any Type 1 font file, returns raw data""" + normpath = path.lower() + creator, typ = getMacCreatorAndType(path) + if typ == 'LWFN': + return readLWFN(path, onlyHeader), 'LWFN' + if normpath[-4:] == '.pfb': + return readPFB(path, onlyHeader), 'PFB' + else: + return readOther(path), 'OTHER' + +def write(path, data, kind='OTHER', dohex=False): + assertType1(data) + kind = kind.upper() + try: + os.remove(path) + except os.error: + pass + err = 1 + try: + if kind == 'LWFN': + writeLWFN(path, data) + elif kind == 'PFB': + writePFB(path, data) + else: + writeOther(path, data, dohex) + err = 0 + finally: + if err and not DEBUG: + try: + os.remove(path) + except os.error: + pass + + +# -- internal -- + +LWFNCHUNKSIZE = 2000 +HEXLINELENGTH = 80 + + +def readLWFN(path, onlyHeader=False): + """reads an LWFN font file, returns raw data""" + resRef = Res.FSOpenResFile(path, 1) # read-only + try: + Res.UseResFile(resRef) + n = Res.Count1Resources('POST') + data = [] + for i in range(501, 501 + n): + res = Res.Get1Resource('POST', i) + code = byteord(res.data[0]) + if byteord(res.data[1]) != 0: + raise 
T1Error('corrupt LWFN file') + if code in [1, 2]: + if onlyHeader and code == 2: + break + data.append(res.data[2:]) + elif code in [3, 5]: + break + elif code == 4: + f = open(path, "rb") + data.append(f.read()) + f.close() + elif code == 0: + pass # comment, ignore + else: + raise T1Error('bad chunk code: ' + repr(code)) + finally: + Res.CloseResFile(resRef) + data = bytesjoin(data) + assertType1(data) + return data + +def readPFB(path, onlyHeader=False): + """reads a PFB font file, returns raw data""" + f = open(path, "rb") + data = [] + while True: + if f.read(1) != bytechr(128): + raise T1Error('corrupt PFB file') + code = byteord(f.read(1)) + if code in [1, 2]: + chunklen = stringToLong(f.read(4)) + chunk = f.read(chunklen) + assert len(chunk) == chunklen + data.append(chunk) + elif code == 3: + break + else: + raise T1Error('bad chunk code: ' + repr(code)) + if onlyHeader: + break + f.close() + data = bytesjoin(data) + assertType1(data) + return data + +def readOther(path): + """reads any (font) file, returns raw data""" + f = open(path, "rb") + data = f.read() + f.close() + assertType1(data) + + chunks = findEncryptedChunks(data) + data = [] + for isEncrypted, chunk in chunks: + if isEncrypted and isHex(chunk[:4]): + data.append(deHexString(chunk)) + else: + data.append(chunk) + return bytesjoin(data) + +# file writing tools + +def writeLWFN(path, data): + Res.FSpCreateResFile(path, "just", "LWFN", 0) + resRef = Res.FSOpenResFile(path, 2) # write-only + try: + Res.UseResFile(resRef) + resID = 501 + chunks = findEncryptedChunks(data) + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + while chunk: + res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2]) + res.AddResource('POST', resID, '') + chunk = chunk[LWFNCHUNKSIZE - 2:] + resID = resID + 1 + res = Res.Resource(bytechr(5) + '\0') + res.AddResource('POST', resID, '') + finally: + Res.CloseResFile(resRef) + +def writePFB(path, data): + chunks = 
findEncryptedChunks(data) + f = open(path, "wb") + try: + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + f.write(bytechr(128) + bytechr(code)) + f.write(longToString(len(chunk))) + f.write(chunk) + f.write(bytechr(128) + bytechr(3)) + finally: + f.close() + +def writeOther(path, data, dohex=False): + chunks = findEncryptedChunks(data) + f = open(path, "wb") + try: + hexlinelen = HEXLINELENGTH // 2 + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + if code == 2 and dohex: + while chunk: + f.write(eexec.hexString(chunk[:hexlinelen])) + f.write('\r') + chunk = chunk[hexlinelen:] + else: + f.write(chunk) + finally: + f.close() + + +# decryption tools + +EEXECBEGIN = "currentfile eexec" +EEXECEND = '0' * 64 +EEXECINTERNALEND = "currentfile closefile" +EEXECBEGINMARKER = "%-- eexec start\r" +EEXECENDMARKER = "%-- eexec end\r" + +_ishexRE = re.compile('[0-9A-Fa-f]*$') + +def isHex(text): + return _ishexRE.match(text) is not None + + +def decryptType1(data): + chunks = findEncryptedChunks(data) + data = [] + for isEncrypted, chunk in chunks: + if isEncrypted: + if isHex(chunk[:4]): + chunk = deHexString(chunk) + decrypted, R = eexec.decrypt(chunk, 55665) + decrypted = decrypted[4:] + if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \ + and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND: + raise T1Error("invalid end of eexec part") + decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + '\r' + data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER) + else: + if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN: + data.append(chunk[:-len(EEXECBEGIN)-1]) + else: + data.append(chunk) + return bytesjoin(data) + +def findEncryptedChunks(data): + chunks = [] + while True: + eBegin = data.find(EEXECBEGIN) + if eBegin < 0: + break + eBegin = eBegin + len(EEXECBEGIN) + 1 + eEnd = data.find(EEXECEND, eBegin) + if eEnd < 0: + raise T1Error("can't find end of eexec part") + cypherText = 
data[eBegin:eEnd + 2] + if isHex(cypherText[:4]): + cypherText = deHexString(cypherText) + plainText, R = eexec.decrypt(cypherText, 55665) + eEndLocal = plainText.find(EEXECINTERNALEND) + if eEndLocal < 0: + raise T1Error("can't find end of eexec part") + chunks.append((0, data[:eBegin])) + chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1])) + data = data[eEnd:] + chunks.append((0, data)) + return chunks + +def deHexString(hexstring): + return eexec.deHexString(strjoin(hexstring.split())) + + +# Type 1 assertion + +_fontType1RE = re.compile(br"/FontType\s+1\s+def") + +def assertType1(data): + for head in [b'%!PS-AdobeFont', b'%!FontType1']: + if data[:len(head)] == head: + break + else: + raise T1Error("not a PostScript font") + if not _fontType1RE.search(data): + raise T1Error("not a Type 1 font") + if data.find(b"currentfile eexec") < 0: + raise T1Error("not an encrypted Type 1 font") + # XXX what else? + return data + + +# pfb helpers + +def longToString(long): + s = "" + for i in range(4): + s += bytechr((long & (0xff << (i * 8))) >> i * 8) + return s + +def stringToLong(s): + if len(s) != 4: + raise ValueError('string must be 4 bytes long') + l = 0 + for i in range(4): + l += byteord(s[i]) << (i * 8) + return l diff -Nru fonttools-2.4/Tools/fontTools/ttLib/__init__.py fonttools-3.0/Tools/fontTools/ttLib/__init__.py --- fonttools-2.4/Tools/fontTools/ttLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,991 @@ +"""fontTools.ttLib -- a package for dealing with TrueType fonts. + +This package offers translators to convert TrueType fonts to Python +objects and vice versa, and additionally from Python to TTX (an XML-based +text format) and vice versa. 
+ +Example interactive session: + +Python 1.5.2c1 (#43, Mar 9 1999, 13:06:43) [CW PPC w/GUSI w/MSL] +Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam +>>> from fontTools import ttLib +>>> tt = ttLib.TTFont("afont.ttf") +>>> tt['maxp'].numGlyphs +242 +>>> tt['OS/2'].achVendID +'B&H\000' +>>> tt['head'].unitsPerEm +2048 +>>> tt.saveXML("afont.ttx") +Dumping 'LTSH' table... +Dumping 'OS/2' table... +Dumping 'VDMX' table... +Dumping 'cmap' table... +Dumping 'cvt ' table... +Dumping 'fpgm' table... +Dumping 'glyf' table... +Dumping 'hdmx' table... +Dumping 'head' table... +Dumping 'hhea' table... +Dumping 'hmtx' table... +Dumping 'loca' table... +Dumping 'maxp' table... +Dumping 'name' table... +Dumping 'post' table... +Dumping 'prep' table... +>>> tt2 = ttLib.TTFont() +>>> tt2.importXML("afont.ttx") +>>> tt2['maxp'].numGlyphs +242 +>>> + +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import os +import sys + +haveMacSupport = 0 +if sys.platform == "mac": + haveMacSupport = 1 +elif sys.platform == "darwin": + if sys.version_info[:3] != (2, 2, 0) and sys.version_info[:1] < (3,): + # Python 2.2's Mac support is broken, so don't enable it there. + # Python 3 does not have Res used by macUtils + haveMacSupport = 1 + + +class TTLibError(Exception): pass + + +class TTFont(object): + + """The main font object. It manages file input and output, and offers + a convenient way of accessing tables. + Tables will be only decompiled when necessary, ie. when they're actually + accessed. This means that simple operations can be extremely fast. + """ + + def __init__(self, file=None, res_name_or_index=None, + sfntVersion="\000\001\000\000", flavor=None, checkChecksums=False, + verbose=False, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False, + recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=False): + + """The constructor can be called with a few different arguments. 
+ When reading a font from disk, 'file' should be either a pathname + pointing to a file, or a readable file object. + + It we're running on a Macintosh, 'res_name_or_index' maybe an sfnt + resource name or an sfnt resource index number or zero. The latter + case will cause TTLib to autodetect whether the file is a flat file + or a suitcase. (If it's a suitcase, only the first 'sfnt' resource + will be read!) + + The 'checkChecksums' argument is used to specify how sfnt + checksums are treated upon reading a file from disk: + 0: don't check (default) + 1: check, print warnings if a wrong checksum is found + 2: check, raise an exception if a wrong checksum is found. + + The TTFont constructor can also be called without a 'file' + argument: this is the way to create a new empty font. + In this case you can optionally supply the 'sfntVersion' argument, + and a 'flavor' which can be None, or 'woff'. + + If the recalcBBoxes argument is false, a number of things will *not* + be recalculated upon save/compile: + 1) glyph bounding boxes + 2) maxp font bounding box + 3) hhea min/max values + (1) is needed for certain kinds of CJK fonts (ask Werner Lemberg ;-). + Additionally, upon importing an TTX file, this option cause glyphs + to be compiled right away. This should reduce memory consumption + greatly, and therefore should have some impact on the time needed + to parse/compile large fonts. + + If the recalcTimestamp argument is false, the modified timestamp in the + 'head' table will *not* be recalculated upon save/compile. + + If the allowVID argument is set to true, then virtual GID's are + supported. Asking for a glyph ID with a glyph name or GID that is not in + the font will return a virtual GID. This is valid for GSUB and cmap + tables. For SING glyphlets, the cmap table is used to specify Unicode + values for virtual GI's used in GSUB/GPOS rules. 
If the gid N is requested + and does not exist in the font, or the glyphname has the form glyphN + and does not exist in the font, then N is used as the virtual GID. + Else, the first virtual GID is assigned as 0x1000 -1; for subsequent new + virtual GIDs, the next is one less than the previous. + + If ignoreDecompileErrors is set to True, exceptions raised in + individual tables during decompilation will be ignored, falling + back to the DefaultTable implementation, which simply keeps the + binary data. + + If lazy is set to True, many data structures are loaded lazily, upon + access only. If it is set to False, many data structures are loaded + immediately. The default is lazy=None which is somewhere in between. + """ + + from fontTools.ttLib import sfnt + self.verbose = verbose + self.quiet = quiet + self.lazy = lazy + self.recalcBBoxes = recalcBBoxes + self.recalcTimestamp = recalcTimestamp + self.tables = {} + self.reader = None + + # Permit the user to reference glyphs that are not int the font. + self.last_vid = 0xFFFE # Can't make it be 0xFFFF, as the world is full unsigned short integer counters that get incremented after the last seen GID value. + self.reverseVIDDict = {} + self.VIDDict = {} + self.allowVID = allowVID + self.ignoreDecompileErrors = ignoreDecompileErrors + + if not file: + self.sfntVersion = sfntVersion + self.flavor = flavor + self.flavorData = None + return + if not hasattr(file, "read"): + closeStream = True + # assume file is a string + if haveMacSupport and res_name_or_index is not None: + # on the mac, we deal with sfnt resources as well as flat files + from . import macUtils + if res_name_or_index == 0: + if macUtils.getSFNTResIndices(file): + # get the first available sfnt font. 
+ file = macUtils.SFNTResourceReader(file, 1) + else: + file = open(file, "rb") + else: + file = macUtils.SFNTResourceReader(file, res_name_or_index) + else: + file = open(file, "rb") + + else: + # assume "file" is a readable file object + closeStream = False + # read input file in memory and wrap a stream around it to allow overwriting + tmp = BytesIO(file.read()) + if hasattr(file, 'name'): + # save reference to input file name + tmp.name = file.name + if closeStream: + file.close() + self.reader = sfnt.SFNTReader(tmp, checkChecksums, fontNumber=fontNumber) + self.sfntVersion = self.reader.sfntVersion + self.flavor = self.reader.flavor + self.flavorData = self.reader.flavorData + + def close(self): + """If we still have a reader object, close it.""" + if self.reader is not None: + self.reader.close() + + def save(self, file, makeSuitcase=False, reorderTables=True): + """Save the font to disk. Similarly to the constructor, + the 'file' argument can be either a pathname or a writable + file object. + + On the Mac, if makeSuitcase is true, a suitcase (resource fork) + file will we made instead of a flat .ttf file. + """ + from fontTools.ttLib import sfnt + if not hasattr(file, "write"): + closeStream = 1 + if os.name == "mac" and makeSuitcase: + from . 
import macUtils + file = macUtils.SFNTResourceWriter(file, self) + else: + file = open(file, "wb") + if os.name == "mac": + from fontTools.misc.macCreator import setMacCreatorAndType + setMacCreatorAndType(file.name, 'mdos', 'BINA') + else: + # assume "file" is a writable file object + closeStream = 0 + + tags = list(self.keys()) + if "GlyphOrder" in tags: + tags.remove("GlyphOrder") + numTables = len(tags) + # write to a temporary stream to allow saving to unseekable streams + tmp = BytesIO() + writer = sfnt.SFNTWriter(tmp, numTables, self.sfntVersion, self.flavor, self.flavorData) + + done = [] + for tag in tags: + self._writeTable(tag, writer, done) + + writer.close() + + if (reorderTables is None or writer.reordersTables() or + (reorderTables is False and self.reader is None)): + # don't reorder tables and save as is + file.write(tmp.getvalue()) + tmp.close() + else: + if reorderTables is False: + # sort tables using the original font's order + tableOrder = list(self.reader.keys()) + else: + # use the recommended order from the OpenType specification + tableOrder = None + tmp.flush() + tmp.seek(0) + tmp2 = BytesIO() + reorderFontTables(tmp, tmp2, tableOrder) + file.write(tmp2.getvalue()) + tmp.close() + tmp2.close() + + if closeStream: + file.close() + + def saveXML(self, fileOrPath, progress=None, quiet=False, + tables=None, skipTables=None, splitTables=False, disassembleInstructions=True, + bitmapGlyphDataFormat='raw'): + """Export the font as TTX (an XML-based text file), or as a series of text + files when splitTables is true. In the latter case, the 'fileOrPath' + argument should be a path to a directory. + The 'tables' argument must either be false (dump all tables) or a + list of tables to dump. The 'skipTables' argument may be a list of tables + to skip, but only when the 'tables' argument is false. 
+ """ + from fontTools import version + from fontTools.misc import xmlWriter + + self.disassembleInstructions = disassembleInstructions + self.bitmapGlyphDataFormat = bitmapGlyphDataFormat + if not tables: + tables = list(self.keys()) + if "GlyphOrder" not in tables: + tables = ["GlyphOrder"] + tables + if skipTables: + for tag in skipTables: + if tag in tables: + tables.remove(tag) + numTables = len(tables) + if progress: + progress.set(0, numTables) + idlefunc = getattr(progress, "idle", None) + else: + idlefunc = None + + writer = xmlWriter.XMLWriter(fileOrPath, idlefunc=idlefunc) + writer.begintag("ttFont", sfntVersion=repr(self.sfntVersion)[1:-1], + ttLibVersion=version) + writer.newline() + + if not splitTables: + writer.newline() + else: + # 'fileOrPath' must now be a path + path, ext = os.path.splitext(fileOrPath) + fileNameTemplate = path + ".%s" + ext + + for i in range(numTables): + if progress: + progress.set(i) + tag = tables[i] + if splitTables: + tablePath = fileNameTemplate % tagToIdentifier(tag) + tableWriter = xmlWriter.XMLWriter(tablePath, idlefunc=idlefunc) + tableWriter.begintag("ttFont", ttLibVersion=version) + tableWriter.newline() + tableWriter.newline() + writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath)) + writer.newline() + else: + tableWriter = writer + self._tableToXML(tableWriter, tag, progress, quiet) + if splitTables: + tableWriter.endtag("ttFont") + tableWriter.newline() + tableWriter.close() + if progress: + progress.set((i + 1)) + writer.endtag("ttFont") + writer.newline() + writer.close() + if self.verbose: + debugmsg("Done dumping TTX") + + def _tableToXML(self, writer, tag, progress, quiet): + if tag in self: + table = self[tag] + report = "Dumping '%s' table..." % tag + else: + report = "No '%s' table found." 
% tag + if progress: + progress.setLabel(report) + elif self.verbose: + debugmsg(report) + else: + if not quiet: + print(report) + if tag not in self: + return + xmlTag = tagToXML(tag) + attrs = dict() + if hasattr(table, "ERROR"): + attrs['ERROR'] = "decompilation error" + from .tables.DefaultTable import DefaultTable + if table.__class__ == DefaultTable: + attrs['raw'] = True + writer.begintag(xmlTag, **attrs) + writer.newline() + if tag in ("glyf", "CFF "): + table.toXML(writer, self, progress) + else: + table.toXML(writer, self) + writer.endtag(xmlTag) + writer.newline() + writer.newline() + + def importXML(self, file, progress=None, quiet=False): + """Import a TTX file (an XML-based text format), so as to recreate + a font object. + """ + if "maxp" in self and "post" in self: + # Make sure the glyph order is loaded, as it otherwise gets + # lost if the XML doesn't contain the glyph order, yet does + # contain the table which was originally used to extract the + # glyph names from (ie. 'post', 'cmap' or 'CFF '). 
+ self.getGlyphOrder() + + from fontTools.misc import xmlReader + + reader = xmlReader.XMLReader(file, self, progress, quiet) + reader.read() + + def isLoaded(self, tag): + """Return true if the table identified by 'tag' has been + decompiled and loaded into memory.""" + return tag in self.tables + + def has_key(self, tag): + if self.isLoaded(tag): + return True + elif self.reader and tag in self.reader: + return True + elif tag == "GlyphOrder": + return True + else: + return False + + __contains__ = has_key + + def keys(self): + keys = list(self.tables.keys()) + if self.reader: + for key in list(self.reader.keys()): + if key not in keys: + keys.append(key) + + if "GlyphOrder" in keys: + keys.remove("GlyphOrder") + keys = sortedTagList(keys) + return ["GlyphOrder"] + keys + + def __len__(self): + return len(list(self.keys())) + + def __getitem__(self, tag): + tag = Tag(tag) + try: + return self.tables[tag] + except KeyError: + if tag == "GlyphOrder": + table = GlyphOrder(tag) + self.tables[tag] = table + return table + if self.reader is not None: + import traceback + if self.verbose: + debugmsg("Reading '%s' table from disk" % tag) + data = self.reader[tag] + tableClass = getTableClass(tag) + table = tableClass(tag) + self.tables[tag] = table + if self.verbose: + debugmsg("Decompiling '%s' table" % tag) + try: + table.decompile(data, self) + except: + if not self.ignoreDecompileErrors: + raise + # fall back to DefaultTable, retaining the binary table data + print("An exception occurred during the decompilation of the '%s' table" % tag) + from .tables.DefaultTable import DefaultTable + file = StringIO() + traceback.print_exc(file=file) + table = DefaultTable(tag) + table.ERROR = file.getvalue() + self.tables[tag] = table + table.decompile(data, self) + return table + else: + raise KeyError("'%s' table not found" % tag) + + def __setitem__(self, tag, table): + self.tables[Tag(tag)] = table + + def __delitem__(self, tag): + if tag not in self: + raise KeyError("'%s' 
table not found" % tag) + if tag in self.tables: + del self.tables[tag] + if self.reader and tag in self.reader: + del self.reader[tag] + + def get(self, tag, default=None): + try: + return self[tag] + except KeyError: + return default + + def setGlyphOrder(self, glyphOrder): + self.glyphOrder = glyphOrder + + def getGlyphOrder(self): + try: + return self.glyphOrder + except AttributeError: + pass + if 'CFF ' in self: + cff = self['CFF '] + self.glyphOrder = cff.getGlyphOrder() + elif 'post' in self: + # TrueType font + glyphOrder = self['post'].getGlyphOrder() + if glyphOrder is None: + # + # No names found in the 'post' table. + # Try to create glyph names from the unicode cmap (if available) + # in combination with the Adobe Glyph List (AGL). + # + self._getGlyphNamesFromCmap() + else: + self.glyphOrder = glyphOrder + else: + self._getGlyphNamesFromCmap() + return self.glyphOrder + + def _getGlyphNamesFromCmap(self): + # + # This is rather convoluted, but then again, it's an interesting problem: + # - we need to use the unicode values found in the cmap table to + # build glyph names (eg. because there is only a minimal post table, + # or none at all). + # - but the cmap parser also needs glyph names to work with... + # So here's what we do: + # - make up glyph names based on glyphID + # - load a temporary cmap table based on those names + # - extract the unicode values, build the "real" glyph names + # - unload the temporary cmap table + # + if self.isLoaded("cmap"): + # Bootstrapping: we're getting called by the cmap parser + # itself. This means self.tables['cmap'] contains a partially + # loaded cmap, making it impossible to get at a unicode + # subtable here. We remove the partially loaded cmap and + # restore it later. + # This only happens if the cmap table is loaded before any + # other table that does f.getGlyphOrder() or f.getGlyphName(). 
+ cmapLoading = self.tables['cmap'] + del self.tables['cmap'] + else: + cmapLoading = None + # Make up glyph names based on glyphID, which will be used by the + # temporary cmap and by the real cmap in case we don't find a unicode + # cmap. + numGlyphs = int(self['maxp'].numGlyphs) + glyphOrder = [None] * numGlyphs + glyphOrder[0] = ".notdef" + for i in range(1, numGlyphs): + glyphOrder[i] = "glyph%.5d" % i + # Set the glyph order, so the cmap parser has something + # to work with (so we don't get called recursively). + self.glyphOrder = glyphOrder + # Get a (new) temporary cmap (based on the just invented names) + try: + tempcmap = self['cmap'].getcmap(3, 1) + except KeyError: + tempcmap = None + if tempcmap is not None: + # we have a unicode cmap + from fontTools import agl + cmap = tempcmap.cmap + # create a reverse cmap dict + reversecmap = {} + for unicode, name in list(cmap.items()): + reversecmap[name] = unicode + allNames = {} + for i in range(numGlyphs): + tempName = glyphOrder[i] + if tempName in reversecmap: + unicode = reversecmap[tempName] + if unicode in agl.UV2AGL: + # get name from the Adobe Glyph List + glyphName = agl.UV2AGL[unicode] + else: + # create uni name + glyphName = "uni%04X" % unicode + tempName = glyphName + n = allNames.get(tempName, 0) + if n: + tempName = glyphName + "#" + str(n) + glyphOrder[i] = tempName + allNames[tempName] = n + 1 + # Delete the temporary cmap table from the cache, so it can + # be parsed again with the right names. + del self.tables['cmap'] + else: + pass # no unicode cmap available, stick with the invented names + self.glyphOrder = glyphOrder + if cmapLoading: + # restore partially loaded cmap, so it can continue loading + # using the proper names. 
+ self.tables['cmap'] = cmapLoading + + def getGlyphNames(self): + """Get a list of glyph names, sorted alphabetically.""" + glyphNames = sorted(self.getGlyphOrder()[:]) + return glyphNames + + def getGlyphNames2(self): + """Get a list of glyph names, sorted alphabetically, + but not case sensitive. + """ + from fontTools.misc import textTools + return textTools.caselessSort(self.getGlyphOrder()) + + def getGlyphName(self, glyphID, requireReal=False): + try: + return self.getGlyphOrder()[glyphID] + except IndexError: + if requireReal or not self.allowVID: + # XXX The ??.W8.otf font that ships with OSX uses higher glyphIDs in + # the cmap table than there are glyphs. I don't think it's legal... + return "glyph%.5d" % glyphID + else: + # user intends virtual GID support + try: + glyphName = self.VIDDict[glyphID] + except KeyError: + glyphName ="glyph%.5d" % glyphID + self.last_vid = min(glyphID, self.last_vid ) + self.reverseVIDDict[glyphName] = glyphID + self.VIDDict[glyphID] = glyphName + return glyphName + + def getGlyphID(self, glyphName, requireReal=False): + if not hasattr(self, "_reverseGlyphOrderDict"): + self._buildReverseGlyphOrderDict() + glyphOrder = self.getGlyphOrder() + d = self._reverseGlyphOrderDict + if glyphName not in d: + if glyphName in glyphOrder: + self._buildReverseGlyphOrderDict() + return self.getGlyphID(glyphName) + else: + if requireReal: + raise KeyError(glyphName) + elif not self.allowVID: + # Handle glyphXXX only + if glyphName[:5] == "glyph": + try: + return int(glyphName[5:]) + except (NameError, ValueError): + raise KeyError(glyphName) + else: + # user intends virtual GID support + try: + glyphID = self.reverseVIDDict[glyphName] + except KeyError: + # if name is in glyphXXX format, use the specified name. 
+ if glyphName[:5] == "glyph": + try: + glyphID = int(glyphName[5:]) + except (NameError, ValueError): + glyphID = None + if glyphID is None: + glyphID = self.last_vid -1 + self.last_vid = glyphID + self.reverseVIDDict[glyphName] = glyphID + self.VIDDict[glyphID] = glyphName + return glyphID + + glyphID = d[glyphName] + if glyphName != glyphOrder[glyphID]: + self._buildReverseGlyphOrderDict() + return self.getGlyphID(glyphName) + return glyphID + + def getReverseGlyphMap(self, rebuild=False): + if rebuild or not hasattr(self, "_reverseGlyphOrderDict"): + self._buildReverseGlyphOrderDict() + return self._reverseGlyphOrderDict + + def _buildReverseGlyphOrderDict(self): + self._reverseGlyphOrderDict = d = {} + glyphOrder = self.getGlyphOrder() + for glyphID in range(len(glyphOrder)): + d[glyphOrder[glyphID]] = glyphID + + def _writeTable(self, tag, writer, done): + """Internal helper function for self.save(). Keeps track of + inter-table dependencies. + """ + if tag in done: + return + tableClass = getTableClass(tag) + for masterTable in tableClass.dependencies: + if masterTable not in done: + if masterTable in self: + self._writeTable(masterTable, writer, done) + else: + done.append(masterTable) + tabledata = self.getTableData(tag) + if self.verbose: + debugmsg("writing '%s' table to disk" % tag) + writer[tag] = tabledata + done.append(tag) + + def getTableData(self, tag): + """Returns raw table data, whether compiled or directly read from disk. + """ + tag = Tag(tag) + if self.isLoaded(tag): + if self.verbose: + debugmsg("compiling '%s' table" % tag) + return self.tables[tag].compile(self) + elif self.reader and tag in self.reader: + if self.verbose: + debugmsg("Reading '%s' table from disk" % tag) + return self.reader[tag] + else: + raise KeyError(tag) + + def getGlyphSet(self, preferCFF=True): + """Return a generic GlyphSet, which is a dict-like object + mapping glyph names to glyph objects. 
The returned glyph objects + have a .draw() method that supports the Pen protocol, and will + have an attribute named 'width'. + + If the font is CFF-based, the outlines will be taken from the 'CFF ' + table. Otherwise the outlines will be taken from the 'glyf' table. + If the font contains both a 'CFF ' and a 'glyf' table, you can use + the 'preferCFF' argument to specify which one should be taken. + """ + glyphs = None + if (preferCFF and "CFF " in self) or "glyf" not in self: + glyphs = _TTGlyphSet(self, list(self["CFF "].cff.values())[0].CharStrings, _TTGlyphCFF) + + if glyphs is None and "glyf" in self: + glyphs = _TTGlyphSet(self, self["glyf"], _TTGlyphGlyf) + + if glyphs is None: + raise TTLibError("Font contains no outlines") + + return glyphs + + +class _TTGlyphSet(object): + + """Generic dict-like GlyphSet class that pulls metrics from hmtx and + glyph shape from TrueType or CFF. + """ + + def __init__(self, ttFont, glyphs, glyphType): + self._glyphs = glyphs + self._hmtx = ttFont['hmtx'] + self._glyphType = glyphType + + def keys(self): + return list(self._glyphs.keys()) + + def has_key(self, glyphName): + return glyphName in self._glyphs + + __contains__ = has_key + + def __getitem__(self, glyphName): + return self._glyphType(self, self._glyphs[glyphName], self._hmtx[glyphName]) + + def get(self, glyphName, default=None): + try: + return self[glyphName] + except KeyError: + return default + +class _TTGlyph(object): + + """Wrapper for a TrueType glyph that supports the Pen protocol, meaning + that it has a .draw() method that takes a pen object as its only + argument. Additionally there is a 'width' attribute. + """ + + def __init__(self, glyphset, glyph, metrics): + self._glyphset = glyphset + self._glyph = glyph + self.width, self.lsb = metrics + + def draw(self, pen): + """Draw the glyph onto Pen. See fontTools.pens.basePen for details + how that works. 
+ """ + self._glyph.draw(pen) + +class _TTGlyphCFF(_TTGlyph): + pass + +class _TTGlyphGlyf(_TTGlyph): + + def draw(self, pen): + """Draw the glyph onto Pen. See fontTools.pens.basePen for details + how that works. + """ + glyfTable = self._glyphset._glyphs + glyph = self._glyph + offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0 + glyph.draw(pen, glyfTable, offset) + + +class GlyphOrder(object): + + """A pseudo table. The glyph order isn't in the font as a separate + table, but it's nice to present it as such in the TTX format. + """ + + def __init__(self, tag=None): + pass + + def toXML(self, writer, ttFont): + glyphOrder = ttFont.getGlyphOrder() + writer.comment("The 'id' attribute is only for humans; " + "it is ignored when parsed.") + writer.newline() + for i in range(len(glyphOrder)): + glyphName = glyphOrder[i] + writer.simpletag("GlyphID", id=i, name=glyphName) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "glyphOrder"): + self.glyphOrder = [] + ttFont.setGlyphOrder(self.glyphOrder) + if name == "GlyphID": + self.glyphOrder.append(attrs["name"]) + + +def getTableModule(tag): + """Fetch the packer/unpacker module for a table. + Return None when no module is found. + """ + from . import tables + pyTag = tagToIdentifier(tag) + try: + __import__("fontTools.ttLib.tables." + pyTag) + except ImportError as err: + # If pyTag is found in the ImportError message, + # means table is not implemented. If it's not + # there, then some other module is missing, don't + # suppress the error. + if str(err).find(pyTag) >= 0: + return None + else: + raise err + else: + return getattr(tables, pyTag) + + +def getTableClass(tag): + """Fetch the packer/unpacker class for a table. + Return None when no class is found. 
+ """ + module = getTableModule(tag) + if module is None: + from .tables.DefaultTable import DefaultTable + return DefaultTable + pyTag = tagToIdentifier(tag) + tableClass = getattr(module, "table_" + pyTag) + return tableClass + + +def getClassTag(klass): + """Fetch the table tag for a class object.""" + name = klass.__name__ + assert name[:6] == 'table_' + name = name[6:] # Chop 'table_' + return identifierToTag(name) + + +def newTable(tag): + """Return a new instance of a table.""" + tableClass = getTableClass(tag) + return tableClass(tag) + + +def _escapechar(c): + """Helper function for tagToIdentifier()""" + import re + if re.match("[a-z0-9]", c): + return "_" + c + elif re.match("[A-Z]", c): + return c + "_" + else: + return hex(byteord(c))[2:] + + +def tagToIdentifier(tag): + """Convert a table tag to a valid (but UGLY) python identifier, + as well as a filename that's guaranteed to be unique even on a + caseless file system. Each character is mapped to two characters. + Lowercase letters get an underscore before the letter, uppercase + letters get an underscore after the letter. Trailing spaces are + trimmed. Illegal characters are escaped as two hex bytes. If the + result starts with a number (as the result of a hex escape), an + extra underscore is prepended. 
Examples: + 'glyf' -> '_g_l_y_f' + 'cvt ' -> '_c_v_t' + 'OS/2' -> 'O_S_2f_2' + """ + import re + tag = Tag(tag) + if tag == "GlyphOrder": + return tag + assert len(tag) == 4, "tag should be 4 characters long" + while len(tag) > 1 and tag[-1] == ' ': + tag = tag[:-1] + ident = "" + for c in tag: + ident = ident + _escapechar(c) + if re.match("[0-9]", ident): + ident = "_" + ident + return ident + + +def identifierToTag(ident): + """the opposite of tagToIdentifier()""" + if ident == "GlyphOrder": + return ident + if len(ident) % 2 and ident[0] == "_": + ident = ident[1:] + assert not (len(ident) % 2) + tag = "" + for i in range(0, len(ident), 2): + if ident[i] == "_": + tag = tag + ident[i+1] + elif ident[i+1] == "_": + tag = tag + ident[i] + else: + # assume hex + tag = tag + chr(int(ident[i:i+2], 16)) + # append trailing spaces + tag = tag + (4 - len(tag)) * ' ' + return Tag(tag) + + +def tagToXML(tag): + """Similarly to tagToIdentifier(), this converts a TT tag + to a valid XML element name. Since XML element names are + case sensitive, this is a fairly simple/readable translation. 
+ """ + import re + tag = Tag(tag) + if tag == "OS/2": + return "OS_2" + elif tag == "GlyphOrder": + return tag + if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag): + return tag.strip() + else: + return tagToIdentifier(tag) + + +def xmlToTag(tag): + """The opposite of tagToXML()""" + if tag == "OS_2": + return Tag("OS/2") + if len(tag) == 8: + return identifierToTag(tag) + else: + return Tag(tag + " " * (4 - len(tag))) + + +def debugmsg(msg): + import time + print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time()))) + + +# Table order as recommended in the OpenType specification 1.4 +TTFTableOrder = ["head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX", + "hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf", + "kern", "name", "post", "gasp", "PCLT"] + +OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post", + "CFF "] + +def sortedTagList(tagList, tableOrder=None): + """Return a sorted copy of tagList, sorted according to the OpenType + specification, or according to a custom tableOrder. If given and not + None, tableOrder needs to be a list of tag names. + """ + tagList = sorted(tagList) + if tableOrder is None: + if "DSIG" in tagList: + # DSIG should be last (XXX spec reference?) + tagList.remove("DSIG") + tagList.append("DSIG") + if "CFF " in tagList: + tableOrder = OTFTableOrder + else: + tableOrder = TTFTableOrder + orderedTables = [] + for tag in tableOrder: + if tag in tagList: + orderedTables.append(tag) + tagList.remove(tag) + orderedTables.extend(tagList) + return orderedTables + + +def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False): + """Rewrite a font file, ordering the tables as recommended by the + OpenType specification 1.4. 
+ """ + from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter + reader = SFNTReader(inFile, checkChecksums=checkChecksums) + writer = SFNTWriter(outFile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) + tables = list(reader.keys()) + for tag in sortedTagList(tables, tableOrder): + writer[tag] = reader[tag] + writer.close() + + +def maxPowerOfTwo(x): + """Return the highest exponent of two, so that + (2 ** exponent) <= x. Return 0 if x is 0. + """ + exponent = 0 + while x: + x = x >> 1 + exponent = exponent + 1 + return max(exponent - 1, 0) + + +def getSearchRange(n, itemSize=16): + """Calculate searchRange, entrySelector, rangeShift. + """ + # itemSize defaults to 16, for backward compatibility + # with upstream fonttools. + exponent = maxPowerOfTwo(n) + searchRange = (2 ** exponent) * itemSize + entrySelector = exponent + rangeShift = max(0, n * itemSize - searchRange) + return searchRange, entrySelector, rangeShift diff -Nru fonttools-2.4/Tools/fontTools/ttLib/macUtils.py fonttools-3.0/Tools/fontTools/ttLib/macUtils.py --- fonttools-2.4/Tools/fontTools/ttLib/macUtils.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/macUtils.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,73 @@ +"""ttLib.macUtils.py -- Various Mac-specific stuff.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +import os +if sys.platform not in ("mac", "darwin"): + raise ImportError("This module is Mac-only!") +try: + from Carbon import Res +except ImportError: + import Res + + +def MyOpenResFile(path): + mode = 1 # read only + try: + resref = Res.FSOpenResFile(path, mode) + except Res.Error: + # try data fork + resref = Res.FSOpenResourceFile(path, unicode(), mode) + return resref + + +def getSFNTResIndices(path): + """Determine whether a file has a resource fork or not.""" + try: + resref = MyOpenResFile(path) + except Res.Error: + return [] + Res.UseResFile(resref) 
+ numSFNTs = Res.Count1Resources('sfnt') + Res.CloseResFile(resref) + return list(range(1, numSFNTs + 1)) + + +def openTTFonts(path): + """Given a pathname, return a list of TTFont objects. In the case + of a flat TTF/OTF file, the list will contain just one font object; + but in the case of a Mac font suitcase it will contain as many + font objects as there are sfnt resources in the file. + """ + from fontTools import ttLib + fonts = [] + sfnts = getSFNTResIndices(path) + if not sfnts: + fonts.append(ttLib.TTFont(path)) + else: + for index in sfnts: + fonts.append(ttLib.TTFont(path, index)) + if not fonts: + raise ttLib.TTLibError("no fonts found in file '%s'" % path) + return fonts + + +class SFNTResourceReader(object): + + """Simple (Mac-only) read-only file wrapper for 'sfnt' resources.""" + + def __init__(self, path, res_name_or_index): + resref = MyOpenResFile(path) + Res.UseResFile(resref) + if isinstance(res_name_or_index, basestring): + res = Res.Get1NamedResource('sfnt', res_name_or_index) + else: + res = Res.Get1IndResource('sfnt', res_name_or_index) + self.file = BytesIO(res.data) + Res.CloseResFile(resref) + self.name = path + + def __getattr__(self, attr): + # cheap inheritance + return getattr(self.file, attr) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/sfnt.py fonttools-3.0/Tools/fontTools/ttLib/sfnt.py --- fonttools-2.4/Tools/fontTools/ttLib/sfnt.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/sfnt.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,520 @@ +"""ttLib/sfnt.py -- low-level module to deal with the sfnt file format. + +Defines two public classes: + SFNTReader + SFNTWriter + +(Normally you don't have to use these classes explicitly; they are +used automatically by ttLib.TTFont.) + +The reading and writing of sfnt files is separated in two distinct +classes, since whenever to number of tables changes or whenever +a table's length chages you need to rewrite the whole file anyway. 
+""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.ttLib import getSearchRange +import struct +from collections import OrderedDict + + +class SFNTReader(object): + + def __new__(cls, *args, **kwargs): + """ Return an instance of the SFNTReader sub-class which is compatible + with the input file type. + """ + if args and cls is SFNTReader: + infile = args[0] + sfntVersion = Tag(infile.read(4)) + infile.seek(0) + if sfntVersion == "wOF2": + # return new WOFF2Reader object + from fontTools.ttLib.woff2 import WOFF2Reader + return object.__new__(WOFF2Reader) + # return default object + return object.__new__(cls) + + def __init__(self, file, checkChecksums=1, fontNumber=-1): + self.file = file + self.checkChecksums = checkChecksums + + self.flavor = None + self.flavorData = None + self.DirectoryEntry = SFNTDirectoryEntry + self.sfntVersion = self.file.read(4) + self.file.seek(0) + if self.sfntVersion == b"ttcf": + data = self.file.read(ttcHeaderSize) + if len(data) != ttcHeaderSize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a Font Collection (not enough data)") + sstruct.unpack(ttcHeaderFormat, data, self) + assert self.Version == 0x00010000 or self.Version == 0x00020000, "unrecognized TTC version 0x%08x" % self.Version + if not 0 <= fontNumber < self.numFonts: + from fontTools import ttLib + raise ttLib.TTLibError("specify a font number between 0 and %d (inclusive)" % (self.numFonts - 1)) + offsetTable = struct.unpack(">%dL" % self.numFonts, self.file.read(self.numFonts * 4)) + if self.Version == 0x00020000: + pass # ignoring version 2.0 signatures + self.file.seek(offsetTable[fontNumber]) + data = self.file.read(sfntDirectorySize) + if len(data) != sfntDirectorySize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a Font Collection (not enough data)") + sstruct.unpack(sfntDirectoryFormat, data, self) + elif self.sfntVersion == 
b"wOFF": + self.flavor = "woff" + self.DirectoryEntry = WOFFDirectoryEntry + data = self.file.read(woffDirectorySize) + if len(data) != woffDirectorySize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a WOFF font (not enough data)") + sstruct.unpack(woffDirectoryFormat, data, self) + else: + data = self.file.read(sfntDirectorySize) + if len(data) != sfntDirectorySize: + from fontTools import ttLib + raise ttLib.TTLibError("Not a TrueType or OpenType font (not enough data)") + sstruct.unpack(sfntDirectoryFormat, data, self) + self.sfntVersion = Tag(self.sfntVersion) + + if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"): + from fontTools import ttLib + raise ttLib.TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") + self.tables = OrderedDict() + for i in range(self.numTables): + entry = self.DirectoryEntry() + entry.fromFile(self.file) + tag = Tag(entry.tag) + self.tables[tag] = entry + + # Load flavor data if any + if self.flavor == "woff": + self.flavorData = WOFFFlavorData(self) + + def has_key(self, tag): + return tag in self.tables + + __contains__ = has_key + + def keys(self): + return self.tables.keys() + + def __getitem__(self, tag): + """Fetch the raw table data.""" + entry = self.tables[Tag(tag)] + data = entry.loadData (self.file) + if self.checkChecksums: + if tag == 'head': + # Beh: we have to special-case the 'head' table. + checksum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) + else: + checksum = calcChecksum(data) + if self.checkChecksums > 1: + # Be obnoxious, and barf when it's wrong + assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag + elif checksum != entry.checkSum: + # Be friendly, and just print a warning. 
+ print("bad checksum for '%s' table" % tag) + return data + + def __delitem__(self, tag): + del self.tables[Tag(tag)] + + def close(self): + self.file.close() + + +class SFNTWriter(object): + + def __new__(cls, *args, **kwargs): + """ Return an instance of the SFNTWriter sub-class which is compatible + with the specified 'flavor'. + """ + flavor = None + if kwargs and 'flavor' in kwargs: + flavor = kwargs['flavor'] + elif args and len(args) > 3: + flavor = args[3] + if cls is SFNTWriter: + if flavor == "woff2": + # return new WOFF2Writer object + from fontTools.ttLib.woff2 import WOFF2Writer + return object.__new__(WOFF2Writer) + # return default object + return object.__new__(cls) + + def __init__(self, file, numTables, sfntVersion="\000\001\000\000", + flavor=None, flavorData=None): + self.file = file + self.numTables = numTables + self.sfntVersion = Tag(sfntVersion) + self.flavor = flavor + self.flavorData = flavorData + + if self.flavor == "woff": + self.directoryFormat = woffDirectoryFormat + self.directorySize = woffDirectorySize + self.DirectoryEntry = WOFFDirectoryEntry + + self.signature = "wOFF" + + # to calculate WOFF checksum adjustment, we also need the original SFNT offsets + self.origNextTableOffset = sfntDirectorySize + numTables * sfntDirectoryEntrySize + else: + assert not self.flavor, "Unknown flavor '%s'" % self.flavor + self.directoryFormat = sfntDirectoryFormat + self.directorySize = sfntDirectorySize + self.DirectoryEntry = SFNTDirectoryEntry + + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(numTables, 16) + + self.nextTableOffset = self.directorySize + numTables * self.DirectoryEntry.formatSize + # clear out directory area + self.file.seek(self.nextTableOffset) + # make sure we're actually where we want to be. 
(old cStringIO bug) + self.file.write(b'\0' * (self.nextTableOffset - self.file.tell())) + self.tables = OrderedDict() + + def __setitem__(self, tag, data): + """Write raw table data to disk.""" + if tag in self.tables: + from fontTools import ttLib + raise ttLib.TTLibError("cannot rewrite '%s' table" % tag) + + entry = self.DirectoryEntry() + entry.tag = tag + entry.offset = self.nextTableOffset + if tag == 'head': + entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) + self.headTable = data + entry.uncompressed = True + else: + entry.checkSum = calcChecksum(data) + entry.saveData(self.file, data) + + if self.flavor == "woff": + entry.origOffset = self.origNextTableOffset + self.origNextTableOffset += (entry.origLength + 3) & ~3 + + self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3) + # Add NUL bytes to pad the table data to a 4-byte boundary. + # Don't depend on f.seek() as we need to add the padding even if no + # subsequent write follows (seek is lazy), ie. after the final table + # in the font. + self.file.write(b'\0' * (self.nextTableOffset - self.file.tell())) + assert self.nextTableOffset == self.file.tell() + + self.tables[tag] = entry + + def close(self): + """All tables must have been written to disk. Now write the + directory. 
+ """ + tables = sorted(self.tables.items()) + if len(tables) != self.numTables: + from fontTools import ttLib + raise ttLib.TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(tables))) + + if self.flavor == "woff": + self.signature = b"wOFF" + self.reserved = 0 + + self.totalSfntSize = 12 + self.totalSfntSize += 16 * len(tables) + for tag, entry in tables: + self.totalSfntSize += (entry.origLength + 3) & ~3 + + data = self.flavorData if self.flavorData else WOFFFlavorData() + if data.majorVersion is not None and data.minorVersion is not None: + self.majorVersion = data.majorVersion + self.minorVersion = data.minorVersion + else: + if hasattr(self, 'headTable'): + self.majorVersion, self.minorVersion = struct.unpack(">HH", self.headTable[4:8]) + else: + self.majorVersion = self.minorVersion = 0 + if data.metaData: + self.metaOrigLength = len(data.metaData) + self.file.seek(0,2) + self.metaOffset = self.file.tell() + import zlib + compressedMetaData = zlib.compress(data.metaData) + self.metaLength = len(compressedMetaData) + self.file.write(compressedMetaData) + else: + self.metaOffset = self.metaLength = self.metaOrigLength = 0 + if data.privData: + self.file.seek(0,2) + off = self.file.tell() + paddedOff = (off + 3) & ~3 + self.file.write('\0' * (paddedOff - off)) + self.privOffset = self.file.tell() + self.privLength = len(data.privData) + self.file.write(data.privData) + else: + self.privOffset = self.privLength = 0 + + self.file.seek(0,2) + self.length = self.file.tell() + + else: + assert not self.flavor, "Unknown flavor '%s'" % self.flavor + pass + + directory = sstruct.pack(self.directoryFormat, self) + + self.file.seek(self.directorySize) + seenHead = 0 + for tag, entry in tables: + if tag == "head": + seenHead = 1 + directory = directory + entry.toString() + if seenHead: + self.writeMasterChecksum(directory) + self.file.seek(0) + self.file.write(directory) + + def _calcMasterChecksum(self, directory): + # calculate 
checkSumAdjustment + tags = list(self.tables.keys()) + checksums = [] + for i in range(len(tags)): + checksums.append(self.tables[tags[i]].checkSum) + + if self.DirectoryEntry != SFNTDirectoryEntry: + # Create a SFNT directory for checksum calculation purposes + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16) + directory = sstruct.pack(sfntDirectoryFormat, self) + tables = sorted(self.tables.items()) + for tag, entry in tables: + sfntEntry = SFNTDirectoryEntry() + sfntEntry.tag = entry.tag + sfntEntry.checkSum = entry.checkSum + sfntEntry.offset = entry.origOffset + sfntEntry.length = entry.origLength + directory = directory + sfntEntry.toString() + + directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize + assert directory_end == len(directory) + + checksums.append(calcChecksum(directory)) + checksum = sum(checksums) & 0xffffffff + # BiboAfba! + checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff + return checksumadjustment + + def writeMasterChecksum(self, directory): + checksumadjustment = self._calcMasterChecksum(directory) + # write the checksum to the file + self.file.seek(self.tables['head'].offset + 8) + self.file.write(struct.pack(">L", checksumadjustment)) + + def reordersTables(self): + return False + + +# -- sfnt directory helpers and cruft + +ttcHeaderFormat = """ + > # big endian + TTCTag: 4s # "ttcf" + Version: L # 0x00010000 or 0x00020000 + numFonts: L # number of fonts + # OffsetTable[numFonts]: L # array with offsets from beginning of file + # ulDsigTag: L # version 2.0 only + # ulDsigLength: L # version 2.0 only + # ulDsigOffset: L # version 2.0 only +""" + +ttcHeaderSize = sstruct.calcsize(ttcHeaderFormat) + +sfntDirectoryFormat = """ + > # big endian + sfntVersion: 4s + numTables: H # number of tables + searchRange: H # (max2 <= numTables)*16 + entrySelector: H # log2(max2 <= numTables) + rangeShift: H # numTables*16-searchRange +""" + +sfntDirectorySize = 
sstruct.calcsize(sfntDirectoryFormat) + +sfntDirectoryEntryFormat = """ + > # big endian + tag: 4s + checkSum: L + offset: L + length: L +""" + +sfntDirectoryEntrySize = sstruct.calcsize(sfntDirectoryEntryFormat) + +woffDirectoryFormat = """ + > # big endian + signature: 4s # "wOFF" + sfntVersion: 4s + length: L # total woff file size + numTables: H # number of tables + reserved: H # set to 0 + totalSfntSize: L # uncompressed size + majorVersion: H # major version of WOFF file + minorVersion: H # minor version of WOFF file + metaOffset: L # offset to metadata block + metaLength: L # length of compressed metadata + metaOrigLength: L # length of uncompressed metadata + privOffset: L # offset to private data block + privLength: L # length of private data block +""" + +woffDirectorySize = sstruct.calcsize(woffDirectoryFormat) + +woffDirectoryEntryFormat = """ + > # big endian + tag: 4s + offset: L + length: L # compressed length + origLength: L # original length + checkSum: L # original checksum +""" + +woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat) + + +class DirectoryEntry(object): + + def __init__(self): + self.uncompressed = False # if True, always embed entry raw + + def fromFile(self, file): + sstruct.unpack(self.format, file.read(self.formatSize), self) + + def fromString(self, str): + sstruct.unpack(self.format, str, self) + + def toString(self): + return sstruct.pack(self.format, self) + + def __repr__(self): + if hasattr(self, "tag"): + return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self)) + else: + return "<%s at %x>" % (self.__class__.__name__, id(self)) + + def loadData(self, file): + file.seek(self.offset) + data = file.read(self.length) + assert len(data) == self.length + if hasattr(self.__class__, 'decodeData'): + data = self.decodeData(data) + return data + + def saveData(self, file, data): + if hasattr(self.__class__, 'encodeData'): + data = self.encodeData(data) + self.length = len(data) + 
file.seek(self.offset) + file.write(data) + + def decodeData(self, rawData): + return rawData + + def encodeData(self, data): + return data + +class SFNTDirectoryEntry(DirectoryEntry): + + format = sfntDirectoryEntryFormat + formatSize = sfntDirectoryEntrySize + +class WOFFDirectoryEntry(DirectoryEntry): + + format = woffDirectoryEntryFormat + formatSize = woffDirectoryEntrySize + zlibCompressionLevel = 6 + + def decodeData(self, rawData): + import zlib + if self.length == self.origLength: + data = rawData + else: + assert self.length < self.origLength + data = zlib.decompress(rawData) + assert len (data) == self.origLength + return data + + def encodeData(self, data): + import zlib + self.origLength = len(data) + if not self.uncompressed: + compressedData = zlib.compress(data, self.zlibCompressionLevel) + if self.uncompressed or len(compressedData) >= self.origLength: + # Encode uncompressed + rawData = data + self.length = self.origLength + else: + rawData = compressedData + self.length = len(rawData) + return rawData + +class WOFFFlavorData(): + + Flavor = 'woff' + + def __init__(self, reader=None): + self.majorVersion = None + self.minorVersion = None + self.metaData = None + self.privData = None + if reader: + self.majorVersion = reader.majorVersion + self.minorVersion = reader.minorVersion + if reader.metaLength: + reader.file.seek(reader.metaOffset) + rawData = reader.file.read(reader.metaLength) + assert len(rawData) == reader.metaLength + import zlib + data = zlib.decompress(rawData) + assert len(data) == reader.metaOrigLength + self.metaData = data + if reader.privLength: + reader.file.seek(reader.privOffset) + data = reader.file.read(reader.privLength) + assert len(data) == reader.privLength + self.privData = data + + +def calcChecksum(data): + """Calculate the checksum for an arbitrary block of data. + Optionally takes a 'start' argument, which allows you to + calculate a checksum in chunks by feeding it a previous + result. 
+ + If the data length is not a multiple of four, it assumes + it is to be padded with null byte. + + >>> print(calcChecksum(b"abcd")) + 1633837924 + >>> print(calcChecksum(b"abcdxyz")) + 3655064932 + """ + remainder = len(data) % 4 + if remainder: + data += b"\0" * (4 - remainder) + value = 0 + blockSize = 4096 + assert blockSize % 4 == 0 + for i in range(0, len(data), blockSize): + block = data[i:i+blockSize] + longs = struct.unpack(">%dL" % (len(block) // 4), block) + value = (value + sum(longs)) & 0xffffffff + return value + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/standardGlyphOrder.py fonttools-3.0/Tools/fontTools/ttLib/standardGlyphOrder.py --- fonttools-2.4/Tools/fontTools/ttLib/standardGlyphOrder.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/standardGlyphOrder.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,274 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +# +# 'post' table formats 1.0 and 2.0 rely on this list of "standard" +# glyphs. +# +# My list is correct according to the Apple documentation for the 'post' +# table: http://developer.apple.com/fonts/TTRefMan/RM06/Chap6post.html +# (However, it seems that TTFdump (from MS) and FontLab disagree, at +# least with respect to the last glyph, which they list as 'dslash' +# instead of 'dcroat'.) 
+# + +standardGlyphOrder = [ + ".notdef", # 0 + ".null", # 1 + "nonmarkingreturn", # 2 + "space", # 3 + "exclam", # 4 + "quotedbl", # 5 + "numbersign", # 6 + "dollar", # 7 + "percent", # 8 + "ampersand", # 9 + "quotesingle", # 10 + "parenleft", # 11 + "parenright", # 12 + "asterisk", # 13 + "plus", # 14 + "comma", # 15 + "hyphen", # 16 + "period", # 17 + "slash", # 18 + "zero", # 19 + "one", # 20 + "two", # 21 + "three", # 22 + "four", # 23 + "five", # 24 + "six", # 25 + "seven", # 26 + "eight", # 27 + "nine", # 28 + "colon", # 29 + "semicolon", # 30 + "less", # 31 + "equal", # 32 + "greater", # 33 + "question", # 34 + "at", # 35 + "A", # 36 + "B", # 37 + "C", # 38 + "D", # 39 + "E", # 40 + "F", # 41 + "G", # 42 + "H", # 43 + "I", # 44 + "J", # 45 + "K", # 46 + "L", # 47 + "M", # 48 + "N", # 49 + "O", # 50 + "P", # 51 + "Q", # 52 + "R", # 53 + "S", # 54 + "T", # 55 + "U", # 56 + "V", # 57 + "W", # 58 + "X", # 59 + "Y", # 60 + "Z", # 61 + "bracketleft", # 62 + "backslash", # 63 + "bracketright", # 64 + "asciicircum", # 65 + "underscore", # 66 + "grave", # 67 + "a", # 68 + "b", # 69 + "c", # 70 + "d", # 71 + "e", # 72 + "f", # 73 + "g", # 74 + "h", # 75 + "i", # 76 + "j", # 77 + "k", # 78 + "l", # 79 + "m", # 80 + "n", # 81 + "o", # 82 + "p", # 83 + "q", # 84 + "r", # 85 + "s", # 86 + "t", # 87 + "u", # 88 + "v", # 89 + "w", # 90 + "x", # 91 + "y", # 92 + "z", # 93 + "braceleft", # 94 + "bar", # 95 + "braceright", # 96 + "asciitilde", # 97 + "Adieresis", # 98 + "Aring", # 99 + "Ccedilla", # 100 + "Eacute", # 101 + "Ntilde", # 102 + "Odieresis", # 103 + "Udieresis", # 104 + "aacute", # 105 + "agrave", # 106 + "acircumflex", # 107 + "adieresis", # 108 + "atilde", # 109 + "aring", # 110 + "ccedilla", # 111 + "eacute", # 112 + "egrave", # 113 + "ecircumflex", # 114 + "edieresis", # 115 + "iacute", # 116 + "igrave", # 117 + "icircumflex", # 118 + "idieresis", # 119 + "ntilde", # 120 + "oacute", # 121 + "ograve", # 122 + "ocircumflex", # 123 + "odieresis", # 124 + 
"otilde", # 125 + "uacute", # 126 + "ugrave", # 127 + "ucircumflex", # 128 + "udieresis", # 129 + "dagger", # 130 + "degree", # 131 + "cent", # 132 + "sterling", # 133 + "section", # 134 + "bullet", # 135 + "paragraph", # 136 + "germandbls", # 137 + "registered", # 138 + "copyright", # 139 + "trademark", # 140 + "acute", # 141 + "dieresis", # 142 + "notequal", # 143 + "AE", # 144 + "Oslash", # 145 + "infinity", # 146 + "plusminus", # 147 + "lessequal", # 148 + "greaterequal", # 149 + "yen", # 150 + "mu", # 151 + "partialdiff", # 152 + "summation", # 153 + "product", # 154 + "pi", # 155 + "integral", # 156 + "ordfeminine", # 157 + "ordmasculine", # 158 + "Omega", # 159 + "ae", # 160 + "oslash", # 161 + "questiondown", # 162 + "exclamdown", # 163 + "logicalnot", # 164 + "radical", # 165 + "florin", # 166 + "approxequal", # 167 + "Delta", # 168 + "guillemotleft", # 169 + "guillemotright", # 170 + "ellipsis", # 171 + "nonbreakingspace", # 172 + "Agrave", # 173 + "Atilde", # 174 + "Otilde", # 175 + "OE", # 176 + "oe", # 177 + "endash", # 178 + "emdash", # 179 + "quotedblleft", # 180 + "quotedblright", # 181 + "quoteleft", # 182 + "quoteright", # 183 + "divide", # 184 + "lozenge", # 185 + "ydieresis", # 186 + "Ydieresis", # 187 + "fraction", # 188 + "currency", # 189 + "guilsinglleft", # 190 + "guilsinglright", # 191 + "fi", # 192 + "fl", # 193 + "daggerdbl", # 194 + "periodcentered", # 195 + "quotesinglbase", # 196 + "quotedblbase", # 197 + "perthousand", # 198 + "Acircumflex", # 199 + "Ecircumflex", # 200 + "Aacute", # 201 + "Edieresis", # 202 + "Egrave", # 203 + "Iacute", # 204 + "Icircumflex", # 205 + "Idieresis", # 206 + "Igrave", # 207 + "Oacute", # 208 + "Ocircumflex", # 209 + "apple", # 210 + "Ograve", # 211 + "Uacute", # 212 + "Ucircumflex", # 213 + "Ugrave", # 214 + "dotlessi", # 215 + "circumflex", # 216 + "tilde", # 217 + "macron", # 218 + "breve", # 219 + "dotaccent", # 220 + "ring", # 221 + "cedilla", # 222 + "hungarumlaut", # 223 + "ogonek", # 224 + 
"caron", # 225 + "Lslash", # 226 + "lslash", # 227 + "Scaron", # 228 + "scaron", # 229 + "Zcaron", # 230 + "zcaron", # 231 + "brokenbar", # 232 + "Eth", # 233 + "eth", # 234 + "Yacute", # 235 + "yacute", # 236 + "Thorn", # 237 + "thorn", # 238 + "minus", # 239 + "multiply", # 240 + "onesuperior", # 241 + "twosuperior", # 242 + "threesuperior", # 243 + "onehalf", # 244 + "onequarter", # 245 + "threequarters", # 246 + "franc", # 247 + "Gbreve", # 248 + "gbreve", # 249 + "Idotaccent", # 250 + "Scedilla", # 251 + "scedilla", # 252 + "Cacute", # 253 + "cacute", # 254 + "Ccaron", # 255 + "ccaron", # 256 + "dcroat" # 257 +] diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/asciiTable.py fonttools-3.0/Tools/fontTools/ttLib/tables/asciiTable.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/asciiTable.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/asciiTable.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,22 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable + + +class asciiTable(DefaultTable.DefaultTable): + + def toXML(self, writer, ttFont): + data = tostr(self.data) + # removing null bytes. XXX needed?? 
+ data = data.split('\0') + data = strjoin(data) + writer.begintag("source") + writer.newline() + writer.write_noindent(data.replace("\r", "\n")) + writer.newline() + writer.endtag("source") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + lines = strjoin(content).replace("\r", "\n").split("\n") + self.data = tobytes("\r".join(lines[1:-1])) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_a_v_a_r.py fonttools-3.0/Tools/fontTools/ttLib/tables/_a_v_a_r.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_a_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_a_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,94 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import array +import struct +import warnings + + +# Apple's documentation of 'avar': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6avar.html + +AVAR_HEADER_FORMAT = """ + > # big endian + version: L + axisCount: L +""" + + +class table__a_v_a_r(DefaultTable.DefaultTable): + dependencies = ["fvar"] + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.segments = {} + + def compile(self, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + header = {"version": 0x00010000, "axisCount": len(axisTags)} + result = [sstruct.pack(AVAR_HEADER_FORMAT, header)] + for axis in axisTags: + mappings = sorted(self.segments[axis].items()) + result.append(struct.pack(">H", len(mappings))) + for key, value in mappings: + fixedKey = floatToFixed(key, 14) + fixedValue = floatToFixed(value, 14) + result.append(struct.pack(">hh", fixedKey, fixedValue)) + return bytesjoin(result) + + def decompile(self, data, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + header = {} + headerSize = sstruct.calcsize(AVAR_HEADER_FORMAT) + header = sstruct.unpack(AVAR_HEADER_FORMAT, data[0:headerSize]) + if header["version"] != 0x00010000: + raise TTLibError("unsupported 'avar' version %04x" % header["version"]) + pos = headerSize + for axis in axisTags: + segments = self.segments[axis] = {} + numPairs = struct.unpack(">H", data[pos:pos+2])[0] + pos = pos + 2 + for _ in range(numPairs): + fromValue, toValue = struct.unpack(">hh", data[pos:pos+4]) + segments[fixedToFloat(fromValue, 14)] = fixedToFloat(toValue, 14) + pos = pos + 4 + self.fixupSegments_(warn=warnings.warn) + + def toXML(self, writer, ttFont, progress=None): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + for axis in axisTags: + writer.begintag("segment", axis=axis) + writer.newline() + for key, value in sorted(self.segments[axis].items()): + writer.simpletag("mapping", **{"from": key, "to": value}) + 
writer.newline() + writer.endtag("segment") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "segment": + axis = attrs["axis"] + segment = self.segments[axis] = {} + for element in content: + if isinstance(element, tuple): + elementName, elementAttrs, _ = element + if elementName == "mapping": + fromValue = safeEval(elementAttrs["from"]) + toValue = safeEval(elementAttrs["to"]) + if fromValue in segment: + warnings.warn("duplicate entry for %s in axis '%s'" % + (fromValue, axis)) + segment[fromValue] = toValue + self.fixupSegments_(warn=warnings.warn) + + def fixupSegments_(self, warn): + for axis, mappings in self.segments.items(): + for k in [-1.0, 0.0, 1.0]: + if mappings.get(k) != k: + warn("avar axis '%s' should map %s to %s" % (axis, k, k)) + mappings[k] = k diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_a_v_a_r_test.py fonttools-3.0/Tools/fontTools/ttLib/tables/_a_v_a_r_test.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_a_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_a_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,91 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._a_v_a_r import table__a_v_a_r +from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis +import collections +import unittest + + +TEST_DATA = deHexStr( + "00 01 00 00 00 00 00 02 " + "00 04 C0 00 C0 00 00 00 00 00 13 33 33 33 40 00 40 00 " + "00 03 C0 00 C0 00 00 00 00 00 40 00 40 00") + + +class AxisVariationTableTest(unittest.TestCase): + def test_compile(self): + avar = table__a_v_a_r() + avar.segments["wdth"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} + avar.segments["wght"] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} + 
self.assertEqual(TEST_DATA, avar.compile(self.makeFont(["wdth", "wght"]))) + + def test_decompile(self): + avar = table__a_v_a_r() + avar.decompile(TEST_DATA, self.makeFont(["wdth", "wght"])) + self.assertEqual({ + "wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}, + "wght": {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} + }, avar.segments) + + def test_decompile_unsupportedVersion(self): + avar = table__a_v_a_r() + font = self.makeFont(["wdth", "wght"]) + self.assertRaises(TTLibError, avar.decompile, deHexStr("02 01 03 06 00 00 00 00"), font) + + def test_toXML(self): + avar = table__a_v_a_r() + avar.segments["opsz"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} + writer = XMLWriter(BytesIO()) + avar.toXML(writer, self.makeFont(["opsz"])) + self.assertEqual([ + '', + '', + '', + '', + '', + '' + ], self.xml_lines(writer)) + + def test_fromXML(self): + avar = table__a_v_a_r() + avar.fromXML("segment", {"axis":"wdth"}, [ + ("mapping", {"from": "-1.0", "to": "-1.0"}, []), + ("mapping", {"from": "0.0", "to": "0.0"}, []), + ("mapping", {"from": "0.7", "to": "0.2"}, []), + ("mapping", {"from": "1.0", "to": "1.0"}, []) + ], ttFont=None) + self.assertEqual({"wdth": {-1: -1, 0: 0, 0.7: 0.2, 1.0: 1.0}}, avar.segments) + + def test_fixupSegments(self): + avar = table__a_v_a_r() + avar.segments = {"wdth": {0.3: 0.8, 1.0: 0.7}} + warnings = [] + avar.fixupSegments_(lambda w: warnings.append(w)) + self.assertEqual({"wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}}, avar.segments) + self.assertEqual([ + "avar axis 'wdth' should map -1.0 to -1.0", + "avar axis 'wdth' should map 0.0 to 0.0", + "avar axis 'wdth' should map 1.0 to 1.0" + ], warnings) + + @staticmethod + def makeFont(axisTags): + """['opsz', 'wdth'] --> ttFont""" + fvar = table__f_v_a_r() + for tag in axisTags: + axis = Axis() + axis.axisTag = tag + fvar.axes.append(axis) + return {"fvar": fvar} + + @staticmethod + def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in 
content.splitlines()][1:] + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/B_A_S_E_.py fonttools-3.0/Tools/fontTools/ttLib/tables/B_A_S_E_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/B_A_S_E_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/B_A_S_E_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_B_A_S_E_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/BitmapGlyphMetrics.py fonttools-3.0/Tools/fontTools/ttLib/tables/BitmapGlyphMetrics.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/BitmapGlyphMetrics.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/BitmapGlyphMetrics.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,58 @@ +# Since bitmap glyph metrics are shared between EBLC and EBDT +# this class gets its own python file. 
+from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval + + +bigGlyphMetricsFormat = """ + > # big endian + height: B + width: B + horiBearingX: b + horiBearingY: b + horiAdvance: B + vertBearingX: b + vertBearingY: b + vertAdvance: B +""" + +smallGlyphMetricsFormat = """ + > # big endian + height: B + width: B + BearingX: b + BearingY: b + Advance: B +""" + +class BitmapGlyphMetrics(object): + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__) + writer.newline() + for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]: + writer.simpletag(metricName, value=getattr(self, metricName)) + writer.newline() + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1]) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + # Make sure this is a metric that is needed by GlyphMetrics. + if name in metricNames: + vars(self)[name] = safeEval(attrs['value']) + else: + print("Warning: unknown name '%s' being ignored in %s." % name, self.__class__.__name__) + + +class BigGlyphMetrics(BitmapGlyphMetrics): + binaryFormat = bigGlyphMetricsFormat + +class SmallGlyphMetrics(BitmapGlyphMetrics): + binaryFormat = smallGlyphMetricsFormat diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/C_B_D_T_.py fonttools-3.0/Tools/fontTools/ttLib/tables/C_B_D_T_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/C_B_D_T_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/C_B_D_T_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,93 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Matt Fontaine + + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . import E_B_D_T_ +from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat +from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin +import struct + +class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_): + + # Change the data locator table being referenced. + locatorName = 'CBLC' + + # Modify the format class accessor for color bitmap use. + def getImageFormatClass(self, imageFormat): + try: + return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat) + except KeyError: + return cbdt_bitmap_classes[imageFormat] + +# Helper method for removing export features not supported by color bitmaps. +# Write data in the parent class will default to raw if an option is unsupported. +def _removeUnsupportedForColor(dataFunctions): + dataFunctions = dict(dataFunctions) + del dataFunctions['row'] + return dataFunctions + +class ColorBitmapGlyph(BitmapGlyph): + + fileExtension = '.png' + xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions) + +class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + (dataLen,) = struct.unpack(">L", data[:4]) + data = data[4:] + + # For the image data cut it to the size specified by dataLen. 
+ assert dataLen <= len(data), "Data overun in format 17" + self.imageData = data[:dataLen] + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) + dataList.append(struct.pack(">L", len(self.imageData))) + dataList.append(self.imageData) + return bytesjoin(dataList) + +class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + (dataLen,) = struct.unpack(">L", data[:4]) + data = data[4:] + + # For the image data cut it to the size specified by dataLen. + assert dataLen <= len(data), "Data overun in format 18" + self.imageData = data[:dataLen] + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + dataList.append(struct.pack(">L", len(self.imageData))) + dataList.append(self.imageData) + return bytesjoin(dataList) + +class cbdt_bitmap_format_19(ColorBitmapGlyph): + + def decompile(self): + (dataLen,) = struct.unpack(">L", self.data[:4]) + data = self.data[4:] + + assert dataLen <= len(data), "Data overun in format 19" + self.imageData = data[:dataLen] + + def compile(self, ttFont): + return struct.pack(">L", len(self.imageData)) + self.imageData + +# Dict for CBDT extended formats. +cbdt_bitmap_classes = { + 17: cbdt_bitmap_format_17, + 18: cbdt_bitmap_format_18, + 19: cbdt_bitmap_format_19, +} diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/C_B_L_C_.py fonttools-3.0/Tools/fontTools/ttLib/tables/C_B_L_C_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/C_B_L_C_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/C_B_L_C_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,11 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Matt Fontaine + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import E_B_L_C_ + +class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_): + + dependencies = ['CBDT'] diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/C_F_F_.py fonttools-3.0/Tools/fontTools/ttLib/tables/C_F_F_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/C_F_F_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/C_F_F_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,47 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import cffLib +from . import DefaultTable + + +class table_C_F_F_(DefaultTable.DefaultTable): + + def __init__(self, tag): + DefaultTable.DefaultTable.__init__(self, tag) + self.cff = cffLib.CFFFontSet() + self._gaveGlyphOrder = False + + def decompile(self, data, otFont): + self.cff.decompile(BytesIO(data), otFont) + assert len(self.cff) == 1, "can't deal with multi-font CFF tables." 
+ + def compile(self, otFont): + f = BytesIO() + self.cff.compile(f, otFont) + return f.getvalue() + + def haveGlyphNames(self): + if hasattr(self.cff[self.cff.fontNames[0]], "ROS"): + return False # CID-keyed font + else: + return True + + def getGlyphOrder(self): + if self._gaveGlyphOrder: + from fontTools import ttLib + raise ttLib.TTLibError("illegal use of getGlyphOrder()") + self._gaveGlyphOrder = True + return self.cff[self.cff.fontNames[0]].getGlyphOrder() + + def setGlyphOrder(self, glyphOrder): + pass + # XXX + #self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder) + + def toXML(self, writer, otFont, progress=None): + self.cff.toXML(writer, progress) + + def fromXML(self, name, attrs, content, otFont): + if not hasattr(self, "cff"): + self.cff = cffLib.CFFFontSet() + self.cff.fromXML(name, attrs, content) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_c_m_a_p.py fonttools-3.0/Tools/fontTools/ttLib/tables/_c_m_a_p.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_c_m_a_p.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_c_m_a_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1294 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval, readHex +from fontTools.misc.encodingTools import getEncoding +from fontTools.ttLib import getSearchRange +from fontTools.unicode import Unicode +from . 
import DefaultTable +import sys +import struct +import array +import operator + + +class table__c_m_a_p(DefaultTable.DefaultTable): + + def getcmap(self, platformID, platEncID): + for subtable in self.tables: + if (subtable.platformID == platformID and + subtable.platEncID == platEncID): + return subtable + return None # not found + + def decompile(self, data, ttFont): + tableVersion, numSubTables = struct.unpack(">HH", data[:4]) + self.tableVersion = int(tableVersion) + self.tables = tables = [] + seenOffsets = {} + for i in range(numSubTables): + platformID, platEncID, offset = struct.unpack( + ">HHl", data[4+i*8:4+(i+1)*8]) + platformID, platEncID = int(platformID), int(platEncID) + format, length = struct.unpack(">HH", data[offset:offset+4]) + if format in [8,10,12,13]: + format, reserved, length = struct.unpack(">HHL", data[offset:offset+8]) + elif format in [14]: + format, length = struct.unpack(">HL", data[offset:offset+6]) + + if not length: + print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset)) + continue + table = CmapSubtable.newSubtable(format) + table.platformID = platformID + table.platEncID = platEncID + # Note that by default we decompile only the subtable header info; + # any other data gets decompiled only when an attribute of the + # subtable is referenced. + table.decompileHeader(data[offset:offset+int(length)], ttFont) + if offset in seenOffsets: + table.data = None # Mark as decompiled + table.cmap = tables[seenOffsets[offset]].cmap + else: + seenOffsets[offset] = i + tables.append(table) + + def compile(self, ttFont): + self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__() + numSubTables = len(self.tables) + totalOffset = 4 + 8 * numSubTables + data = struct.pack(">HH", self.tableVersion, numSubTables) + tableData = b"" + seen = {} # Some tables are the same object reference. Don't compile them twice. 
+ done = {} # Some tables are different objects, but compile to the same data chunk + for table in self.tables: + try: + offset = seen[id(table.cmap)] + except KeyError: + chunk = table.compile(ttFont) + if chunk in done: + offset = done[chunk] + else: + offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData) + tableData = tableData + chunk + data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset) + return data + tableData + + def toXML(self, writer, ttFont): + writer.simpletag("tableVersion", version=self.tableVersion) + writer.newline() + for table in self.tables: + table.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "tableVersion": + self.tableVersion = safeEval(attrs["version"]) + return + if name[:12] != "cmap_format_": + return + if not hasattr(self, "tables"): + self.tables = [] + format = safeEval(name[12:]) + table = CmapSubtable.newSubtable(format) + table.platformID = safeEval(attrs["platformID"]) + table.platEncID = safeEval(attrs["platEncID"]) + table.fromXML(name, attrs, content, ttFont) + self.tables.append(table) + + +class CmapSubtable(object): + + @staticmethod + def getSubtableClass(format): + """Return the subtable class for a format.""" + return cmap_classes.get(format, cmap_format_unknown) + + @staticmethod + def newSubtable(format): + """Return a new instance of a subtable for format.""" + subtableClass = CmapSubtable.getSubtableClass(format) + return subtableClass(format) + + def __init__(self, format): + self.format = format + self.data = None + self.ttFont = None + + def __getattr__(self, attr): + # allow lazy decompilation of subtables. + if attr[:2] == '__': # don't handle requests for member functions like '__lt__' + raise AttributeError(attr) + if self.data is None: + raise AttributeError(attr) + self.decompile(None, None) # use saved data. + self.data = None # Once this table has been decompiled, make sure we don't + # just return the original data. 
Also avoids recursion when + # called with an attribute that the cmap subtable doesn't have. + return getattr(self, attr) + + def decompileHeader(self, data, ttFont): + format, length, language = struct.unpack(">HHH", data[:6]) + assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length) + self.format = int(format) + self.length = int(length) + self.language = int(language) + self.data = data[6:] + self.ttFont = ttFont + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("language", self.language), + ]) + writer.newline() + codes = sorted(self.cmap.items()) + self._writeCodes(codes, writer) + writer.endtag(self.__class__.__name__) + writer.newline() + + def getEncoding(self, default=None): + """Returns the Python encoding name for this cmap subtable based on its platformID, + platEncID, and language. If encoding for these values is not known, by default + None is returned. That can be overriden by passing a value to the default + argument. + + Note that if you want to choose a "preferred" cmap subtable, most of the time + self.isUnicode() is what you want as that one only returns true for the modern, + commonly used, Unicode-compatible triplets, not the legacy ones. 
+ """ + return getEncoding(self.platformID, self.platEncID, self.language, default) + + def isUnicode(self): + return (self.platformID == 0 or + (self.platformID == 3 and self.platEncID in [0, 1, 10])) + + def isSymbol(self): + return self.platformID == 3 and self.platEncID == 0 + + def _writeCodes(self, codes, writer): + isUnicode = self.isUnicode() + for code, name in codes: + writer.simpletag("map", code=hex(code), name=name) + if isUnicode: + writer.comment(Unicode[code]) + writer.newline() + + def __lt__(self, other): + if not isinstance(other, CmapSubtable): + return NotImplemented + + # implemented so that list.sort() sorts according to the spec. + selfTuple = ( + getattr(self, "platformID", None), + getattr(self, "platEncID", None), + getattr(self, "language", None), + self.__dict__) + otherTuple = ( + getattr(other, "platformID", None), + getattr(other, "platEncID", None), + getattr(other, "language", None), + other.__dict__) + return selfTuple < otherTuple + + +class cmap_format_0(CmapSubtable): + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. 
+ if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + data = self.data # decompileHeader assigns the data after the header to self.data + assert 262 == self.length, "Format 0 cmap subtable not 262 bytes" + glyphIdArray = array.array("B") + glyphIdArray.fromstring(self.data) + self.cmap = cmap = {} + lenArray = len(glyphIdArray) + charCodes = list(range(lenArray)) + names = map(self.ttFont.getGlyphName, glyphIdArray) + list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHH", 0, 262, self.language) + self.data + + charCodeList = sorted(self.cmap.items()) + charCodes = [entry[0] for entry in charCodeList] + valueList = [entry[1] for entry in charCodeList] + assert charCodes == list(range(256)) + valueList = map(ttFont.getGlyphID, valueList) + + glyphIdArray = array.array("B", valueList) + data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring() + assert len(data) == 262 + return data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +subHeaderFormat = ">HHhH" +class SubHeader(object): + def __init__(self): + self.firstCode = None + self.entryCount = None + self.idDelta = None + self.idRangeOffset = None + self.glyphIndexArray = [] + +class cmap_format_2(CmapSubtable): + + def setIDDelta(self, subHeader): + subHeader.idDelta = 0 + # find the minGI which is not zero. 
+ minGI = subHeader.glyphIndexArray[0] + for gid in subHeader.glyphIndexArray: + if (gid != 0) and (gid < minGI): + minGI = gid + # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1. + # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K. + # We would like to pick an idDelta such that the first glyphArray GID is 1, + # so that we are more likely to be able to combine glypharray GID subranges. + # This means that we have a problem when minGI is > 32K + # Since the final gi is reconstructed from the glyphArray GID by: + # (short)finalGID = (gid + idDelta) % 0x10000), + # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the + # negative number to an unsigned short. + + if (minGI > 1): + if minGI > 0x7FFF: + subHeader.idDelta = -(0x10000 - minGI) -1 + else: + subHeader.idDelta = minGI -1 + idDelta = subHeader.idDelta + for i in range(subHeader.entryCount): + gid = subHeader.glyphIndexArray[i] + if gid > 0: + subHeader.glyphIndexArray[i] = gid - idDelta + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + + data = self.data # decompileHeader assigns the data after the header to self.data + subHeaderKeys = [] + maxSubHeaderindex = 0 + # get the key array, and determine the number of subHeaders. 
+ allKeys = array.array("H") + allKeys.fromstring(data[:512]) + data = data[512:] + if sys.byteorder != "big": + allKeys.byteswap() + subHeaderKeys = [ key//8 for key in allKeys] + maxSubHeaderindex = max(subHeaderKeys) + + #Load subHeaders + subHeaderList = [] + pos = 0 + for i in range(maxSubHeaderindex + 1): + subHeader = SubHeader() + (subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \ + subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8]) + pos += 8 + giDataPos = pos + subHeader.idRangeOffset-2 + giList = array.array("H") + giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2]) + if sys.byteorder != "big": + giList.byteswap() + subHeader.glyphIndexArray = giList + subHeaderList.append(subHeader) + # How this gets processed. + # Charcodes may be one or two bytes. + # The first byte of a charcode is mapped through the subHeaderKeys, to select + # a subHeader. For any subheader but 0, the next byte is then mapped through the + # selected subheader. If subheader Index 0 is selected, then the byte itself is + # mapped through the subheader, and there is no second byte. + # Then assume that the subsequent byte is the first byte of the next charcode,and repeat. + # + # Each subheader references a range in the glyphIndexArray whose length is entryCount. + # The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray + # referenced by another subheader. + # The only subheader that will be referenced by more than one first-byte value is the subheader + # that maps the entire range of glyphID values to glyphIndex 0, e.g notdef: + # {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx} + # A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex. + # A subheader specifies a subrange within (0...256) by the + # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero + # (e.g. 
glyph not in font). + # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar). + # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by + # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the + # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex. + # Example for Logocut-Medium + # first byte of charcode = 129; selects subheader 1. + # subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252} + # second byte of charCode = 66 + # the index offset = 66-64 = 2. + # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is: + # [glyphIndexArray index], [subrange array index] = glyphIndex + # [256], [0]=1 from charcode [129, 64] + # [257], [1]=2 from charcode [129, 65] + # [258], [2]=3 from charcode [129, 66] + # [259], [3]=4 from charcode [129, 67] + # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero, + # add it to the glyphID to get the final glyphIndex + # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew! + + self.data = b"" + self.cmap = cmap = {} + notdefGI = 0 + for firstByte in range(256): + subHeadindex = subHeaderKeys[firstByte] + subHeader = subHeaderList[subHeadindex] + if subHeadindex == 0: + if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount): + continue # gi is notdef. + else: + charCode = firstByte + offsetIndex = firstByte - subHeader.firstCode + gi = subHeader.glyphIndexArray[offsetIndex] + if gi != 0: + gi = (gi + subHeader.idDelta) % 0x10000 + else: + continue # gi is notdef. 
+ cmap[charCode] = gi + else: + if subHeader.entryCount: + charCodeOffset = firstByte * 256 + subHeader.firstCode + for offsetIndex in range(subHeader.entryCount): + charCode = charCodeOffset + offsetIndex + gi = subHeader.glyphIndexArray[offsetIndex] + if gi != 0: + gi = (gi + subHeader.idDelta) % 0x10000 + else: + continue + cmap[charCode] = gi + # If not subHeader.entryCount, then all char codes with this first byte are + # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the + # same as mapping it to .notdef. + # cmap values are GID's. + glyphOrder = self.ttFont.getGlyphOrder() + gids = list(cmap.values()) + charCodes = list(cmap.keys()) + lenCmap = len(gids) + try: + names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) + except IndexError: + getGlyphName = self.ttFont.getGlyphName + names = list(map(getGlyphName, gids )) + list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHH", self.format, self.length, self.language) + self.data + kEmptyTwoCharCodeRange = -1 + notdefGI = 0 + + items = sorted(self.cmap.items()) + charCodes = [item[0] for item in items] + names = [item[1] for item in items] + nameMap = ttFont.getReverseGlyphMap() + lenCharCodes = len(charCodes) + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + except KeyError: + # allow virtual GIDs in format 2 tables + gids = [] + for name in names: + try: + gid = nameMap[name] + except KeyError: + try: + if (name[:3] == 'gid'): + gid = eval(name[3:]) + else: + gid = ttFont.getGlyphID(name) + except: + raise KeyError(name) + + gids.append(gid) + + # Process the (char code to gid) item list in char code order. + # By definition, all one byte char codes map to subheader 0. 
+ # For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0, + # which defines all char codes in its range to map to notdef) unless proven otherwise. + # Note that since the char code items are processed in char code order, all the char codes with the + # same first byte are in sequential order. + + subHeaderKeys = [ kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList. + subHeaderList = [] + + # We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up + # with a cmap where all the one byte char codes map to notdef, + # with the result that the subhead 0 would not get created just by processing the item list. + charCode = charCodes[0] + if charCode > 255: + subHeader = SubHeader() + subHeader.firstCode = 0 + subHeader.entryCount = 0 + subHeader.idDelta = 0 + subHeader.idRangeOffset = 0 + subHeaderList.append(subHeader) + + lastFirstByte = -1 + items = zip(charCodes, gids) + for charCode, gid in items: + if gid == 0: + continue + firstbyte = charCode >> 8 + secondByte = charCode & 0x00FF + + if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one. + if lastFirstByte > -1: + # fix GI's and iDelta of current subheader. + self.setIDDelta(subHeader) + + # If it was sunheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero + # for the indices matching the char codes. + if lastFirstByte == 0: + for index in range(subHeader.entryCount): + charCode = subHeader.firstCode + index + subHeaderKeys[charCode] = 0 + + assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange." 
+ # init new subheader + subHeader = SubHeader() + subHeader.firstCode = secondByte + subHeader.entryCount = 1 + subHeader.glyphIndexArray.append(gid) + subHeaderList.append(subHeader) + subHeaderKeys[firstbyte] = len(subHeaderList) -1 + lastFirstByte = firstbyte + else: + # need to fill in with notdefs all the code points between the last charCode and the current charCode. + codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount) + for i in range(codeDiff): + subHeader.glyphIndexArray.append(notdefGI) + subHeader.glyphIndexArray.append(gid) + subHeader.entryCount = subHeader.entryCount + codeDiff + 1 + + # fix GI's and iDelta of last subheader that we we added to the subheader array. + self.setIDDelta(subHeader) + + # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges. + subHeader = SubHeader() + subHeader.firstCode = 0 + subHeader.entryCount = 0 + subHeader.idDelta = 0 + subHeader.idRangeOffset = 2 + subHeaderList.append(subHeader) + emptySubheadIndex = len(subHeaderList) - 1 + for index in range(256): + if subHeaderKeys[index] == kEmptyTwoCharCodeRange: + subHeaderKeys[index] = emptySubheadIndex + # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the + # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray, + # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with + # charcode 0 and GID 0. + + idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset. + subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2. 
+ for index in range(subheadRangeLen): + subHeader = subHeaderList[index] + subHeader.idRangeOffset = 0 + for j in range(index): + prevSubhead = subHeaderList[j] + if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray + subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8 + subHeader.glyphIndexArray = [] + break + if subHeader.idRangeOffset == 0: # didn't find one. + subHeader.idRangeOffset = idRangeOffset + idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray. + else: + idRangeOffset = idRangeOffset - 8 # one less subheader + + # Now we can write out the data! + length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array. + for subhead in subHeaderList[:-1]: + length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subhead may share subArrays. + dataList = [struct.pack(">HHH", 2, length, self.language)] + for index in subHeaderKeys: + dataList.append(struct.pack(">H", index*8)) + for subhead in subHeaderList: + dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset)) + for subhead in subHeaderList[:-1]: + for gi in subhead.glyphIndexArray: + dataList.append(struct.pack(">H", gi)) + data = bytesjoin(dataList) + assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length) + return data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +cmap_format_4_format = ">7H" + +#uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF. 
+#uint16 reservedPad # This value should be zero +#uint16 startCode[segCount] # Starting character code for each segment +#uint16 idDelta[segCount] # Delta for all character codes in segment +#uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0 +#uint16 glyphIndexArray[variable] # Glyph index array + +def splitRange(startCode, endCode, cmap): + # Try to split a range of character codes into subranges with consecutive + # glyph IDs in such a way that the cmap4 subtable can be stored "most" + # efficiently. I can't prove I've got the optimal solution, but it seems + # to do well with the fonts I tested: none became bigger, many became smaller. + if startCode == endCode: + return [], [endCode] + + lastID = cmap[startCode] + lastCode = startCode + inOrder = None + orderedBegin = None + subRanges = [] + + # Gather subranges in which the glyph IDs are consecutive. + for code in range(startCode + 1, endCode + 1): + glyphID = cmap[code] + + if glyphID - 1 == lastID: + if inOrder is None or not inOrder: + inOrder = 1 + orderedBegin = lastCode + else: + if inOrder: + inOrder = 0 + subRanges.append((orderedBegin, lastCode)) + orderedBegin = None + + lastID = glyphID + lastCode = code + + if inOrder: + subRanges.append((orderedBegin, lastCode)) + assert lastCode == endCode + + # Now filter out those new subranges that would only make the data bigger. + # A new segment cost 8 bytes, not using a new segment costs 2 bytes per + # character. 
+ newRanges = [] + for b, e in subRanges: + if b == startCode and e == endCode: + break # the whole range, we're fine + if b == startCode or e == endCode: + threshold = 4 # split costs one more segment + else: + threshold = 8 # split costs two more segments + if (e - b + 1) > threshold: + newRanges.append((b, e)) + subRanges = newRanges + + if not subRanges: + return [], [endCode] + + if subRanges[0][0] != startCode: + subRanges.insert(0, (startCode, subRanges[0][0] - 1)) + if subRanges[-1][1] != endCode: + subRanges.append((subRanges[-1][1] + 1, endCode)) + + # Fill the "holes" in the segments list -- those are the segments in which + # the glyph IDs are _not_ consecutive. + i = 1 + while i < len(subRanges): + if subRanges[i-1][1] + 1 != subRanges[i][0]: + subRanges.insert(i, (subRanges[i-1][1] + 1, subRanges[i][0] - 1)) + i = i + 1 + i = i + 1 + + # Transform the ranges into startCode/endCode lists. + start = [] + end = [] + for b, e in subRanges: + start.append(b) + end.append(e) + start.pop(0) + + assert len(start) + 1 == len(end) + return start, end + + +class cmap_format_4(CmapSubtable): + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. 
+ if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + + data = self.data # decompileHeader assigns the data after the header to self.data + (segCountX2, searchRange, entrySelector, rangeShift) = \ + struct.unpack(">4H", data[:8]) + data = data[8:] + segCount = segCountX2 // 2 + + allCodes = array.array("H") + allCodes.fromstring(data) + self.data = data = None + + if sys.byteorder != "big": + allCodes.byteswap() + + # divide the data + endCode = allCodes[:segCount] + allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field + startCode = allCodes[:segCount] + allCodes = allCodes[segCount:] + idDelta = allCodes[:segCount] + allCodes = allCodes[segCount:] + idRangeOffset = allCodes[:segCount] + glyphIndexArray = allCodes[segCount:] + lenGIArray = len(glyphIndexArray) + + # build 2-byte character mapping + charCodes = [] + gids = [] + for i in range(len(startCode) - 1): # don't do 0xffff! + start = startCode[i] + delta = idDelta[i] + rangeOffset = idRangeOffset[i] + # *someone* needs to get killed. + partial = rangeOffset // 2 - start + i - len(idRangeOffset) + + rangeCharCodes = list(range(startCode[i], endCode[i] + 1)) + charCodes.extend(rangeCharCodes) + if rangeOffset == 0: + gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes]) + else: + for charCode in rangeCharCodes: + index = charCode + partial + assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" 
% (i, index, lenGIArray) + if glyphIndexArray[index] != 0: # if not missing glyph + glyphID = glyphIndexArray[index] + delta + else: + glyphID = 0 # missing glyph + gids.append(glyphID & 0xFFFF) + + self.cmap = cmap = {} + lenCmap = len(gids) + glyphOrder = self.ttFont.getGlyphOrder() + try: + names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) + except IndexError: + getGlyphName = self.ttFont.getGlyphName + names = list(map(getGlyphName, gids )) + list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHH", self.format, self.length, self.language) + self.data + + charCodes = list(self.cmap.keys()) + lenCharCodes = len(charCodes) + if lenCharCodes == 0: + startCode = [0xffff] + endCode = [0xffff] + else: + charCodes.sort() + names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes)) + nameMap = ttFont.getReverseGlyphMap() + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + except KeyError: + # allow virtual GIDs in format 4 tables + gids = [] + for name in names: + try: + gid = nameMap[name] + except KeyError: + try: + if (name[:3] == 'gid'): + gid = eval(name[3:]) + else: + gid = ttFont.getGlyphID(name) + except: + raise KeyError(name) + + gids.append(gid) + cmap = {} # code:glyphID mapping + list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids)) + + # Build startCode and endCode lists. + # Split the char codes in ranges of consecutive char codes, then split + # each range in more ranges of consecutive/not consecutive glyph IDs. + # See splitRange(). 
+ lastCode = charCodes[0] + endCode = [] + startCode = [lastCode] + for charCode in charCodes[1:]: # skip the first code, it's the first start code + if charCode == lastCode + 1: + lastCode = charCode + continue + start, end = splitRange(startCode[-1], lastCode, cmap) + startCode.extend(start) + endCode.extend(end) + startCode.append(charCode) + lastCode = charCode + start, end = splitRange(startCode[-1], lastCode, cmap) + startCode.extend(start) + endCode.extend(end) + startCode.append(0xffff) + endCode.append(0xffff) + + # build up rest of cruft + idDelta = [] + idRangeOffset = [] + glyphIndexArray = [] + for i in range(len(endCode)-1): # skip the closing codes (0xffff) + indices = [] + for charCode in range(startCode[i], endCode[i] + 1): + indices.append(cmap[charCode]) + if (indices == list(range(indices[0], indices[0] + len(indices)))): + idDelta.append((indices[0] - startCode[i]) % 0x10000) + idRangeOffset.append(0) + else: + # someone *definitely* needs to get killed. + idDelta.append(0) + idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i)) + glyphIndexArray.extend(indices) + idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef + idRangeOffset.append(0) + + # Insane. 
+ segCount = len(endCode) + segCountX2 = segCount * 2 + searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2) + + charCodeArray = array.array("H", endCode + [0] + startCode) + idDeltaArray = array.array("H", idDelta) + restArray = array.array("H", idRangeOffset + glyphIndexArray) + if sys.byteorder != "big": + charCodeArray.byteswap() + idDeltaArray.byteswap() + restArray.byteswap() + data = charCodeArray.tostring() + idDeltaArray.tostring() + restArray.tostring() + + length = struct.calcsize(cmap_format_4_format) + len(data) + header = struct.pack(cmap_format_4_format, self.format, length, self.language, + segCountX2, searchRange, entrySelector, rangeShift) + return header + data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + nameMap, attrsMap, dummyContent = element + if nameMap != "map": + assert 0, "Unrecognized keyword in cmap subtable" + cmap[safeEval(attrsMap["code"])] = attrsMap["name"] + + +class cmap_format_6(CmapSubtable): + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + + data = self.data # decompileHeader assigns the data after the header to self.data + firstCode, entryCount = struct.unpack(">HH", data[:4]) + firstCode = int(firstCode) + data = data[4:] + #assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!! 
+ glyphIndexArray = array.array("H") + glyphIndexArray.fromstring(data[:2 * int(entryCount)]) + if sys.byteorder != "big": + glyphIndexArray.byteswap() + self.data = data = None + + self.cmap = cmap = {} + + lenArray = len(glyphIndexArray) + charCodes = list(range(firstCode, firstCode + lenArray)) + glyphOrder = self.ttFont.getGlyphOrder() + try: + names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray )) + except IndexError: + getGlyphName = self.ttFont.getGlyphName + names = list(map(getGlyphName, glyphIndexArray )) + list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHH", self.format, self.length, self.language) + self.data + cmap = self.cmap + codes = sorted(cmap.keys()) + if codes: # yes, there are empty cmap tables. + codes = list(range(codes[0], codes[-1] + 1)) + firstCode = codes[0] + valueList = [cmap.get(code, ".notdef") for code in codes] + valueList = map(ttFont.getGlyphID, valueList) + glyphIndexArray = array.array("H", valueList) + if sys.byteorder != "big": + glyphIndexArray.byteswap() + data = glyphIndexArray.tostring() + else: + data = b"" + firstCode = 0 + header = struct.pack(">HHHHH", + 6, len(data) + 10, self.language, firstCode, len(codes)) + return header + data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +class cmap_format_12_or_13(CmapSubtable): + + def __init__(self, format): + self.format = format + self.reserved = 0 + self.data = None + self.ttFont = None + + def decompileHeader(self, data, ttFont): + format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16]) + assert len(data) == (16 + nGroups*12) == 
(length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length) + self.format = format + self.reserved = reserved + self.length = length + self.language = language + self.nGroups = nGroups + self.data = data[16:] + self.ttFont = ttFont + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + + data = self.data # decompileHeader assigns the data after the header to self.data + charCodes = [] + gids = [] + pos = 0 + for i in range(self.nGroups): + startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] ) + pos += 12 + lenGroup = 1 + endCharCode - startCharCode + charCodes.extend(list(range(startCharCode, endCharCode +1))) + gids.extend(self._computeGIDs(glyphID, lenGroup)) + self.data = data = None + self.cmap = cmap = {} + lenCmap = len(gids) + glyphOrder = self.ttFont.getGlyphOrder() + try: + names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) + except IndexError: + getGlyphName = self.ttFont.getGlyphName + names = list(map(getGlyphName, gids )) + list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data + charCodes = list(self.cmap.keys()) + lenCharCodes = len(charCodes) + names = list(self.cmap.values()) + nameMap = ttFont.getReverseGlyphMap() + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + try: + gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) + 
except KeyError: + # allow virtual GIDs in format 12 tables + gids = [] + for name in names: + try: + gid = nameMap[name] + except KeyError: + try: + if (name[:3] == 'gid'): + gid = eval(name[3:]) + else: + gid = ttFont.getGlyphID(name) + except: + raise KeyError(name) + + gids.append(gid) + + cmap = {} # code:glyphID mapping + list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids)) + + charCodes.sort() + index = 0 + startCharCode = charCodes[0] + startGlyphID = cmap[startCharCode] + lastGlyphID = startGlyphID - self._format_step + lastCharCode = startCharCode - 1 + nGroups = 0 + dataList = [] + maxIndex = len(charCodes) + for index in range(maxIndex): + charCode = charCodes[index] + glyphID = cmap[charCode] + if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode): + dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)) + startCharCode = charCode + startGlyphID = glyphID + nGroups = nGroups + 1 + lastGlyphID = glyphID + lastCharCode = charCode + dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)) + nGroups = nGroups + 1 + data = bytesjoin(dataList) + lengthSubtable = len(data) +16 + assert len(data) == (nGroups*12) == (lengthSubtable-16) + return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("format", self.format), + ("reserved", self.reserved), + ("length", self.length), + ("language", self.language), + ("nGroups", self.nGroups), + ]) + writer.newline() + codes = sorted(self.cmap.items()) + self._writeCodes(codes, writer) + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.format = safeEval(attrs["format"]) + self.reserved = safeEval(attrs["reserved"]) + self.length = safeEval(attrs["length"]) + self.language = 
safeEval(attrs["language"]) + self.nGroups = safeEval(attrs["nGroups"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +class cmap_format_12(cmap_format_12_or_13): + + _format_step = 1 + + def __init__(self, format=12): + cmap_format_12_or_13.__init__(self, format) + + def _computeGIDs(self, startingGlyph, numberOfGlyphs): + return list(range(startingGlyph, startingGlyph + numberOfGlyphs)) + + def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): + return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode) + + +class cmap_format_13(cmap_format_12_or_13): + + _format_step = 0 + + def __init__(self, format=13): + cmap_format_12_or_13.__init__(self, format) + + def _computeGIDs(self, startingGlyph, numberOfGlyphs): + return [startingGlyph] * numberOfGlyphs + + def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): + return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode) + + +def cvtToUVS(threeByteString): + data = b"\0" + threeByteString + val, = struct.unpack(">L", data) + return val + +def cvtFromUVS(val): + assert 0 <= val < 0x1000000 + fourByteString = struct.pack(">L", val) + return fourByteString[1:] + + +class cmap_format_14(CmapSubtable): + + def decompileHeader(self, data, ttFont): + format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10]) + self.data = data[10:] + self.length = length + self.numVarSelectorRecords = numVarSelectorRecords + self.ttFont = ttFont + self.language = 0xFF # has no language. 
+ + def decompile(self, data, ttFont): + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + data = self.data + + self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail. + uvsDict = {} + recOffset = 0 + for n in range(self.numVarSelectorRecords): + uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11]) + recOffset += 11 + varUVS = cvtToUVS(uvs) + if defOVSOffset: + startOffset = defOVSOffset - 10 + numValues, = struct.unpack(">L", data[startOffset:startOffset+4]) + startOffset +=4 + for r in range(numValues): + uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4]) + startOffset += 4 + firstBaseUV = cvtToUVS(uv) + cnt = addtlCnt+1 + baseUVList = list(range(firstBaseUV, firstBaseUV+cnt)) + glyphList = [None]*cnt + localUVList = zip(baseUVList, glyphList) + try: + uvsDict[varUVS].extend(localUVList) + except KeyError: + uvsDict[varUVS] = list(localUVList) + + if nonDefUVSOffset: + startOffset = nonDefUVSOffset - 10 + numRecs, = struct.unpack(">L", data[startOffset:startOffset+4]) + startOffset +=4 + localUVList = [] + for r in range(numRecs): + uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5]) + startOffset += 5 + uv = cvtToUVS(uv) + glyphName = self.ttFont.getGlyphName(gid) + localUVList.append( [uv, glyphName] ) + try: + uvsDict[varUVS].extend(localUVList) + except KeyError: + uvsDict[varUVS] = localUVList + + self.uvsDict = uvsDict + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("format", self.format), + ("length", self.length), + ("numVarSelectorRecords", self.numVarSelectorRecords), + ]) + writer.newline() + uvsDict = self.uvsDict + uvsList = sorted(uvsDict.keys()) + for uvs in uvsList: + uvList = uvsDict[uvs] + uvList.sort(key=lambda item: 
(item[1] is not None, item[0], item[1])) + for uv, gname in uvList: + if gname is None: + gname = "None" + # I use the arg rather than th keyword syntax in order to preserve the attribute order. + writer.simpletag("map", [ ("uvs",hex(uvs)), ("uv",hex(uv)), ("name", gname)] ) + writer.newline() + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.format = safeEval(attrs["format"]) + self.length = safeEval(attrs["length"]) + self.numVarSelectorRecords = safeEval(attrs["numVarSelectorRecords"]) + self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail + if not hasattr(self, "cmap"): + self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail. + if not hasattr(self, "uvsDict"): + self.uvsDict = {} + uvsDict = self.uvsDict + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + uvs = safeEval(attrs["uvs"]) + uv = safeEval(attrs["uv"]) + gname = attrs["name"] + if gname == "None": + gname = None + try: + uvsDict[uvs].append( [uv, gname]) + except KeyError: + uvsDict[uvs] = [ [uv, gname] ] + + def compile(self, ttFont): + if self.data: + return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data + + uvsDict = self.uvsDict + uvsList = sorted(uvsDict.keys()) + self.numVarSelectorRecords = len(uvsList) + offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block. 
+ data = [] + varSelectorRecords =[] + for uvs in uvsList: + entryList = uvsDict[uvs] + + defList = [entry for entry in entryList if entry[1] is None] + if defList: + defList = [entry[0] for entry in defList] + defOVSOffset = offset + defList.sort() + + lastUV = defList[0] + cnt = -1 + defRecs = [] + for defEntry in defList: + cnt +=1 + if (lastUV+cnt) != defEntry: + rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1) + lastUV = defEntry + defRecs.append(rec) + cnt = 0 + + rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt) + defRecs.append(rec) + + numDefRecs = len(defRecs) + data.append(struct.pack(">L", numDefRecs)) + data.extend(defRecs) + offset += 4 + numDefRecs*4 + else: + defOVSOffset = 0 + + ndefList = [entry for entry in entryList if entry[1] is not None] + if ndefList: + nonDefUVSOffset = offset + ndefList.sort() + numNonDefRecs = len(ndefList) + data.append(struct.pack(">L", numNonDefRecs)) + offset += 4 + numNonDefRecs*5 + + for uv, gname in ndefList: + gid = ttFont.getGlyphID(gname) + ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid) + data.append(ndrec) + else: + nonDefUVSOffset = 0 + + vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset) + varSelectorRecords.append(vrec) + + data = bytesjoin(varSelectorRecords) + bytesjoin(data) + self.length = 10 + len(data) + headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data = headerdata + data + + return self.data + + +class cmap_format_unknown(CmapSubtable): + + def toXML(self, writer, ttFont): + cmapName = self.__class__.__name__[:12] + str(self.format) + writer.begintag(cmapName, [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ]) + writer.newline() + writer.dumphex(self.data) + writer.endtag(cmapName) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.data = readHex(content) + self.cmap = {} + + def decompileHeader(self, data, ttFont): + self.language = 0 # dummy value + self.data = data + 
+ def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert (data is None and ttFont is None), "Need both data and ttFont arguments" + + def compile(self, ttFont): + if self.data: + return self.data + else: + return None + +cmap_classes = { + 0: cmap_format_0, + 2: cmap_format_2, + 4: cmap_format_4, + 6: cmap_format_6, + 12: cmap_format_12, + 13: cmap_format_13, + 14: cmap_format_14, +} diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_c_m_a_p_test.py fonttools-3.0/Tools/fontTools/ttLib/tables/_c_m_a_p_test.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_c_m_a_p_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_c_m_a_p_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,53 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools import ttLib +import unittest +from ._c_m_a_p import CmapSubtable + +class CmapSubtableTest(unittest.TestCase): + + def makeSubtable(self, platformID, platEncID, langID): + subtable = CmapSubtable(None) + subtable.platformID, subtable.platEncID, subtable.language = (platformID, platEncID, langID) + return subtable + + def test_toUnicode_utf16be(self): + subtable = self.makeSubtable(0, 2, 7) + self.assertEqual("utf_16_be", subtable.getEncoding()) + self.assertEqual(True, subtable.isUnicode()) + + def test_toUnicode_macroman(self): + subtable = self.makeSubtable(1, 0, 7) # MacRoman + self.assertEqual("mac_roman", subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_toUnicode_macromanian(self): + subtable = self.makeSubtable(1, 0, 37) # Mac Romanian + self.assertNotEqual(None, subtable.getEncoding()) + 
self.assertEqual(False, subtable.isUnicode()) + + def test_extended_mac_encodings(self): + subtable = self.makeSubtable(1, 1, 0) # Mac Japanese + self.assertNotEqual(None, subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_extended_unknown(self): + subtable = self.makeSubtable(10, 11, 12) + self.assertEqual(subtable.getEncoding(), None) + self.assertEqual(subtable.getEncoding("ascii"), "ascii") + self.assertEqual(subtable.getEncoding(default="xyz"), "xyz") + + def test_decompile_4(self): + subtable = CmapSubtable.newSubtable(4) + font = ttLib.TTFont() + font.setGlyphOrder([]) + subtable.decompile(b'\0' * 3 + b'\x10' + b'\0' * 12, font) + + def test_decompile_12(self): + subtable = CmapSubtable.newSubtable(12) + font = ttLib.TTFont() + font.setGlyphOrder([]) + subtable.decompile(b'\0' * 7 + b'\x10' + b'\0' * 8, font) + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/C_O_L_R_.py fonttools-3.0/Tools/fontTools/ttLib/tables/C_O_L_R_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/C_O_L_R_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/C_O_L_R_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,159 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . import DefaultTable +import operator +import struct + + +class table_C_O_L_R_(DefaultTable.DefaultTable): + + """ This table is structured so that you can treat it like a dictionary keyed by glyph name. + ttFont['COLR'][] will return the color layers for any glyph + ttFont['COLR'][] = will set the color layers for any glyph. 
+ """ + + def decompile(self, data, ttFont): + self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID + self.version, numBaseGlyphRecords, offsetBaseGlyphRecord, offsetLayerRecord, numLayerRecords = struct.unpack(">HHLLH", data[:14]) + assert (self.version == 0), "Version of COLR table is higher than I know how to handle" + glyphOrder = ttFont.getGlyphOrder() + gids = [] + layerLists = [] + glyphPos = offsetBaseGlyphRecord + for i in range(numBaseGlyphRecords): + gid, firstLayerIndex, numLayers = struct.unpack(">HHH", data[glyphPos:glyphPos+6]) + glyphPos += 6 + gids.append(gid) + assert (firstLayerIndex + numLayers <= numLayerRecords) + layerPos = offsetLayerRecord + firstLayerIndex * 4 + layers = [] + for j in range(numLayers): + layerGid, colorID = struct.unpack(">HH", data[layerPos:layerPos+4]) + try: + layerName = glyphOrder[layerGid] + except IndexError: + layerName = self.getGlyphName(layerGid) + layerPos += 4 + layers.append(LayerRecord(layerName, colorID)) + layerLists.append(layers) + + self.ColorLayers = colorLayerLists = {} + try: + names = list(map(operator.getitem, [glyphOrder]*numBaseGlyphRecords, gids)) + except IndexError: + getGlyphName = self.getGlyphName + names = list(map(getGlyphName, gids )) + + list(map(operator.setitem, [colorLayerLists]*numBaseGlyphRecords, names, layerLists)) + + def compile(self, ttFont): + ordered = [] + ttFont.getReverseGlyphMap(rebuild=True) + glyphNames = self.ColorLayers.keys() + for glyphName in glyphNames: + try: + gid = ttFont.getGlyphID(glyphName) + except: + assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) + ordered.append([gid, glyphName, self.ColorLayers[glyphName]]) + ordered.sort() + + glyphMap = [] + layerMap = [] + for (gid, glyphName, layers) in ordered: + glyphMap.append(struct.pack(">HHH", gid, len(layerMap), len(layers))) + for layer in layers: + layerMap.append(struct.pack(">HH", ttFont.getGlyphID(layer.name), 
layer.colorID)) + + dataList = [struct.pack(">HHLLH", self.version, len(glyphMap), 14, 14+6*len(glyphMap), len(layerMap))] + dataList.extend(glyphMap) + dataList.extend(layerMap) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + ordered = [] + glyphNames = self.ColorLayers.keys() + for glyphName in glyphNames: + try: + gid = ttFont.getGlyphID(glyphName) + except: + assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) + ordered.append([gid, glyphName, self.ColorLayers[glyphName]]) + ordered.sort() + for entry in ordered: + writer.begintag("ColorGlyph", name=entry[1]) + writer.newline() + for layer in entry[2]: + layer.toXML(writer, ttFont) + writer.endtag("ColorGlyph") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "ColorLayers"): + self.ColorLayers = {} + self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID + if name == "ColorGlyph": + glyphName = attrs["name"] + for element in content: + if isinstance(element, basestring): + continue + layers = [] + for element in content: + if isinstance(element, basestring): + continue + layer = LayerRecord() + layer.fromXML(element[0], element[1], element[2], ttFont) + layers.append (layer) + operator.setitem(self, glyphName, layers) + elif "value" in attrs: + setattr(self, name, safeEval(attrs["value"])) + + def __getitem__(self, glyphSelector): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = self.getGlyphName(glyphSelector) + + if glyphSelector not in self.ColorLayers: + return None + + return self.ColorLayers[glyphSelector] + + def __setitem__(self, glyphSelector, value): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = self.getGlyphName(glyphSelector) + + if value: + self.ColorLayers[glyphSelector] = value + 
elif glyphSelector in self.ColorLayers: + del self.ColorLayers[glyphSelector] + + def __delitem__(self, glyphSelector): + del self.ColorLayers[glyphSelector] + +class LayerRecord(object): + + def __init__(self, name=None, colorID=None): + self.name = name + self.colorID = colorID + + def toXML(self, writer, ttFont): + writer.simpletag("layer", name=self.name, colorID=self.colorID) + writer.newline() + + def fromXML(self, eltname, attrs, content, ttFont): + for (name, value) in attrs.items(): + if name == "name": + if isinstance(value, int): + value = ttFont.getGlyphName(value) + setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/C_P_A_L_.py fonttools-3.0/Tools/fontTools/ttLib/tables/C_P_A_L_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/C_P_A_L_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/C_P_A_L_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,100 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import struct + + +class table_C_P_A_L_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12]) + assert (self.version == 0), "Version of COLR table is higher than I know how to handle" + self.palettes = [] + pos = 12 + for i in range(numPalettes): + startIndex = struct.unpack(">H", data[pos:pos+2])[0] + assert (startIndex + self.numPaletteEntries <= numColorRecords) + pos += 2 + palette = [] + ppos = goffsetFirstColorRecord + startIndex * 4 + for j in range(self.numPaletteEntries): + palette.append( Color(*struct.unpack(">BBBB", data[ppos:ppos+4])) ) + ppos += 4 + self.palettes.append(palette) + + def compile(self, ttFont): + dataList = [struct.pack(">HHHHL", self.version, self.numPaletteEntries, len(self.palettes), self.numPaletteEntries * len(self.palettes), 12+2*len(self.palettes))] + for i in range(len(self.palettes)): + dataList.append(struct.pack(">H", i*self.numPaletteEntries)) + for palette in self.palettes: + assert(len(palette) == self.numPaletteEntries) + for color in palette: + dataList.append(struct.pack(">BBBB", color.blue,color.green,color.red,color.alpha)) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("numPaletteEntries", value=self.numPaletteEntries) + writer.newline() + for index, palette in enumerate(self.palettes): + writer.begintag("palette", index=index) + writer.newline() + assert(len(palette) == self.numPaletteEntries) + for cindex, color in enumerate(palette): + color.toXML(writer, ttFont, cindex) + writer.endtag("palette") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "palettes"): + self.palettes = [] + if name == "palette": + palette = [] + for element in content: + if isinstance(element, basestring): + 
continue + palette = [] + for element in content: + if isinstance(element, basestring): + continue + color = Color() + color.fromXML(element[0], element[1], element[2], ttFont) + palette.append (color) + self.palettes.append(palette) + elif "value" in attrs: + value = safeEval(attrs["value"]) + setattr(self, name, value) + +class Color(object): + + def __init__(self, blue=None, green=None, red=None, alpha=None): + self.blue = blue + self.green = green + self.red = red + self.alpha = alpha + + def hex(self): + return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha) + + def __repr__(self): + return self.hex() + + def toXML(self, writer, ttFont, index=None): + writer.simpletag("color", value=self.hex(), index=index) + writer.newline() + + def fromXML(self, eltname, attrs, content, ttFont): + value = attrs["value"] + if value[0] == '#': + value = value[1:] + self.red = int(value[0:2], 16) + self.green = int(value[2:4], 16) + self.blue = int(value[4:6], 16) + self.alpha = int(value[6:8], 16) if len (value) >= 8 else 0xFF diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_c_v_t.py fonttools-3.0/Tools/fontTools/ttLib/tables/_c_v_t.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_c_v_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_c_v_t.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,49 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import sys +import array + +class table__c_v_t(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + values = array.array("h") + values.fromstring(data) + if sys.byteorder != "big": + values.byteswap() + self.values = values + + def compile(self, ttFont): + values = self.values[:] + if sys.byteorder != "big": + values.byteswap() + return values.tostring() + + def toXML(self, writer, ttFont): + for i in range(len(self.values)): + value = self.values[i] + writer.simpletag("cv", value=value, index=i) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "values"): + self.values = array.array("h") + if name == "cv": + index = safeEval(attrs["index"]) + value = safeEval(attrs["value"]) + for i in range(1 + index - len(self.values)): + self.values.append(0) + self.values[index] = value + + def __len__(self): + return len(self.values) + + def __getitem__(self, index): + return self.values[index] + + def __setitem__(self, index, value): + self.values[index] = value + + def __delitem__(self, index): + del self.values[index] diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/DefaultTable.py fonttools-3.0/Tools/fontTools/ttLib/tables/DefaultTable.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/DefaultTable.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/DefaultTable.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,47 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import getClassTag + +class DefaultTable(object): + + dependencies = [] + + def __init__(self, tag=None): + if tag is None: + tag = getClassTag(self.__class__) + self.tableTag = Tag(tag) + + def decompile(self, data, ttFont): + self.data = data + + def compile(self, ttFont): + return self.data + + def toXML(self, writer, ttFont, progress=None): + if hasattr(self, "ERROR"): + writer.comment("An error occurred during the 
decompilation of this table") + writer.newline() + writer.comment(self.ERROR) + writer.newline() + writer.begintag("hexdata") + writer.newline() + writer.dumphex(self.compile(ttFont)) + writer.endtag("hexdata") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + from fontTools.misc.textTools import readHex + from fontTools import ttLib + if name != "hexdata": + raise ttLib.TTLibError("can't handle '%s' element" % name) + self.decompile(readHex(content), ttFont) + + def __repr__(self): + return "<'%s' table at %x>" % (self.tableTag, id(self)) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/D_S_I_G_.py fonttools-3.0/Tools/fontTools/ttLib/tables/D_S_I_G_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/D_S_I_G_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/D_S_I_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,131 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from fontTools.misc import sstruct +from . 
import DefaultTable +import base64 + +DSIG_HeaderFormat = """ + > # big endian + ulVersion: L + usNumSigs: H + usFlag: H +""" +# followed by an array of usNumSigs DSIG_Signature records +DSIG_SignatureFormat = """ + > # big endian + ulFormat: L + ulLength: L # length includes DSIG_SignatureBlock header + ulOffset: L +""" +# followed by an array of usNumSigs DSIG_SignatureBlock records, +# each followed immediately by the pkcs7 bytes +DSIG_SignatureBlockFormat = """ + > # big endian + usReserved1: H + usReserved2: H + cbSignature: l # length of following raw pkcs7 data +""" + +# +# NOTE +# the DSIG table format allows for SignatureBlocks residing +# anywhere in the table and possibly in a different order as +# listed in the array after the first table header +# +# this implementation does not keep track of any gaps and/or data +# before or after the actual signature blocks while decompiling, +# and puts them in the same physical order as listed in the header +# on compilation with no padding whatsoever. 
+# + +class table_D_S_I_G_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self) + assert self.ulVersion == 1, "DSIG ulVersion must be 1" + assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0" + self.signatureRecords = sigrecs = [] + for n in range(self.usNumSigs): + sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord()) + assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n + sigrecs.append(sigrec) + for sigrec in sigrecs: + dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec) + assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserverd1 must be 0" % n + assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserverd2 must be 0" % n + sigrec.pkcs7 = newData[:sigrec.cbSignature] + + def compile(self, ttFont): + packed = sstruct.pack(DSIG_HeaderFormat, self) + headers = [packed] + offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat) + data = [] + for sigrec in self.signatureRecords: + # first pack signature block + sigrec.cbSignature = len(sigrec.pkcs7) + packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7 + data.append(packed) + # update redundant length field + sigrec.ulLength = len(packed) + # update running table offset + sigrec.ulOffset = offset + headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec)) + offset += sigrec.ulLength + if offset % 2: + # Pad to even bytes + data.append(b'\0') + return bytesjoin(headers+data) + + def toXML(self, xmlWriter, ttFont): + xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!") + xmlWriter.newline() + xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag) + for sigrec in self.signatureRecords: + xmlWriter.newline() + sigrec.toXML(xmlWriter, ttFont) + xmlWriter.newline() + + def 
fromXML(self, name, attrs, content, ttFont): + if name == "tableHeader": + self.signatureRecords = [] + self.ulVersion = safeEval(attrs["version"]) + self.usNumSigs = safeEval(attrs["numSigs"]) + self.usFlag = safeEval(attrs["flag"]) + return + if name == "SignatureRecord": + sigrec = SignatureRecord() + sigrec.fromXML(name, attrs, content, ttFont) + self.signatureRecords.append(sigrec) + +pem_spam = lambda l, spam = { + "-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True +}: not spam.get(l.strip()) + +def b64encode(b): + s = base64.b64encode(b) + # Line-break at 76 chars. + items = [] + while s: + items.append(tostr(s[:76])) + items.append('\n') + s = s[76:] + return strjoin(items) + +class SignatureRecord(object): + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self.__dict__) + + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, format=self.ulFormat) + writer.newline() + writer.write_noindent("-----BEGIN PKCS7-----\n") + writer.write_noindent(b64encode(self.pkcs7)) + writer.write_noindent("-----END PKCS7-----\n") + writer.endtag(self.__class__.__name__) + + def fromXML(self, name, attrs, content, ttFont): + self.ulFormat = safeEval(attrs["format"]) + self.usReserved1 = safeEval(attrs.get("reserved1", "0")) + self.usReserved2 = safeEval(attrs.get("reserved2", "0")) + self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content)))) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/E_B_D_T_.py fonttools-3.0/Tools/fontTools/ttLib/tables/E_B_D_T_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/E_B_D_T_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/E_B_D_T_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,759 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, readHex, hexStr, deHexStr +from .BitmapGlyphMetrics 
import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat +from . import DefaultTable +import itertools +import os +import struct + +ebdtTableVersionFormat = """ + > # big endian + version: 16.16F +""" + +ebdtComponentFormat = """ + > # big endian + glyphCode: H + xOffset: b + yOffset: b +""" + +class table_E_B_D_T_(DefaultTable.DefaultTable): + + # Keep a reference to the name of the data locator table. + locatorName = 'EBLC' + + # This method can be overridden in subclasses to support new formats + # without changing the other implementation. Also can be used as a + # convenience method for coverting a font file to an alternative format. + def getImageFormatClass(self, imageFormat): + return ebdt_bitmap_classes[imageFormat] + + def decompile(self, data, ttFont): + # Get the version but don't advance the slice. + # Most of the lookup for this table is done relative + # to the begining so slice by the offsets provided + # in the EBLC table. + sstruct.unpack2(ebdtTableVersionFormat, data, self) + + # Keep a dict of glyphs that have been seen so they aren't remade. + # This dict maps intervals of data to the BitmapGlyph. + glyphDict = {} + + # Pull out the EBLC table and loop through glyphs. + # A strike is a concept that spans both tables. + # The actual bitmap data is stored in the EBDT. + locator = ttFont[self.__class__.locatorName] + self.strikeData = [] + for curStrike in locator.strikes: + bitmapGlyphDict = {} + self.strikeData.append(bitmapGlyphDict) + for indexSubTable in curStrike.indexSubTables: + dataIter = zip(indexSubTable.names, indexSubTable.locations) + for curName, curLoc in dataIter: + # Don't create duplicate data entries for the same glyphs. + # Instead just use the structures that already exist if they exist. 
+ if curLoc in glyphDict: + curGlyph = glyphDict[curLoc] + else: + curGlyphData = data[slice(*curLoc)] + imageFormatClass = self.getImageFormatClass(indexSubTable.imageFormat) + curGlyph = imageFormatClass(curGlyphData, ttFont) + glyphDict[curLoc] = curGlyph + bitmapGlyphDict[curName] = curGlyph + + def compile(self, ttFont): + + dataList = [] + dataList.append(sstruct.pack(ebdtTableVersionFormat, self)) + dataSize = len(dataList[0]) + + # Keep a dict of glyphs that have been seen so they aren't remade. + # This dict maps the id of the BitmapGlyph to the interval + # in the data. + glyphDict = {} + + # Go through the bitmap glyph data. Just in case the data for a glyph + # changed the size metrics should be recalculated. There are a variety + # of formats and they get stored in the EBLC table. That is why + # recalculation is defered to the EblcIndexSubTable class and just + # pass what is known about bitmap glyphs from this particular table. + locator = ttFont[self.__class__.locatorName] + for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): + for curIndexSubTable in curStrike.indexSubTables: + dataLocations = [] + for curName in curIndexSubTable.names: + # Handle the data placement based on seeing the glyph or not. + # Just save a reference to the location if the glyph has already + # been saved in compile. This code assumes that glyphs will only + # be referenced multiple times from indexFormat5. By luck the + # code may still work when referencing poorly ordered fonts with + # duplicate references. If there is a font that is unlucky the + # respective compile methods for the indexSubTables will fail + # their assertions. All fonts seem to follow this assumption. + # More complicated packing may be needed if a counter-font exists. 
+ glyph = curGlyphDict[curName] + objectId = id(glyph) + if objectId not in glyphDict: + data = glyph.compile(ttFont) + data = curIndexSubTable.padBitmapData(data) + startByte = dataSize + dataSize += len(data) + endByte = dataSize + dataList.append(data) + dataLoc = (startByte, endByte) + glyphDict[objectId] = dataLoc + else: + dataLoc = glyphDict[objectId] + dataLocations.append(dataLoc) + # Just use the new data locations in the indexSubTable. + # The respective compile implementations will take care + # of any of the problems in the convertion that may arise. + curIndexSubTable.locations = dataLocations + + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + # When exporting to XML if one of the data export formats + # requires metrics then those metrics may be in the locator. + # In this case populate the bitmaps with "export metrics". + if ttFont.bitmapGlyphDataFormat in ('row', 'bitwise'): + locator = ttFont[self.__class__.locatorName] + for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): + for curIndexSubTable in curStrike.indexSubTables: + for curName in curIndexSubTable.names: + glyph = curGlyphDict[curName] + # I'm not sure which metrics have priority here. + # For now if both metrics exist go with glyph metrics. 
+ if hasattr(glyph, 'metrics'): + glyph.exportMetrics = glyph.metrics + else: + glyph.exportMetrics = curIndexSubTable.metrics + glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth + + writer.simpletag("header", [('version', self.version)]) + writer.newline() + locator = ttFont[self.__class__.locatorName] + for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData): + writer.begintag('strikedata', [('index', strikeIndex)]) + writer.newline() + for curName, curBitmap in bitmapGlyphDict.items(): + curBitmap.toXML(strikeIndex, curName, writer, ttFont) + writer.endtag('strikedata') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'header': + self.version = safeEval(attrs['version']) + elif name == 'strikedata': + if not hasattr(self, 'strikeData'): + self.strikeData = [] + strikeIndex = safeEval(attrs['index']) + + bitmapGlyphDict = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]): + imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix):]) + glyphName = attrs['name'] + imageFormatClass = self.getImageFormatClass(imageFormat) + curGlyph = imageFormatClass(None, None) + curGlyph.fromXML(name, attrs, content, ttFont) + assert glyphName not in bitmapGlyphDict, "Duplicate glyphs with the same name '%s' in the same strike." % glyphName + bitmapGlyphDict[glyphName] = curGlyph + else: + print("Warning: %s being ignored by %s", name, self.__class__.__name__) + + # Grow the strike data array to the appropriate size. The XML + # format allows the strike index value to be out of order. + if strikeIndex >= len(self.strikeData): + self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData)) + assert self.strikeData[strikeIndex] is None, "Duplicate strike EBDT indices." 
+ self.strikeData[strikeIndex] = bitmapGlyphDict + +class EbdtComponent(object): + + def toXML(self, writer, ttFont): + writer.begintag('ebdtComponent', [('name', self.name)]) + writer.newline() + for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]: + writer.simpletag(componentName, value=getattr(self, componentName)) + writer.newline() + writer.endtag('ebdtComponent') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.name = attrs['name'] + componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:]) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name in componentNames: + vars(self)[name] = safeEval(attrs['value']) + else: + print("Warning: unknown name '%s' being ignored by EbdtComponent." % name) + +# Helper functions for dealing with binary. + +def _data2binary(data, numBits): + binaryList = [] + for curByte in data: + value = byteord(curByte) + numBitsCut = min(8, numBits) + for i in range(numBitsCut): + if value & 0x1: + binaryList.append('1') + else: + binaryList.append('0') + value = value >> 1 + numBits -= numBitsCut + return strjoin(binaryList) + +def _binary2data(binary): + byteList = [] + for bitLoc in range(0, len(binary), 8): + byteString = binary[bitLoc:bitLoc+8] + curByte = 0 + for curBit in reversed(byteString): + curByte = curByte << 1 + if curBit == '1': + curByte |= 1 + byteList.append(bytechr(curByte)) + return bytesjoin(byteList) + +def _memoize(f): + class memodict(dict): + def __missing__(self, key): + ret = f(key) + if len(key) == 1: + self[key] = ret + return ret + return memodict().__getitem__ + +# 00100111 -> 11100100 per byte, not to be confused with little/big endian. +# Bitmap data per byte is in the order that binary is written on the page +# with the least significant bit as far right as possible. This is the +# opposite of what makes sense algorithmically and hence this function. 
+@_memoize +def _reverseBytes(data): + if len(data) != 1: + return bytesjoin(map(_reverseBytes, data)) + byte = byteord(data) + result = 0 + for i in range(8): + result = result << 1 + result |= byte & 1 + byte = byte >> 1 + return bytechr(result) + +# This section of code is for reading and writing image data to/from XML. + +def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + writer.begintag('rawimagedata') + writer.newline() + writer.dumphex(bitmapObject.imageData) + writer.endtag('rawimagedata') + writer.newline() + +def _readRawImageData(bitmapObject, name, attrs, content, ttFont): + bitmapObject.imageData = readHex(content) + +def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + metrics = bitmapObject.exportMetrics + del bitmapObject.exportMetrics + bitDepth = bitmapObject.exportBitDepth + del bitmapObject.exportBitDepth + + writer.begintag('rowimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height) + writer.newline() + for curRow in range(metrics.height): + rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics) + writer.simpletag('row', value=hexStr(rowData)) + writer.newline() + writer.endtag('rowimagedata') + writer.newline() + +def _readRowImageData(bitmapObject, name, attrs, content, ttFont): + bitDepth = safeEval(attrs['bitDepth']) + metrics = SmallGlyphMetrics() + metrics.width = safeEval(attrs['width']) + metrics.height = safeEval(attrs['height']) + + dataRows = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + # Chop off 'imagedata' from the tag to get just the option. 
+ if name == 'row': + dataRows.append(deHexStr(attr['value'])) + bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics) + +def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + metrics = bitmapObject.exportMetrics + del bitmapObject.exportMetrics + bitDepth = bitmapObject.exportBitDepth + del bitmapObject.exportBitDepth + + # A dict for mapping binary to more readable/artistic ASCII characters. + binaryConv = {'0':'.', '1':'@'} + + writer.begintag('bitwiseimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height) + writer.newline() + for curRow in range(metrics.height): + rowData = bitmapObject.getRow(curRow, bitDepth=1, metrics=metrics, reverseBytes=True) + rowData = _data2binary(rowData, metrics.width) + # Make the output a readable ASCII art form. + rowData = strjoin(map(binaryConv.get, rowData)) + writer.simpletag('row', value=rowData) + writer.newline() + writer.endtag('bitwiseimagedata') + writer.newline() + +def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont): + bitDepth = safeEval(attrs['bitDepth']) + metrics = SmallGlyphMetrics() + metrics.width = safeEval(attrs['width']) + metrics.height = safeEval(attrs['height']) + + # A dict for mapping from ASCII to binary. All characters are considered + # a '1' except space, period and '0' which maps to '0'. 
+ binaryConv = {' ':'0', '.':'0', '0':'0'} + + dataRows = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + if name == 'row': + mapParams = zip(attr['value'], itertools.repeat('1')) + rowData = strjoin(itertools.starmap(binaryConv.get, mapParams)) + dataRows.append(_binary2data(rowData)) + + bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True) + +def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + try: + folder = os.path.dirname(writer.file.name) + except AttributeError: + # fall back to current directory if output file's directory isn't found + folder = '.' + folder = os.path.join(folder, 'bitmaps') + filename = glyphName + bitmapObject.fileExtension + if not os.path.isdir(folder): + os.makedirs(folder) + folder = os.path.join(folder, 'strike%d' % strikeIndex) + if not os.path.isdir(folder): + os.makedirs(folder) + + fullPath = os.path.join(folder, filename) + writer.simpletag('extfileimagedata', value=fullPath) + writer.newline() + + with open(fullPath, "wb") as file: + file.write(bitmapObject.imageData) + +def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont): + fullPath = attrs['value'] + with open(fullPath, "rb") as file: + bitmapObject.imageData = file.read() + +# End of XML writing code. + +# Important information about the naming scheme. Used for identifying formats +# in XML. +_bitmapGlyphSubclassPrefix = 'ebdt_bitmap_format_' + +class BitmapGlyph(object): + + # For the external file format. This can be changed in subclasses. This way + # when the extfile option is turned on files have the form: glyphName.ext + # The default is just a flat binary file with no meaning. + fileExtension = '.bin' + + # Keep track of reading and writing of various forms. 
+ xmlDataFunctions = { + 'raw': (_writeRawImageData, _readRawImageData), + 'row': (_writeRowImageData, _readRowImageData), + 'bitwise': (_writeBitwiseImageData, _readBitwiseImageData), + 'extfile': (_writeExtFileImageData, _readExtFileImageData), + } + + def __init__(self, data, ttFont): + self.data = data + self.ttFont = ttFont + # TODO Currently non-lazy decompilation is untested here... + #if not ttFont.lazy: + # self.decompile() + # del self.data + + def __getattr__(self, attr): + # Allow lazy decompile. + if attr[:2] == '__': + raise AttributeError(attr) + if not hasattr(self, "data"): + raise AttributeError(attr) + self.decompile() + del self.data + return getattr(self, attr) + + # Not a fan of this but it is needed for safer safety checking. + def getFormat(self): + return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix):]) + + def toXML(self, strikeIndex, glyphName, writer, ttFont): + writer.begintag(self.__class__.__name__, [('name', glyphName)]) + writer.newline() + + self.writeMetrics(writer, ttFont) + # Use the internal write method to write using the correct output format. + self.writeData(strikeIndex, glyphName, writer, ttFont) + + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.readMetrics(name, attrs, content, ttFont) + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + if not name.endswith('imagedata'): + continue + # Chop off 'imagedata' from the tag to get just the option. + option = name[:-len('imagedata')] + assert option in self.__class__.xmlDataFunctions + self.readData(name, attr, content, ttFont) + + # Some of the glyphs have the metrics. This allows for metrics to be + # added if the glyph format has them. Default behavior is to do nothing. + def writeMetrics(self, writer, ttFont): + pass + + # The opposite of write metrics. 
+	def readMetrics(self, name, attrs, content, ttFont):
+		pass
+
+	def writeData(self, strikeIndex, glyphName, writer, ttFont):
+		try:
+			writeFunc, readFunc = self.__class__.xmlDataFunctions[ttFont.bitmapGlyphDataFormat]
+		except KeyError:
+			writeFunc = _writeRawImageData
+		writeFunc(strikeIndex, glyphName, self, writer, ttFont)
+
+	def readData(self, name, attrs, content, ttFont):
+		# Chop off 'imagedata' from the tag to get just the option.
+		option = name[:-len('imagedata')]
+		writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
+		readFunc(self, name, attrs, content, ttFont)
+
+
+# A closure for creating a mixin for the two types of metrics handling.
+# Most of the code is very similar so it's easier to deal with here.
+# Everything works just by passing the class that the mixin is for.
+def _createBitmapPlusMetricsMixin(metricsClass):
+	# Both metrics names are listed here to make meaningful error messages.
+	metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
+	curMetricsName = metricsClass.__name__
+	# Find which metrics this is for and determine the opposite name.
+	metricsId = metricStrings.index(curMetricsName)
+	oppositeMetricsName = metricStrings[1-metricsId]
+
+	class BitmapPlusMetricsMixin(object):
+
+		def writeMetrics(self, writer, ttFont):
+			self.metrics.toXML(writer, ttFont)
+
+		def readMetrics(self, name, attrs, content, ttFont):
+			for element in content:
+				if not isinstance(element, tuple):
+					continue
+				name, attrs, content = element
+				if name == curMetricsName:
+					self.metrics = metricsClass()
+					self.metrics.fromXML(name, attrs, content, ttFont)
+				elif name == oppositeMetricsName:
+					print("Warning: %s being ignored in format %d." % (oppositeMetricsName, self.getFormat()))
+
+	return BitmapPlusMetricsMixin
+
+# Since there are only two types of mixins just create them here.
+BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics) +BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics) + +# Data that is bit aligned can be tricky to deal with. These classes implement +# helper functionality for dealing with the data and getting a particular row +# of bitwise data. Also helps implement fancy data export/import in XML. +class BitAlignedBitmapMixin(object): + + def _getBitRange(self, row, bitDepth, metrics): + rowBits = (bitDepth * metrics.width) + bitOffset = row * rowBits + return (bitOffset, bitOffset+rowBits) + + def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False): + if metrics is None: + metrics = self.metrics + assert 0 <= row and row < metrics.height, "Illegal row access in bitmap" + + # Loop through each byte. This can cover two bytes in the original data or + # a single byte if things happen to be aligned. The very last entry might + # not be aligned so take care to trim the binary data to size and pad with + # zeros in the row data. Bit aligned data is somewhat tricky. + # + # Example of data cut. Data cut represented in x's. + # '|' represents byte boundary. + # data = ...0XX|XXXXXX00|000... => XXXXXXXX + # or + # data = ...0XX|XXXX0000|000... => XXXXXX00 + # or + # data = ...000|XXXXXXXX|000... => XXXXXXXX + # or + # data = ...000|00XXXX00|000... 
=> XXXX0000 + # + dataList = [] + bitRange = self._getBitRange(row, bitDepth, metrics) + stepRange = bitRange + (8,) + for curBit in range(*stepRange): + endBit = min(curBit+8, bitRange[1]) + numBits = endBit - curBit + cutPoint = curBit % 8 + firstByteLoc = curBit // 8 + secondByteLoc = endBit // 8 + if firstByteLoc < secondByteLoc: + numBitsCut = 8 - cutPoint + else: + numBitsCut = endBit - curBit + curByte = _reverseBytes(self.imageData[firstByteLoc]) + firstHalf = byteord(curByte) >> cutPoint + firstHalf = ((1<> numBitsCut) & ((1<<8-numBitsCut)-1) + ordDataList[secondByteLoc] |= secondByte + + # Save the image data with the bits going the correct way. + self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList))) + +class ByteAlignedBitmapMixin(object): + + def _getByteRange(self, row, bitDepth, metrics): + rowBytes = (bitDepth * metrics.width + 7) // 8 + byteOffset = row * rowBytes + return (byteOffset, byteOffset+rowBytes) + + def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False): + if metrics is None: + metrics = self.metrics + assert 0 <= row and row < metrics.height, "Illegal row access in bitmap" + byteRange = self._getByteRange(row, bitDepth, metrics) + data = self.imageData[slice(*byteRange)] + if reverseBytes: + data = _reverseBytes(data) + return data + + def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False): + if metrics is None: + metrics = self.metrics + if reverseBytes: + dataRows = map(_reverseBytes, dataRows) + self.imageData = bytesjoin(dataRows) + +class ebdt_bitmap_format_1(ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(smallGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ebdt_bitmap_format_2(BitAlignedBitmapMixin, 
BitmapPlusSmallMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(smallGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph): + + def decompile(self): + self.imageData = self.data + + def compile(self, ttFont): + return self.imageData + +class ebdt_bitmap_format_6(ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(bigGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ebdt_bitmap_format_7(BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + self.imageData = data + + def compile(self, ttFont): + data = sstruct.pack(bigGlyphMetricsFormat, self.metrics) + return data + self.imageData + + +class ComponentBitmapGlyph(BitmapGlyph): + + def toXML(self, strikeIndex, glyphName, writer, ttFont): + writer.begintag(self.__class__.__name__, [('name', glyphName)]) + writer.newline() + + self.writeMetrics(writer, ttFont) + + writer.begintag('components') + writer.newline() + for curComponent in self.componentArray: + curComponent.toXML(writer, ttFont) + writer.endtag('components') + writer.newline() + + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.readMetrics(name, attrs, content, ttFont) + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + if name == 'components': + 
self.componentArray = [] + for compElement in content: + if not isinstance(compElement, tuple): + continue + name, attrs, content = compElement + if name == 'ebdtComponent': + curComponent = EbdtComponent() + curComponent.fromXML(name, attrs, content, ttFont) + self.componentArray.append(curComponent) + else: + print("Warning: '%s' being ignored in component array." % name) + + +class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph): + + def decompile(self): + self.metrics = SmallGlyphMetrics() + dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) + data = data[1:] + + (numComponents,) = struct.unpack(">H", data[:2]) + data = data[2:] + self.componentArray = [] + for i in range(numComponents): + curComponent = EbdtComponent() + dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent) + curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode) + self.componentArray.append(curComponent) + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) + dataList.append(b'\0') + dataList.append(struct.pack(">H", len(self.componentArray))) + for curComponent in self.componentArray: + curComponent.glyphCode = ttFont.getGlyphID(curComponent.name) + dataList.append(sstruct.pack(ebdtComponentFormat, curComponent)) + return bytesjoin(dataList) + + +class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph): + + def decompile(self): + self.metrics = BigGlyphMetrics() + dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) + (numComponents,) = struct.unpack(">H", data[:2]) + data = data[2:] + self.componentArray = [] + for i in range(numComponents): + curComponent = EbdtComponent() + dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent) + curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode) + self.componentArray.append(curComponent) + + def compile(self, ttFont): + dataList = 
[] + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + dataList.append(struct.pack(">H", len(self.componentArray))) + for curComponent in self.componentArray: + curComponent.glyphCode = ttFont.getGlyphID(curComponent.name) + dataList.append(sstruct.pack(ebdtComponentFormat, curComponent)) + return bytesjoin(dataList) + + +# Dictionary of bitmap formats to the class representing that format +# currently only the ones listed in this map are the ones supported. +ebdt_bitmap_classes = { + 1: ebdt_bitmap_format_1, + 2: ebdt_bitmap_format_2, + 5: ebdt_bitmap_format_5, + 6: ebdt_bitmap_format_6, + 7: ebdt_bitmap_format_7, + 8: ebdt_bitmap_format_8, + 9: ebdt_bitmap_format_9, + } diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/E_B_L_C_.py fonttools-3.0/Tools/fontTools/ttLib/tables/E_B_L_C_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/E_B_L_C_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/E_B_L_C_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,617 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . import DefaultTable +from fontTools.misc.textTools import safeEval +from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat +import struct +import itertools +from collections import deque + +eblcHeaderFormat = """ + > # big endian + version: 16.16F + numSizes: I +""" +# The table format string is split to handle sbitLineMetrics simply. +bitmapSizeTableFormatPart1 = """ + > # big endian + indexSubTableArrayOffset: I + indexTablesSize: I + numberOfIndexSubTables: I + colorRef: I +""" +# The compound type for hori and vert. 
+sbitLineMetricsFormat = """ + > # big endian + ascender: b + descender: b + widthMax: B + caretSlopeNumerator: b + caretSlopeDenominator: b + caretOffset: b + minOriginSB: b + minAdvanceSB: b + maxBeforeBL: b + minAfterBL: b + pad1: b + pad2: b +""" +# hori and vert go between the two parts. +bitmapSizeTableFormatPart2 = """ + > # big endian + startGlyphIndex: H + endGlyphIndex: H + ppemX: B + ppemY: B + bitDepth: B + flags: b +""" + +indexSubTableArrayFormat = ">HHL" +indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat) + +indexSubHeaderFormat = ">HHL" +indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat) + +codeOffsetPairFormat = ">HH" +codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat) + +class table_E_B_L_C_(DefaultTable.DefaultTable): + + dependencies = ['EBDT'] + + # This method can be overridden in subclasses to support new formats + # without changing the other implementation. Also can be used as a + # convenience method for coverting a font file to an alternative format. + def getIndexFormatClass(self, indexFormat): + return eblc_sub_table_classes[indexFormat] + + def decompile(self, data, ttFont): + + # Save the original data because offsets are from the start of the table. 
+ origData = data + + dummy, data = sstruct.unpack2(eblcHeaderFormat, data, self) + + self.strikes = [] + for curStrikeIndex in range(self.numSizes): + curStrike = Strike() + self.strikes.append(curStrike) + curTable = curStrike.bitmapSizeTable + dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart1, data, curTable) + for metric in ('hori', 'vert'): + metricObj = SbitLineMetrics() + vars(curTable)[metric] = metricObj + dummy, data = sstruct.unpack2(sbitLineMetricsFormat, data, metricObj) + dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart2, data, curTable) + + for curStrike in self.strikes: + curTable = curStrike.bitmapSizeTable + for subtableIndex in range(curTable.numberOfIndexSubTables): + lowerBound = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize + upperBound = lowerBound + indexSubTableArraySize + data = origData[lowerBound:upperBound] + + tup = struct.unpack(indexSubTableArrayFormat, data) + (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup + offsetToIndexSubTable = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable + data = origData[offsetToIndexSubTable:] + + tup = struct.unpack(indexSubHeaderFormat, data[:indexSubHeaderSize]) + (indexFormat, imageFormat, imageDataOffset) = tup + + indexFormatClass = self.getIndexFormatClass(indexFormat) + indexSubTable = indexFormatClass(data[indexSubHeaderSize:], ttFont) + indexSubTable.firstGlyphIndex = firstGlyphIndex + indexSubTable.lastGlyphIndex = lastGlyphIndex + indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable + indexSubTable.indexFormat = indexFormat + indexSubTable.imageFormat = imageFormat + indexSubTable.imageDataOffset = imageDataOffset + curStrike.indexSubTables.append(indexSubTable) + + def compile(self, ttFont): + + dataList = [] + self.numSizes = len(self.strikes) + dataList.append(sstruct.pack(eblcHeaderFormat, self)) + + # Data size of the header + bitmapSizeTable needs to be calculated + # in 
order to form offsets. This value will hold the size of the data + # in dataList after all the data is consolidated in dataList. + dataSize = len(dataList[0]) + + # The table will be structured in the following order: + # (0) header + # (1) Each bitmapSizeTable [1 ... self.numSizes] + # (2) Alternate between indexSubTableArray and indexSubTable + # for each bitmapSizeTable present. + # + # The issue is maintaining the proper offsets when table information + # gets moved around. All offsets and size information must be recalculated + # when building the table to allow editing within ttLib and also allow easy + # import/export to and from XML. All of this offset information is lost + # when exporting to XML so everything must be calculated fresh so importing + # from XML will work cleanly. Only byte offset and size information is + # calculated fresh. Count information like numberOfIndexSubTables is + # checked through assertions. If the information in this table was not + # touched or was changed properly then these types of values should match. + # + # The table will be rebuilt the following way: + # (0) Precompute the size of all the bitmapSizeTables. This is needed to + # compute the offsets properly. + # (1) For each bitmapSizeTable compute the indexSubTable and + # indexSubTableArray pair. The indexSubTable must be computed first + # so that the offset information in indexSubTableArray can be + # calculated. Update the data size after each pairing. + # (2) Build each bitmapSizeTable. + # (3) Consolidate all the data into the main dataList in the correct order. 
+ + for curStrike in self.strikes: + dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1) + dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat) + dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2) + + indexSubTablePairDataList = [] + for curStrike in self.strikes: + curTable = curStrike.bitmapSizeTable + curTable.numberOfIndexSubTables = len(curStrike.indexSubTables) + curTable.indexSubTableArrayOffset = dataSize + + # Precompute the size of the indexSubTableArray. This information + # is important for correctly calculating the new value for + # additionalOffsetToIndexSubtable. + sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize + lowerBound = dataSize + dataSize += sizeOfSubTableArray + upperBound = dataSize + + indexSubTableDataList = [] + for indexSubTable in curStrike.indexSubTables: + indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset + glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names)) + indexSubTable.firstGlyphIndex = min(glyphIds) + indexSubTable.lastGlyphIndex = max(glyphIds) + data = indexSubTable.compile(ttFont) + indexSubTableDataList.append(data) + dataSize += len(data) + curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables) + curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables) + + for i in curStrike.indexSubTables: + data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable) + indexSubTablePairDataList.append(data) + indexSubTablePairDataList.extend(indexSubTableDataList) + curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset + + for curStrike in self.strikes: + curTable = curStrike.bitmapSizeTable + data = sstruct.pack(bitmapSizeTableFormatPart1, curTable) + dataList.append(data) + for metric in ('hori', 'vert'): + metricObj = vars(curTable)[metric] + data = sstruct.pack(sbitLineMetricsFormat, 
metricObj) + dataList.append(data) + data = sstruct.pack(bitmapSizeTableFormatPart2, curTable) + dataList.append(data) + dataList.extend(indexSubTablePairDataList) + + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + writer.simpletag('header', [('version', self.version)]) + writer.newline() + for curIndex, curStrike in enumerate(self.strikes): + curStrike.toXML(curIndex, writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == 'header': + self.version = safeEval(attrs['version']) + elif name == 'strike': + if not hasattr(self, 'strikes'): + self.strikes = [] + strikeIndex = safeEval(attrs['index']) + curStrike = Strike() + curStrike.fromXML(name, attrs, content, ttFont, self) + + # Grow the strike array to the appropriate size. The XML format + # allows for the strike index value to be out of order. + if strikeIndex >= len(self.strikes): + self.strikes += [None] * (strikeIndex + 1 - len(self.strikes)) + assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices." + self.strikes[strikeIndex] = curStrike + +class Strike(object): + + def __init__(self): + self.bitmapSizeTable = BitmapSizeTable() + self.indexSubTables = [] + + def toXML(self, strikeIndex, writer, ttFont): + writer.begintag('strike', [('index', strikeIndex)]) + writer.newline() + self.bitmapSizeTable.toXML(writer, ttFont) + writer.comment('GlyphIds are written but not read. 
The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.') + writer.newline() + for indexSubTable in self.indexSubTables: + indexSubTable.toXML(writer, ttFont) + writer.endtag('strike') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont, locator): + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'bitmapSizeTable': + self.bitmapSizeTable.fromXML(name, attrs, content, ttFont) + elif name.startswith(_indexSubTableSubclassPrefix): + indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):]) + indexFormatClass = locator.getIndexFormatClass(indexFormat) + indexSubTable = indexFormatClass(None, None) + indexSubTable.indexFormat = indexFormat + indexSubTable.fromXML(name, attrs, content, ttFont) + self.indexSubTables.append(indexSubTable) + + +class BitmapSizeTable(object): + + # Returns all the simple metric names that bitmap size table + # cares about in terms of XML creation. + def _getXMLMetricNames(self): + dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1] + dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1] + # Skip the first 3 data names because they are byte offsets and counts. + return dataNames[3:] + + def toXML(self, writer, ttFont): + writer.begintag('bitmapSizeTable') + writer.newline() + for metric in ('hori', 'vert'): + getattr(self, metric).toXML(metric, writer, ttFont) + for metricName in self._getXMLMetricNames(): + writer.simpletag(metricName, value=getattr(self, metricName)) + writer.newline() + writer.endtag('bitmapSizeTable') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + # Create a lookup for all the simple names that make sense to + # bitmap size table. Only read the information from these names. 
+ dataNames = set(self._getXMLMetricNames()) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'sbitLineMetrics': + direction = attrs['direction'] + assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid." + metricObj = SbitLineMetrics() + metricObj.fromXML(name, attrs, content, ttFont) + vars(self)[direction] = metricObj + elif name in dataNames: + vars(self)[name] = safeEval(attrs['value']) + else: + print("Warning: unknown name '%s' being ignored in BitmapSizeTable." % name) + + +class SbitLineMetrics(object): + + def toXML(self, name, writer, ttFont): + writer.begintag('sbitLineMetrics', [('direction', name)]) + writer.newline() + for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]: + writer.simpletag(metricName, value=getattr(self, metricName)) + writer.newline() + writer.endtag('sbitLineMetrics') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1]) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name in metricNames: + vars(self)[name] = safeEval(attrs['value']) + +# Important information about the naming scheme. Used for identifying subtables. +_indexSubTableSubclassPrefix = 'eblc_index_sub_table_' + +class EblcIndexSubTable(object): + + def __init__(self, data, ttFont): + self.data = data + self.ttFont = ttFont + # TODO Currently non-lazy decompiling doesn't work for this class... + #if not ttFont.lazy: + # self.decompile() + # del self.data, self.ttFont + + def __getattr__(self, attr): + # Allow lazy decompile. + if attr[:2] == '__': + raise AttributeError(attr) + if not hasattr(self, "data"): + raise AttributeError(attr) + self.decompile() + del self.data, self.ttFont + return getattr(self, attr) + + # This method just takes care of the indexSubHeader. 
Implementing subclasses + # should call it to compile the indexSubHeader and then continue compiling + # the remainder of their unique format. + def compile(self, ttFont): + return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset) + + # Creates the XML for bitmap glyphs. Each index sub table basically makes + # the same XML except for specific metric information that is written + # out via a method call that a subclass implements optionally. + def toXML(self, writer, ttFont): + writer.begintag(self.__class__.__name__, [ + ('imageFormat', self.imageFormat), + ('firstGlyphIndex', self.firstGlyphIndex), + ('lastGlyphIndex', self.lastGlyphIndex), + ]) + writer.newline() + self.writeMetrics(writer, ttFont) + # Write out the names as thats all thats needed to rebuild etc. + # For font debugging of consecutive formats the ids are also written. + # The ids are not read when moving from the XML format. + glyphIds = map(ttFont.getGlyphID, self.names) + for glyphName, glyphId in zip(self.names, glyphIds): + writer.simpletag('glyphLoc', name=glyphName, id=glyphId) + writer.newline() + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + # Read all the attributes. Even though the glyph indices are + # recalculated, they are still read in case there needs to + # be an immediate export of the data. + self.imageFormat = safeEval(attrs['imageFormat']) + self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex']) + self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex']) + + self.readMetrics(name, attrs, content, ttFont) + + self.names = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'glyphLoc': + self.names.append(attrs['name']) + + # A helper method that writes the metrics for the index sub table. 
It also + # is responsible for writing the image size for fixed size data since fixed + # size is not recalculated on compile. Default behavior is to do nothing. + def writeMetrics(self, writer, ttFont): + pass + + # A helper method that is the inverse of writeMetrics. + def readMetrics(self, name, attrs, content, ttFont): + pass + + # This method is for fixed glyph data sizes. There are formats where + # the glyph data is fixed but are actually composite glyphs. To handle + # this the font spec in indexSubTable makes the data the size of the + # fixed size by padding the component arrays. This function abstracts + # out this padding process. Input is data unpadded. Output is data + # padded only in fixed formats. Default behavior is to return the data. + def padBitmapData(self, data): + return data + + # Remove any of the glyph locations and names that are flagged as skipped. + # This only occurs in formats {1,3}. + def removeSkipGlyphs(self): + # Determines if a name, location pair is a valid data location. + # Skip glyphs are marked when the size is equal to zero. + def isValidLocation(args): + (name, (startByte, endByte)) = args + return startByte < endByte + # Remove all skip glyphs. + dataPairs = list(filter(isValidLocation, zip(self.names, self.locations))) + self.names, self.locations = list(map(list, zip(*dataPairs))) + +# A closure for creating a custom mixin. This is done because formats 1 and 3 +# are very similar. The only difference between them is the size per offset +# value. Code put in here should handle both cases generally. +def _createOffsetArrayIndexSubTableMixin(formatStringForDataType): + + # Prep the data size for the offset array data format. 
+ dataFormat = '>'+formatStringForDataType + offsetDataSize = struct.calcsize(dataFormat) + + class OffsetArrayIndexSubTableMixin(object): + + def decompile(self): + + numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1 + indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)] + indexingLocations = zip(indexingOffsets, indexingOffsets[1:]) + offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations] + + glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) + modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray] + self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:])) + + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + self.removeSkipGlyphs() + + def compile(self, ttFont): + # First make sure that all the data lines up properly. Formats 1 and 3 + # must have all its data lined up consecutively. If not this will fail. + for curLoc, nxtLoc in zip(self.locations, self.locations[1:]): + assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats" + + glyphIds = list(map(ttFont.getGlyphID, self.names)) + # Make sure that all ids are sorted strictly increasing. + assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1)) + + # Run a simple algorithm to add skip glyphs to the data locations at + # the places where an id is not present. + idQueue = deque(glyphIds) + locQueue = deque(self.locations) + allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) + allLocations = [] + for curId in allGlyphIds: + if curId != idQueue[0]: + allLocations.append((locQueue[0][0], locQueue[0][0])) + else: + idQueue.popleft() + allLocations.append(locQueue.popleft()) + + # Now that all the locations are collected, pack them appropriately into + # offsets. This is the form where offset[i] is the location and + # offset[i+1]-offset[i] is the size of the data location. 
+ offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]] + # Image data offset must be less than or equal to the minimum of locations. + # This offset may change the value for round tripping but is safer and + # allows imageDataOffset to not be required to be in the XML version. + self.imageDataOffset = min(offsets) + offsetArray = [offset - self.imageDataOffset for offset in offsets] + + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray] + # Take care of any padding issues. Only occurs in format 3. + if offsetDataSize * len(dataList) % 4 != 0: + dataList.append(struct.pack(dataFormat, 0)) + return bytesjoin(dataList) + + return OffsetArrayIndexSubTableMixin + +# A Mixin for functionality shared between the different kinds +# of fixed sized data handling. Both kinds have big metrics so +# that kind of special processing is also handled in this mixin. +class FixedSizeIndexSubTableMixin(object): + + def writeMetrics(self, writer, ttFont): + writer.simpletag('imageSize', value=self.imageSize) + writer.newline() + self.metrics.toXML(writer, ttFont) + + def readMetrics(self, name, attrs, content, ttFont): + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == 'imageSize': + self.imageSize = safeEval(attrs['value']) + elif name == BigGlyphMetrics.__name__: + self.metrics = BigGlyphMetrics() + self.metrics.fromXML(name, attrs, content, ttFont) + elif name == SmallGlyphMetrics.__name__: + print("Warning: SmallGlyphMetrics being ignored in format %d." % self.indexFormat) + + def padBitmapData(self, data): + # Make sure that the data isn't bigger than the fixed size. + assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat + # Pad the data so that it matches the fixed size. 
+ pad = (self.imageSize - len(data)) * b'\0' + return data + pad + +class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable): + pass + +class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable): + + def decompile(self): + (self.imageSize,) = struct.unpack(">L", self.data[:4]) + self.metrics = BigGlyphMetrics() + sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics) + glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) + offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] + self.locations = list(zip(offsets, offsets[1:])) + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + + def compile(self, ttFont): + glyphIds = list(map(ttFont.getGlyphID, self.names)) + # Make sure all the ids are consecutive. This is required by Format 2. + assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive." + self.imageDataOffset = min(zip(*self.locations)[0]) + + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList.append(struct.pack(">L", self.imageSize)) + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + return bytesjoin(dataList) + +class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable): + pass + +class eblc_index_sub_table_4(EblcIndexSubTable): + + def decompile(self): + + (numGlyphs,) = struct.unpack(">L", self.data[:4]) + data = self.data[4:] + indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)] + indexingLocations = zip(indexingOffsets, indexingOffsets[1:]) + glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations] + glyphIds, offsets = list(map(list, zip(*glyphArray))) + # There are one too many glyph ids. Get rid of the last one. 
+ glyphIds.pop() + + offsets = [offset + self.imageDataOffset for offset in offsets] + self.locations = list(zip(offsets, offsets[1:])) + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + + def compile(self, ttFont): + # First make sure that all the data lines up properly. Format 4 + # must have all its data lined up consecutively. If not this will fail. + for curLoc, nxtLoc in zip(self.locations, self.locations[1:]): + assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4" + + offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]] + # Image data offset must be less than or equal to the minimum of locations. + # Resetting this offset may change the value for round tripping but is safer + # and allows imageDataOffset to not be required to be in the XML version. + self.imageDataOffset = min(offsets) + offsets = [offset - self.imageDataOffset for offset in offsets] + glyphIds = list(map(ttFont.getGlyphID, self.names)) + # Create an iterator over the ids plus a padding value. 
+ idsPlusPad = list(itertools.chain(glyphIds, [0])) + + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList.append(struct.pack(">L", len(glyphIds))) + tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)] + dataList += tmp + data = bytesjoin(dataList) + return data + +class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable): + + def decompile(self): + self.origDataLen = 0 + (self.imageSize,) = struct.unpack(">L", self.data[:4]) + data = self.data[4:] + self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics()) + (numGlyphs,) = struct.unpack(">L", data[:4]) + data = data[4:] + glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)] + + offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] + self.locations = list(zip(offsets, offsets[1:])) + self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + + def compile(self, ttFont): + self.imageDataOffset = min(zip(*self.locations)[0]) + dataList = [EblcIndexSubTable.compile(self, ttFont)] + dataList.append(struct.pack(">L", self.imageSize)) + dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) + glyphIds = list(map(ttFont.getGlyphID, self.names)) + dataList.append(struct.pack(">L", len(glyphIds))) + dataList += [struct.pack(">H", curId) for curId in glyphIds] + if len(glyphIds) % 2 == 1: + dataList.append(struct.pack(">H", 0)) + return bytesjoin(dataList) + +# Dictionary of indexFormat to the class representing that format. 
+eblc_sub_table_classes = { + 1: eblc_index_sub_table_1, + 2: eblc_index_sub_table_2, + 3: eblc_index_sub_table_3, + 4: eblc_index_sub_table_4, + 5: eblc_index_sub_table_5, + } diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_f_e_a_t.py fonttools-3.0/Tools/fontTools/ttLib/tables/_f_e_a_t.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_f_e_a_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_f_e_a_t.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table__f_e_a_t(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/F_F_T_M_.py fonttools-3.0/Tools/fontTools/ttLib/tables/F_F_T_M_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/F_F_T_M_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/F_F_T_M_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,42 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from fontTools.misc.timeTools import timestampFromString, timestampToString +from . 
import DefaultTable + +FFTMFormat = """ + > # big endian + version: I + FFTimeStamp: Q + sourceCreated: Q + sourceModified: Q +""" + +class table_F_F_T_M_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + dummy, rest = sstruct.unpack2(FFTMFormat, data, self) + + def compile(self, ttFont): + data = sstruct.pack(FFTMFormat, self) + return data + + def toXML(self, writer, ttFont): + writer.comment("FontForge's timestamp, font source creation and modification dates") + writer.newline() + formatstring, names, fixes = sstruct.getformat(FFTMFormat) + for name in names: + value = getattr(self, name) + if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): + value = timestampToString(value) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): + value = timestampFromString(value) + else: + value = safeEval(value) + setattr(self, name, value) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_f_p_g_m.py fonttools-3.0/Tools/fontTools/ttLib/tables/_f_p_g_m.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_f_p_g_m.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_f_p_g_m.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,51 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable +from . 
import ttProgram + +class table__f_p_g_m(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + program = ttProgram.Program() + program.fromBytecode(data) + self.program = program + + def compile(self, ttFont): + return self.program.getBytecode() + + def toXML(self, writer, ttFont): + self.program.toXML(writer, ttFont) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + program = ttProgram.Program() + program.fromXML(name, attrs, content, ttFont) + self.program = program + + def __bool__(self): + """ + >>> fpgm = table__f_p_g_m() + >>> bool(fpgm) + False + >>> p = ttProgram.Program() + >>> fpgm.program = p + >>> bool(fpgm) + False + >>> bc = bytearray([0]) + >>> p.fromBytecode(bc) + >>> bool(fpgm) + True + >>> p.bytecode.pop() + 0 + >>> bool(fpgm) + False + """ + return hasattr(self, 'program') and bool(self.program) + + __nonzero__ = __bool__ + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_f_v_a_r.py fonttools-3.0/Tools/fontTools/ttLib/tables/_f_v_a_r.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_f_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_f_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,187 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval, num2binary, binary2num +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import struct + + +# Apple's documentation of 'fvar': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html + +FVAR_HEADER_FORMAT = """ + > # big endian + version: L + offsetToData: H + countSizePairs: H + axisCount: H + axisSize: H + instanceCount: H + instanceSize: H +""" + +FVAR_AXIS_FORMAT = """ + > # big endian + axisTag: 4s + minValue: 16.16F + defaultValue: 16.16F + maxValue: 16.16F + flags: H + nameID: H +""" + +FVAR_INSTANCE_FORMAT = """ + > # big endian + nameID: H + flags: H +""" + +class table__f_v_a_r(DefaultTable.DefaultTable): + dependencies = ["name"] + + def __init__(self, tag="fvar"): + DefaultTable.DefaultTable.__init__(self, tag) + self.axes = [] + self.instances = [] + + def compile(self, ttFont): + header = { + "version": 0x00010000, + "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT), + "countSizePairs": 2, + "axisCount": len(self.axes), + "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT), + "instanceCount": len(self.instances), + "instanceSize": sstruct.calcsize(FVAR_INSTANCE_FORMAT) + len(self.axes) * 4 + } + result = [sstruct.pack(FVAR_HEADER_FORMAT, header)] + result.extend([axis.compile() for axis in self.axes]) + axisTags = [axis.axisTag for axis in self.axes] + result.extend([instance.compile(axisTags) for instance in self.instances]) + return bytesjoin(result) + + def decompile(self, data, ttFont): + header = {} + headerSize = sstruct.calcsize(FVAR_HEADER_FORMAT) + header = sstruct.unpack(FVAR_HEADER_FORMAT, data[0:headerSize]) + if header["version"] != 0x00010000: + raise TTLibError("unsupported 'fvar' version %04x" % header["version"]) + pos = header["offsetToData"] + axisSize = header["axisSize"] + for _ in range(header["axisCount"]): + axis = Axis() + axis.decompile(data[pos:pos+axisSize]) + self.axes.append(axis) + pos += axisSize + instanceSize = header["instanceSize"] + axisTags = [axis.axisTag for axis in self.axes] + for _ in range(header["instanceCount"]): + instance = 
NamedInstance() + instance.decompile(data[pos:pos+instanceSize], axisTags) + self.instances.append(instance) + pos += instanceSize + + def toXML(self, writer, ttFont, progress=None): + for axis in self.axes: + axis.toXML(writer, ttFont) + for instance in self.instances: + instance.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "Axis": + axis = Axis() + axis.fromXML(name, attrs, content, ttFont) + self.axes.append(axis) + elif name == "NamedInstance": + instance = NamedInstance() + instance.fromXML(name, attrs, content, ttFont) + self.instances.append(instance) + +class Axis(object): + def __init__(self): + self.axisTag = None + self.nameID = 0 + self.flags = 0 # not exposed in XML because spec defines no values + self.minValue = -1.0 + self.defaultValue = 0.0 + self.maxValue = 1.0 + + def compile(self): + return sstruct.pack(FVAR_AXIS_FORMAT, self) + + def decompile(self, data): + sstruct.unpack2(FVAR_AXIS_FORMAT, data, self) + + def toXML(self, writer, ttFont): + name = ttFont["name"].getDebugName(self.nameID) + if name is not None: + writer.newline() + writer.comment(name) + writer.newline() + writer.begintag("Axis") + writer.newline() + for tag, value in [("AxisTag", self.axisTag), + ("MinValue", str(self.minValue)), + ("DefaultValue", str(self.defaultValue)), + ("MaxValue", str(self.maxValue)), + ("NameID", str(self.nameID))]: + writer.begintag(tag) + writer.write(value) + writer.endtag(tag) + writer.newline() + writer.endtag("Axis") + writer.newline() + + def fromXML(self, name, _attrs, content, ttFont): + assert(name == "Axis") + for tag, _, value in filter(lambda t: type(t) is tuple, content): + value = ''.join(value) + if tag == "AxisTag": + self.axisTag = value + elif tag in ["MinValue", "DefaultValue", "MaxValue", "NameID"]: + setattr(self, tag[0].lower() + tag[1:], safeEval(value)) + +class NamedInstance(object): + def __init__(self): + self.nameID = 0 + self.flags = 0 # not exposed in XML because spec defines no 
values + self.coordinates = {} + + def compile(self, axisTags): + result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)] + for axis in axisTags: + fixedCoord = floatToFixed(self.coordinates[axis], 16) + result.append(struct.pack(">l", fixedCoord)) + return bytesjoin(result) + + def decompile(self, data, axisTags): + sstruct.unpack2(FVAR_INSTANCE_FORMAT, data, self) + pos = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + for axis in axisTags: + value = struct.unpack(">l", data[pos : pos + 4])[0] + self.coordinates[axis] = fixedToFloat(value, 16) + pos += 4 + + def toXML(self, writer, ttFont): + name = ttFont["name"].getDebugName(self.nameID) + if name is not None: + writer.newline() + writer.comment(name) + writer.newline() + writer.begintag("NamedInstance", nameID=self.nameID) + writer.newline() + for axis in ttFont["fvar"].axes: + writer.simpletag("coord", axis=axis.axisTag, + value=self.coordinates[axis.axisTag]) + writer.newline() + writer.endtag("NamedInstance") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + assert(name == "NamedInstance") + self.nameID = safeEval(attrs["nameID"]) + for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content): + if tag == "coord": + self.coordinates[elementAttrs["axis"]] = safeEval(elementAttrs["value"]) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_f_v_a_r_test.py fonttools-3.0/Tools/fontTools/ttLib/tables/_f_v_a_r_test.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_f_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_f_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,190 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis, NamedInstance +from 
fontTools.ttLib.tables._n_a_m_e import table__n_a_m_e, NameRecord +import unittest + + + +FVAR_DATA = deHexStr( + "00 01 00 00 00 10 00 02 00 02 00 14 00 02 00 0C " + "77 67 68 74 00 64 00 00 01 90 00 00 03 84 00 00 00 00 01 01 " + "77 64 74 68 00 32 00 00 00 64 00 00 00 c8 00 00 00 00 01 02 " + "01 03 00 00 01 2c 00 00 00 64 00 00 " + "01 04 00 00 01 2c 00 00 00 4b 00 00") + +FVAR_AXIS_DATA = deHexStr( + "6F 70 73 7a ff ff 80 00 00 01 4c cd 00 01 80 00 00 00 01 59") + +FVAR_INSTANCE_DATA = deHexStr("01 59 00 00 00 00 b3 33 00 00 80 00") + + +def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in content.splitlines()][1:] + + +def AddName(font, name): + nameTable = font.get("name") + if nameTable is None: + nameTable = font["name"] = table__n_a_m_e() + nameTable.names = [] + namerec = NameRecord() + namerec.nameID = 1 + max([n.nameID for n in nameTable.names] + [256]) + namerec.string = name.encode('mac_roman') + namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0) + nameTable.names.append(namerec) + return namerec + + +def MakeFont(): + axes = [("wght", "Weight", 100, 400, 900), ("wdth", "Width", 50, 100, 200)] + instances = [("Light", 300, 100), ("Light Condensed", 300, 75)] + fvarTable = table__f_v_a_r() + font = {"fvar": fvarTable} + for tag, name, minValue, defaultValue, maxValue in axes: + axis = Axis() + axis.axisTag = tag + axis.defaultValue = defaultValue + axis.minValue, axis.maxValue = minValue, maxValue + axis.nameID = AddName(font, name).nameID + fvarTable.axes.append(axis) + for name, weight, width in instances: + inst = NamedInstance() + inst.nameID = AddName(font, name).nameID + inst.coordinates = {"wght": weight, "wdth": width} + fvarTable.instances.append(inst) + return font + + +class FontVariationTableTest(unittest.TestCase): + def test_compile(self): + font = MakeFont() + h = font["fvar"].compile(font) + self.assertEqual(FVAR_DATA, font["fvar"].compile(font)) + + def 
test_decompile(self): + fvar = table__f_v_a_r() + fvar.decompile(FVAR_DATA, ttFont={"fvar": fvar}) + self.assertEqual(["wght", "wdth"], [a.axisTag for a in fvar.axes]) + self.assertEqual([259, 260], [i.nameID for i in fvar.instances]) + + def test_toXML(self): + font = MakeFont() + writer = XMLWriter(BytesIO()) + font["fvar"].toXML(writer, font) + xml = writer.file.getvalue().decode("utf-8") + self.assertEqual(2, xml.count("")) + self.assertTrue("wght" in xml) + self.assertTrue("wdth" in xml) + self.assertEqual(2, xml.count("" in xml) + self.assertTrue("" in xml) + + def test_fromXML(self): + fvar = table__f_v_a_r() + fvar.fromXML("Axis", {}, [("AxisTag", {}, ["opsz"])], ttFont=None) + fvar.fromXML("Axis", {}, [("AxisTag", {}, ["slnt"])], ttFont=None) + fvar.fromXML("NamedInstance", {"nameID": "765"}, [], ttFont=None) + fvar.fromXML("NamedInstance", {"nameID": "234"}, [], ttFont=None) + self.assertEqual(["opsz", "slnt"], [a.axisTag for a in fvar.axes]) + self.assertEqual([765, 234], [i.nameID for i in fvar.instances]) + + +class AxisTest(unittest.TestCase): + def test_compile(self): + axis = Axis() + axis.axisTag, axis.nameID = ('opsz', 345) + axis.minValue, axis.defaultValue, axis.maxValue = (-0.5, 1.3, 1.5) + self.assertEqual(FVAR_AXIS_DATA, axis.compile()) + + def test_decompile(self): + axis = Axis() + axis.decompile(FVAR_AXIS_DATA) + self.assertEqual("opsz", axis.axisTag) + self.assertEqual(345, axis.nameID) + self.assertEqual(-0.5, axis.minValue) + self.assertEqual(1.3, axis.defaultValue) + self.assertEqual(1.5, axis.maxValue) + + def test_toXML(self): + font = MakeFont() + axis = Axis() + axis.decompile(FVAR_AXIS_DATA) + AddName(font, "Optical Size").nameID = 256 + axis.nameID = 256 + writer = XMLWriter(BytesIO()) + axis.toXML(writer, font) + self.assertEqual([ + '', + '', + '', + 'opsz', + '-0.5', + '1.3', + '1.5', + '256', + '' + ], xml_lines(writer)) + + def test_fromXML(self): + axis = Axis() + axis.fromXML("Axis", {}, [ + ("AxisTag", {}, ["wght"]), + 
("MinValue", {}, ["100"]), + ("DefaultValue", {}, ["400"]), + ("MaxValue", {}, ["900"]), + ("NameID", {}, ["256"]) + ], ttFont=None) + self.assertEqual("wght", axis.axisTag) + self.assertEqual(100, axis.minValue) + self.assertEqual(400, axis.defaultValue) + self.assertEqual(900, axis.maxValue) + self.assertEqual(256, axis.nameID) + + +class NamedInstanceTest(unittest.TestCase): + def test_compile(self): + inst = NamedInstance() + inst.nameID = 345 + inst.coordinates = {"wght": 0.7, "wdth": 0.5} + self.assertEqual(FVAR_INSTANCE_DATA, inst.compile(["wght", "wdth"])) + + def test_decompile(self): + inst = NamedInstance() + inst.decompile(FVAR_INSTANCE_DATA, ["wght", "wdth"]) + self.assertEqual(345, inst.nameID) + self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) + + def test_toXML(self): + font = MakeFont() + inst = NamedInstance() + inst.nameID = AddName(font, "Light Condensed").nameID + inst.coordinates = {"wght": 0.7, "wdth": 0.5} + writer = XMLWriter(BytesIO()) + inst.toXML(writer, font) + self.assertEqual([ + '', + '', + '' % inst.nameID, + '', + '', + '' + ], xml_lines(writer)) + + def test_fromXML(self): + inst = NamedInstance() + attrs = {"nameID": "345"} + inst.fromXML("NamedInstance", attrs, [ + ("coord", {"axis": "wght", "value": "0.7"}, []), + ("coord", {"axis": "wdth", "value": "0.5"}, []), + ], ttFont=MakeFont()) + self.assertEqual(345, inst.nameID) + self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_g_a_s_p.py fonttools-3.0/Tools/fontTools/ttLib/tables/_g_a_s_p.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_g_a_s_p.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_g_a_s_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,51 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import struct + + +GASP_SYMMETRIC_GRIDFIT = 0x0004 +GASP_SYMMETRIC_SMOOTHING = 0x0008 +GASP_DOGRAY = 0x0002 +GASP_GRIDFIT = 0x0001 + +class table__g_a_s_p(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.version, numRanges = struct.unpack(">HH", data[:4]) + assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version + data = data[4:] + self.gaspRange = {} + for i in range(numRanges): + rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4]) + self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior) + data = data[4:] + assert not data, "too much data" + + def compile(self, ttFont): + version = 0 # ignore self.version + numRanges = len(self.gaspRange) + data = b"" + items = sorted(self.gaspRange.items()) + for rangeMaxPPEM, rangeGaspBehavior in items: + data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior) + if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY): + version = 1 + data = struct.pack(">HH", version, numRanges) + data + return data + + def toXML(self, writer, ttFont): + items = sorted(self.gaspRange.items()) + for rangeMaxPPEM, rangeGaspBehavior in items: + writer.simpletag("gaspRange", [ + ("rangeMaxPPEM", rangeMaxPPEM), + ("rangeGaspBehavior", rangeGaspBehavior)]) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name != "gaspRange": + return + if not hasattr(self, "gaspRange"): + self.gaspRange = {} + self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(attrs["rangeGaspBehavior"]) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/G_D_E_F_.py fonttools-3.0/Tools/fontTools/ttLib/tables/G_D_E_F_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/G_D_E_F_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/G_D_E_F_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + 
+ +class table_G_D_E_F_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_g_l_y_f.py fonttools-3.0/Tools/fontTools/ttLib/tables/_g_l_y_f.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_g_l_y_f.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_g_l_y_f.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1246 @@ +"""_g_l_y_f.py -- Converter classes for the 'glyf' table.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools import ttLib +from fontTools.misc.textTools import safeEval, pad +from fontTools.misc.arrayTools import calcBounds, calcIntBounds, pointInRect +from fontTools.misc.bezierTools import calcQuadraticBounds +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from . import DefaultTable +from . import ttProgram +import sys +import struct +import array +import warnings + +# +# The Apple and MS rasterizers behave differently for +# scaled composite components: one does scale first and then translate +# and the other does it vice versa. MS defined some flags to indicate +# the difference, but it seems nobody actually _sets_ those flags. +# +# Funny thing: Apple seems to _only_ do their thing in the +# WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE +# (eg. Charcoal)... 
+# +SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple + + +class table__g_l_y_f(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + loca = ttFont['loca'] + last = int(loca[0]) + noname = 0 + self.glyphs = {} + self.glyphOrder = glyphOrder = ttFont.getGlyphOrder() + for i in range(0, len(loca)-1): + try: + glyphName = glyphOrder[i] + except IndexError: + noname = noname + 1 + glyphName = 'ttxautoglyph%s' % i + next = int(loca[i+1]) + glyphdata = data[last:next] + if len(glyphdata) != (next - last): + raise ttLib.TTLibError("not enough 'glyf' table data") + glyph = Glyph(glyphdata) + self.glyphs[glyphName] = glyph + last = next + if len(data) - next >= 4: + warnings.warn("too much 'glyf' table data: expected %d, received %d bytes" % + (next, len(data))) + if noname: + warnings.warn('%s glyphs have no name' % noname) + if ttFont.lazy is False: # Be lazy for None and True + for glyph in self.glyphs.values(): + glyph.expand(self) + + def compile(self, ttFont): + if not hasattr(self, "glyphOrder"): + self.glyphOrder = ttFont.getGlyphOrder() + padding = self.padding if hasattr(self, 'padding') else None + locations = [] + currentLocation = 0 + dataList = [] + recalcBBoxes = ttFont.recalcBBoxes + for glyphName in self.glyphOrder: + glyph = self.glyphs[glyphName] + glyphData = glyph.compile(self, recalcBBoxes) + if padding: + glyphData = pad(glyphData, size=padding) + locations.append(currentLocation) + currentLocation = currentLocation + len(glyphData) + dataList.append(glyphData) + locations.append(currentLocation) + + if padding is None and currentLocation < 0x20000: + # See if we can pad any odd-lengthed glyphs to allow loca + # table to use the short offsets. + indices = [i for i,glyphData in enumerate(dataList) if len(glyphData) % 2 == 1] + if indices and currentLocation + len(indices) < 0x20000: + # It fits. Do it. 
+ for i in indices: + dataList[i] += b'\0' + currentLocation = 0 + for i,glyphData in enumerate(dataList): + locations[i] = currentLocation + currentLocation += len(glyphData) + locations[len(dataList)] = currentLocation + + data = bytesjoin(dataList) + if 'loca' in ttFont: + ttFont['loca'].set(locations) + if 'maxp' in ttFont: + ttFont['maxp'].numGlyphs = len(self.glyphs) + return data + + def toXML(self, writer, ttFont, progress=None): + writer.newline() + glyphNames = ttFont.getGlyphNames() + writer.comment("The xMin, yMin, xMax and yMax values\nwill be recalculated by the compiler.") + writer.newline() + writer.newline() + counter = 0 + progressStep = 10 + numGlyphs = len(glyphNames) + for glyphName in glyphNames: + if not counter % progressStep and progress is not None: + progress.setLabel("Dumping 'glyf' table... (%s)" % glyphName) + progress.increment(progressStep / numGlyphs) + counter = counter + 1 + glyph = self[glyphName] + if glyph.numberOfContours: + writer.begintag('TTGlyph', [ + ("name", glyphName), + ("xMin", glyph.xMin), + ("yMin", glyph.yMin), + ("xMax", glyph.xMax), + ("yMax", glyph.yMax), + ]) + writer.newline() + glyph.toXML(writer, ttFont) + writer.endtag('TTGlyph') + writer.newline() + else: + writer.simpletag('TTGlyph', name=glyphName) + writer.comment("contains no outline data") + writer.newline() + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name != "TTGlyph": + return + if not hasattr(self, "glyphs"): + self.glyphs = {} + if not hasattr(self, "glyphOrder"): + self.glyphOrder = ttFont.getGlyphOrder() + glyphName = attrs["name"] + if ttFont.verbose: + ttLib.debugmsg("unpacking glyph '%s'" % glyphName) + glyph = Glyph() + for attr in ['xMin', 'yMin', 'xMax', 'yMax']: + setattr(glyph, attr, safeEval(attrs.get(attr, '0'))) + self.glyphs[glyphName] = glyph + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + glyph.fromXML(name, attrs, content, ttFont) + if 
not ttFont.recalcBBoxes: + glyph.compact(self, 0) + + def setGlyphOrder(self, glyphOrder): + self.glyphOrder = glyphOrder + + def getGlyphName(self, glyphID): + return self.glyphOrder[glyphID] + + def getGlyphID(self, glyphName): + # XXX optimize with reverse dict!!! + return self.glyphOrder.index(glyphName) + + def keys(self): + return self.glyphs.keys() + + def has_key(self, glyphName): + return glyphName in self.glyphs + + __contains__ = has_key + + def __getitem__(self, glyphName): + glyph = self.glyphs[glyphName] + glyph.expand(self) + return glyph + + def __setitem__(self, glyphName, glyph): + self.glyphs[glyphName] = glyph + if glyphName not in self.glyphOrder: + self.glyphOrder.append(glyphName) + + def __delitem__(self, glyphName): + del self.glyphs[glyphName] + self.glyphOrder.remove(glyphName) + + def __len__(self): + assert len(self.glyphOrder) == len(self.glyphs) + return len(self.glyphs) + + +glyphHeaderFormat = """ + > # big endian + numberOfContours: h + xMin: h + yMin: h + xMax: h + yMax: h +""" + +# flags +flagOnCurve = 0x01 +flagXShort = 0x02 +flagYShort = 0x04 +flagRepeat = 0x08 +flagXsame = 0x10 +flagYsame = 0x20 +flagReserved1 = 0x40 +flagReserved2 = 0x80 + +_flagSignBytes = { + 0: 2, + flagXsame: 0, + flagXShort|flagXsame: +1, + flagXShort: -1, + flagYsame: 0, + flagYShort|flagYsame: +1, + flagYShort: -1, +} + +def flagBest(x, y, onCurve): + """For a given x,y delta pair, returns the flag that packs this pair + most efficiently, as well as the number of byte cost of such flag.""" + + flag = flagOnCurve if onCurve else 0 + cost = 0 + # do x + if x == 0: + flag = flag | flagXsame + elif -255 <= x <= 255: + flag = flag | flagXShort + if x > 0: + flag = flag | flagXsame + cost += 1 + else: + cost += 2 + # do y + if y == 0: + flag = flag | flagYsame + elif -255 <= y <= 255: + flag = flag | flagYShort + if y > 0: + flag = flag | flagYsame + cost += 1 + else: + cost += 2 + return flag, cost + +def flagFits(newFlag, oldFlag, mask): + newBytes = 
_flagSignBytes[newFlag & mask] + oldBytes = _flagSignBytes[oldFlag & mask] + return newBytes == oldBytes or abs(newBytes) > abs(oldBytes) + +def flagSupports(newFlag, oldFlag): + return ((oldFlag & flagOnCurve) == (newFlag & flagOnCurve) and + flagFits(newFlag, oldFlag, flagXsame|flagXShort) and + flagFits(newFlag, oldFlag, flagYsame|flagYShort)) + +def flagEncodeCoord(flag, mask, coord, coordBytes): + byteCount = _flagSignBytes[flag & mask] + if byteCount == 1: + coordBytes.append(coord) + elif byteCount == -1: + coordBytes.append(-coord) + elif byteCount == 2: + coordBytes.append((coord >> 8) & 0xFF) + coordBytes.append(coord & 0xFF) + +def flagEncodeCoords(flag, x, y, xBytes, yBytes): + flagEncodeCoord(flag, flagXsame|flagXShort, x, xBytes) + flagEncodeCoord(flag, flagYsame|flagYShort, y, yBytes) + + +ARG_1_AND_2_ARE_WORDS = 0x0001 # if set args are words otherwise they are bytes +ARGS_ARE_XY_VALUES = 0x0002 # if set args are xy values, otherwise they are points +ROUND_XY_TO_GRID = 0x0004 # for the xy values if above is true +WE_HAVE_A_SCALE = 0x0008 # Sx = Sy, otherwise scale == 1.0 +NON_OVERLAPPING = 0x0010 # set to same value for all components (obsolete!) 
+MORE_COMPONENTS = 0x0020 # indicates at least one more glyph after this one +WE_HAVE_AN_X_AND_Y_SCALE = 0x0040 # Sx, Sy +WE_HAVE_A_TWO_BY_TWO = 0x0080 # t00, t01, t10, t11 +WE_HAVE_INSTRUCTIONS = 0x0100 # instructions follow +USE_MY_METRICS = 0x0200 # apply these metrics to parent glyph +OVERLAP_COMPOUND = 0x0400 # used by Apple in GX fonts +SCALED_COMPONENT_OFFSET = 0x0800 # composite designed to have the component offset scaled (designed for Apple) +UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS) + + +class Glyph(object): + + def __init__(self, data=""): + if not data: + # empty char + self.numberOfContours = 0 + return + self.data = data + + def compact(self, glyfTable, recalcBBoxes=True): + data = self.compile(glyfTable, recalcBBoxes) + self.__dict__.clear() + self.data = data + + def expand(self, glyfTable): + if not hasattr(self, "data"): + # already unpacked + return + if not self.data: + # empty char + self.numberOfContours = 0 + return + dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self) + del self.data + if self.isComposite(): + self.decompileComponents(data, glyfTable) + else: + self.decompileCoordinates(data) + + def compile(self, glyfTable, recalcBBoxes=True): + if hasattr(self, "data"): + return self.data + if self.numberOfContours == 0: + return "" + if recalcBBoxes: + self.recalcBounds(glyfTable) + data = sstruct.pack(glyphHeaderFormat, self) + if self.isComposite(): + data = data + self.compileComponents(glyfTable) + else: + data = data + self.compileCoordinates() + return data + + def toXML(self, writer, ttFont): + if self.isComposite(): + for compo in self.components: + compo.toXML(writer, ttFont) + if hasattr(self, "program"): + writer.begintag("instructions") + self.program.toXML(writer, ttFont) + writer.endtag("instructions") + writer.newline() + else: + last = 0 + for i in range(self.numberOfContours): + writer.begintag("contour") + writer.newline() + for j in 
range(last, self.endPtsOfContours[i] + 1): + writer.simpletag("pt", [ + ("x", self.coordinates[j][0]), + ("y", self.coordinates[j][1]), + ("on", self.flags[j] & flagOnCurve)]) + writer.newline() + last = self.endPtsOfContours[i] + 1 + writer.endtag("contour") + writer.newline() + if self.numberOfContours: + writer.begintag("instructions") + self.program.toXML(writer, ttFont) + writer.endtag("instructions") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "contour": + if self.numberOfContours < 0: + raise ttLib.TTLibError("can't mix composites and contours in glyph") + self.numberOfContours = self.numberOfContours + 1 + coordinates = GlyphCoordinates() + flags = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "pt": + continue # ignore anything but "pt" + coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"]))) + flags.append(not not safeEval(attrs["on"])) + flags = array.array("B", flags) + if not hasattr(self, "coordinates"): + self.coordinates = coordinates + self.flags = flags + self.endPtsOfContours = [len(coordinates)-1] + else: + self.coordinates.extend (coordinates) + self.flags.extend(flags) + self.endPtsOfContours.append(len(self.coordinates)-1) + elif name == "component": + if self.numberOfContours > 0: + raise ttLib.TTLibError("can't mix composites and contours in glyph") + self.numberOfContours = -1 + if not hasattr(self, "components"): + self.components = [] + component = GlyphComponent() + self.components.append(component) + component.fromXML(name, attrs, content, ttFont) + elif name == "instructions": + self.program = ttProgram.Program() + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + self.program.fromXML(name, attrs, content, ttFont) + + def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1): + assert self.isComposite() + nContours = 0 + nPoints = 0 + for compo in 
self.components: + baseGlyph = glyfTable[compo.glyphName] + if baseGlyph.numberOfContours == 0: + continue + elif baseGlyph.numberOfContours > 0: + nP, nC = baseGlyph.getMaxpValues() + else: + nP, nC, maxComponentDepth = baseGlyph.getCompositeMaxpValues( + glyfTable, maxComponentDepth + 1) + nPoints = nPoints + nP + nContours = nContours + nC + return nPoints, nContours, maxComponentDepth + + def getMaxpValues(self): + assert self.numberOfContours > 0 + return len(self.coordinates), len(self.endPtsOfContours) + + def decompileComponents(self, data, glyfTable): + self.components = [] + more = 1 + haveInstructions = 0 + while more: + component = GlyphComponent() + more, haveInstr, data = component.decompile(data, glyfTable) + haveInstructions = haveInstructions | haveInstr + self.components.append(component) + if haveInstructions: + numInstructions, = struct.unpack(">h", data[:2]) + data = data[2:] + self.program = ttProgram.Program() + self.program.fromBytecode(data[:numInstructions]) + data = data[numInstructions:] + if len(data) >= 4: + warnings.warn("too much glyph data at the end of composite glyph: %d excess bytes" % len(data)) + + def decompileCoordinates(self, data): + endPtsOfContours = array.array("h") + endPtsOfContours.fromstring(data[:2*self.numberOfContours]) + if sys.byteorder != "big": + endPtsOfContours.byteswap() + self.endPtsOfContours = endPtsOfContours.tolist() + + data = data[2*self.numberOfContours:] + + instructionLength, = struct.unpack(">h", data[:2]) + data = data[2:] + self.program = ttProgram.Program() + self.program.fromBytecode(data[:instructionLength]) + data = data[instructionLength:] + nCoordinates = self.endPtsOfContours[-1] + 1 + flags, xCoordinates, yCoordinates = \ + self.decompileCoordinatesRaw(nCoordinates, data) + + # fill in repetitions and apply signs + self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates) + xIndex = 0 + yIndex = 0 + for i in range(nCoordinates): + flag = flags[i] + # x coordinate + if flag & 
flagXShort: + if flag & flagXsame: + x = xCoordinates[xIndex] + else: + x = -xCoordinates[xIndex] + xIndex = xIndex + 1 + elif flag & flagXsame: + x = 0 + else: + x = xCoordinates[xIndex] + xIndex = xIndex + 1 + # y coordinate + if flag & flagYShort: + if flag & flagYsame: + y = yCoordinates[yIndex] + else: + y = -yCoordinates[yIndex] + yIndex = yIndex + 1 + elif flag & flagYsame: + y = 0 + else: + y = yCoordinates[yIndex] + yIndex = yIndex + 1 + coordinates[i] = (x, y) + assert xIndex == len(xCoordinates) + assert yIndex == len(yCoordinates) + coordinates.relativeToAbsolute() + # discard all flags but for "flagOnCurve" + self.flags = array.array("B", (f & flagOnCurve for f in flags)) + + def decompileCoordinatesRaw(self, nCoordinates, data): + # unpack flags and prepare unpacking of coordinates + flags = array.array("B", [0] * nCoordinates) + # Warning: deep Python trickery going on. We use the struct module to unpack + # the coordinates. We build a format string based on the flags, so we can + # unpack the coordinates in one struct.unpack() call. + xFormat = ">" # big endian + yFormat = ">" # big endian + i = j = 0 + while True: + flag = byteord(data[i]) + i = i + 1 + repeat = 1 + if flag & flagRepeat: + repeat = byteord(data[i]) + 1 + i = i + 1 + for k in range(repeat): + if flag & flagXShort: + xFormat = xFormat + 'B' + elif not (flag & flagXsame): + xFormat = xFormat + 'h' + if flag & flagYShort: + yFormat = yFormat + 'B' + elif not (flag & flagYsame): + yFormat = yFormat + 'h' + flags[j] = flag + j = j + 1 + if j >= nCoordinates: + break + assert j == nCoordinates, "bad glyph flags" + data = data[i:] + # unpack raw coordinates, krrrrrr-tching! 
+ xDataLen = struct.calcsize(xFormat) + yDataLen = struct.calcsize(yFormat) + if len(data) - (xDataLen + yDataLen) >= 4: + warnings.warn("too much glyph data: %d excess bytes" % (len(data) - (xDataLen + yDataLen))) + xCoordinates = struct.unpack(xFormat, data[:xDataLen]) + yCoordinates = struct.unpack(yFormat, data[xDataLen:xDataLen+yDataLen]) + return flags, xCoordinates, yCoordinates + + def compileComponents(self, glyfTable): + data = b"" + lastcomponent = len(self.components) - 1 + more = 1 + haveInstructions = 0 + for i in range(len(self.components)): + if i == lastcomponent: + haveInstructions = hasattr(self, "program") + more = 0 + compo = self.components[i] + data = data + compo.compile(more, haveInstructions, glyfTable) + if haveInstructions: + instructions = self.program.getBytecode() + data = data + struct.pack(">h", len(instructions)) + instructions + return data + + def compileCoordinates(self): + assert len(self.coordinates) == len(self.flags) + data = [] + endPtsOfContours = array.array("h", self.endPtsOfContours) + if sys.byteorder != "big": + endPtsOfContours.byteswap() + data.append(endPtsOfContours.tostring()) + instructions = self.program.getBytecode() + data.append(struct.pack(">h", len(instructions))) + data.append(instructions) + + deltas = self.coordinates.copy() + if deltas.isFloat(): + # Warn? + xPoints = [int(round(x)) for x in xPoints] + yPoints = [int(round(y)) for y in xPoints] + deltas.absoluteToRelative() + + # TODO(behdad): Add a configuration option for this? + deltas = self.compileDeltasGreedy(self.flags, deltas) + #deltas = self.compileDeltasOptimal(self.flags, deltas) + + data.extend(deltas) + return bytesjoin(data) + + def compileDeltasGreedy(self, flags, deltas): + # Implements greedy algorithm for packing coordinate deltas: + # uses shortest representation one coordinate at a time. 
+ compressedflags = [] + xPoints = [] + yPoints = [] + lastflag = None + repeat = 0 + for flag,(x,y) in zip(flags, deltas): + # Oh, the horrors of TrueType + # do x + if x == 0: + flag = flag | flagXsame + elif -255 <= x <= 255: + flag = flag | flagXShort + if x > 0: + flag = flag | flagXsame + else: + x = -x + xPoints.append(bytechr(x)) + else: + xPoints.append(struct.pack(">h", x)) + # do y + if y == 0: + flag = flag | flagYsame + elif -255 <= y <= 255: + flag = flag | flagYShort + if y > 0: + flag = flag | flagYsame + else: + y = -y + yPoints.append(bytechr(y)) + else: + yPoints.append(struct.pack(">h", y)) + # handle repeating flags + if flag == lastflag and repeat != 255: + repeat = repeat + 1 + if repeat == 1: + compressedflags.append(flag) + else: + compressedflags[-2] = flag | flagRepeat + compressedflags[-1] = repeat + else: + repeat = 0 + compressedflags.append(flag) + lastflag = flag + compressedFlags = array.array("B", compressedflags).tostring() + compressedXs = bytesjoin(xPoints) + compressedYs = bytesjoin(yPoints) + return (compressedFlags, compressedXs, compressedYs) + + def compileDeltasOptimal(self, flags, deltas): + # Implements optimal, dynaic-programming, algorithm for packing coordinate + # deltas. The savings are negligible :(. 
+ candidates = [] + bestTuple = None + bestCost = 0 + repeat = 0 + for flag,(x,y) in zip(flags, deltas): + # Oh, the horrors of TrueType + flag, coordBytes = flagBest(x, y, flag) + bestCost += 1 + coordBytes + newCandidates = [(bestCost, bestTuple, flag, coordBytes), + (bestCost+1, bestTuple, (flag|flagRepeat), coordBytes)] + for lastCost,lastTuple,lastFlag,coordBytes in candidates: + if lastCost + coordBytes <= bestCost + 1 and (lastFlag & flagRepeat) and (lastFlag < 0xff00) and flagSupports(lastFlag, flag): + if (lastFlag & 0xFF) == (flag|flagRepeat) and lastCost == bestCost + 1: + continue + newCandidates.append((lastCost + coordBytes, lastTuple, lastFlag+256, coordBytes)) + candidates = newCandidates + bestTuple = min(candidates, key=lambda t:t[0]) + bestCost = bestTuple[0] + + flags = [] + while bestTuple: + cost, bestTuple, flag, coordBytes = bestTuple + flags.append(flag) + flags.reverse() + + compressedFlags = array.array("B") + compressedXs = array.array("B") + compressedYs = array.array("B") + coords = iter(deltas) + ff = [] + for flag in flags: + repeatCount, flag = flag >> 8, flag & 0xFF + compressedFlags.append(flag) + if flag & flagRepeat: + assert(repeatCount > 0) + compressedFlags.append(repeatCount) + else: + assert(repeatCount == 0) + for i in range(1 + repeatCount): + x,y = next(coords) + flagEncodeCoords(flag, x, y, compressedXs, compressedYs) + ff.append(flag) + try: + next(coords) + raise Exception("internal error") + except StopIteration: + pass + compressedFlags = compressedFlags.tostring() + compressedXs = compressedXs.tostring() + compressedYs = compressedYs.tostring() + + return (compressedFlags, compressedXs, compressedYs) + + def recalcBounds(self, glyfTable): + coords, endPts, flags = self.getCoordinates(glyfTable) + if len(coords) > 0: + if 0: + # This branch calculates exact glyph outline bounds + # analytically, handling cases without on-curve + # extremas, etc. 
However, the glyf table header + # simply says that the bounds should be min/max x/y + # "for coordinate data", so I suppose that means no + # fancy thing here, just get extremas of all coord + # points (on and off). As such, this branch is + # disabled. + + # Collect on-curve points + onCurveCoords = [coords[j] for j in range(len(coords)) + if flags[j] & flagOnCurve] + # Add implicit on-curve points + start = 0 + for end in endPts: + last = end + for j in range(start, end + 1): + if not ((flags[j] | flags[last]) & flagOnCurve): + x = (coords[last][0] + coords[j][0]) / 2 + y = (coords[last][1] + coords[j][1]) / 2 + onCurveCoords.append((x,y)) + last = j + start = end + 1 + # Add bounds for curves without an explicit extrema + start = 0 + for end in endPts: + last = end + for j in range(start, end + 1): + if not (flags[j] & flagOnCurve): + next = j + 1 if j < end else start + bbox = calcBounds([coords[last], coords[next]]) + if not pointInRect(coords[j], bbox): + # Ouch! + warnings.warn("Outline has curve with implicit extrema.") + # Ouch! Find analytical curve bounds. 
+ pthis = coords[j] + plast = coords[last] + if not (flags[last] & flagOnCurve): + plast = ((pthis[0]+plast[0])/2, (pthis[1]+plast[1])/2) + pnext = coords[next] + if not (flags[next] & flagOnCurve): + pnext = ((pthis[0]+pnext[0])/2, (pthis[1]+pnext[1])/2) + bbox = calcQuadraticBounds(plast, pthis, pnext) + onCurveCoords.append((bbox[0],bbox[1])) + onCurveCoords.append((bbox[2],bbox[3])) + last = j + start = end + 1 + + self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(onCurveCoords) + else: + self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(coords) + else: + self.xMin, self.yMin, self.xMax, self.yMax = (0, 0, 0, 0) + + def isComposite(self): + """Can be called on compact or expanded glyph.""" + if hasattr(self, "data") and self.data: + return struct.unpack(">h", self.data[:2])[0] == -1 + else: + return self.numberOfContours == -1 + + def __getitem__(self, componentIndex): + if not self.isComposite(): + raise ttLib.TTLibError("can't use glyph as sequence") + return self.components[componentIndex] + + def getCoordinates(self, glyfTable): + if self.numberOfContours > 0: + return self.coordinates, self.endPtsOfContours, self.flags + elif self.isComposite(): + # it's a composite + allCoords = GlyphCoordinates() + allFlags = array.array("B") + allEndPts = [] + for compo in self.components: + g = glyfTable[compo.glyphName] + coordinates, endPts, flags = g.getCoordinates(glyfTable) + if hasattr(compo, "firstPt"): + # move according to two reference points + x1,y1 = allCoords[compo.firstPt] + x2,y2 = coordinates[compo.secondPt] + move = x1-x2, y1-y2 + else: + move = compo.x, compo.y + + coordinates = GlyphCoordinates(coordinates) + if not hasattr(compo, "transform"): + coordinates.translate(move) + else: + apple_way = compo.flags & SCALED_COMPONENT_OFFSET + ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET + assert not (apple_way and ms_way) + if not (apple_way or ms_way): + scale_component_offset = SCALE_COMPONENT_OFFSET_DEFAULT # see top of this file + 
else: + scale_component_offset = apple_way + if scale_component_offset: + # the Apple way: first move, then scale (ie. scale the component offset) + coordinates.translate(move) + coordinates.transform(compo.transform) + else: + # the MS way: first scale, then move + coordinates.transform(compo.transform) + coordinates.translate(move) + offset = len(allCoords) + allEndPts.extend(e + offset for e in endPts) + allCoords.extend(coordinates) + allFlags.extend(flags) + return allCoords, allEndPts, allFlags + else: + return GlyphCoordinates(), [], array.array("B") + + def getComponentNames(self, glyfTable): + if not hasattr(self, "data"): + if self.isComposite(): + return [c.glyphName for c in self.components] + else: + return [] + + # Extract components without expanding glyph + + if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: + return [] # Not composite + + data = self.data + i = 10 + components = [] + more = 1 + while more: + flags, glyphID = struct.unpack(">HH", data[i:i+4]) + i += 4 + flags = int(flags) + components.append(glyfTable.getGlyphName(int(glyphID))) + + if flags & ARG_1_AND_2_ARE_WORDS: i += 4 + else: i += 2 + if flags & WE_HAVE_A_SCALE: i += 2 + elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4 + elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8 + more = flags & MORE_COMPONENTS + + return components + + def trim(self, remove_hinting=False): + """ Remove padding and, if requested, hinting, from a glyph. + This works on both expanded and compacted glyphs, without + expanding it.""" + if not hasattr(self, "data"): + if remove_hinting: + self.program = ttProgram.Program() + self.program.fromBytecode([]) + # No padding to trim. 
+ return + if not self.data: + return + numContours = struct.unpack(">h", self.data[:2])[0] + data = array.array("B", self.data) + i = 10 + if numContours >= 0: + i += 2 * numContours # endPtsOfContours + nCoordinates = ((data[i-2] << 8) | data[i-1]) + 1 + instructionLen = (data[i] << 8) | data[i+1] + if remove_hinting: + # Zero instruction length + data[i] = data [i+1] = 0 + i += 2 + if instructionLen: + # Splice it out + data = data[:i] + data[i+instructionLen:] + instructionLen = 0 + else: + i += 2 + instructionLen + + coordBytes = 0 + j = 0 + while True: + flag = data[i] + i = i + 1 + repeat = 1 + if flag & flagRepeat: + repeat = data[i] + 1 + i = i + 1 + xBytes = yBytes = 0 + if flag & flagXShort: + xBytes = 1 + elif not (flag & flagXsame): + xBytes = 2 + if flag & flagYShort: + yBytes = 1 + elif not (flag & flagYsame): + yBytes = 2 + coordBytes += (xBytes + yBytes) * repeat + j += repeat + if j >= nCoordinates: + break + assert j == nCoordinates, "bad glyph flags" + i += coordBytes + # Remove padding + data = data[:i] + else: + more = 1 + we_have_instructions = False + while more: + flags =(data[i] << 8) | data[i+1] + if remove_hinting: + flags &= ~WE_HAVE_INSTRUCTIONS + if flags & WE_HAVE_INSTRUCTIONS: + we_have_instructions = True + data[i+0] = flags >> 8 + data[i+1] = flags & 0xFF + i += 4 + flags = int(flags) + + if flags & ARG_1_AND_2_ARE_WORDS: i += 4 + else: i += 2 + if flags & WE_HAVE_A_SCALE: i += 2 + elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4 + elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8 + more = flags & MORE_COMPONENTS + if we_have_instructions: + instructionLen = (data[i] << 8) | data[i+1] + i += 2 + instructionLen + # Remove padding + data = data[:i] + + self.data = data.tostring() + + def removeHinting(self): + self.trim (remove_hinting=True) + + def draw(self, pen, glyfTable, offset=0): + + if self.isComposite(): + for component in self.components: + glyphName, transform = component.getComponentInfo() + pen.addComponent(glyphName, transform) + 
return + + coordinates, endPts, flags = self.getCoordinates(glyfTable) + if offset: + coordinates = coordinates.copy() + coordinates.translate((offset, 0)) + start = 0 + for end in endPts: + end = end + 1 + contour = coordinates[start:end] + cFlags = flags[start:end] + start = end + if 1 not in cFlags: + # There is not a single on-curve point on the curve, + # use pen.qCurveTo's special case by specifying None + # as the on-curve point. + contour.append(None) + pen.qCurveTo(*contour) + else: + # Shuffle the points so that contour the is guaranteed + # to *end* in an on-curve point, which we'll use for + # the moveTo. + firstOnCurve = cFlags.index(1) + 1 + contour = contour[firstOnCurve:] + contour[:firstOnCurve] + cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve] + pen.moveTo(contour[-1]) + while contour: + nextOnCurve = cFlags.index(1) + 1 + if nextOnCurve == 1: + pen.lineTo(contour[0]) + else: + pen.qCurveTo(*contour[:nextOnCurve]) + contour = contour[nextOnCurve:] + cFlags = cFlags[nextOnCurve:] + pen.closePath() + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ + + +class GlyphComponent(object): + + def __init__(self): + pass + + def getComponentInfo(self): + """Return the base glyph name and a transform.""" + # XXX Ignoring self.firstPt & self.lastpt for now: I need to implement + # something equivalent in fontTools.objects.glyph (I'd rather not + # convert it to an absolute offset, since it is valuable information). + # This method will now raise "AttributeError: x" on glyphs that use + # this TT feature. 
+ if hasattr(self, "transform"): + [[xx, xy], [yx, yy]] = self.transform + trans = (xx, xy, yx, yy, self.x, self.y) + else: + trans = (1, 0, 0, 1, self.x, self.y) + return self.glyphName, trans + + def decompile(self, data, glyfTable): + flags, glyphID = struct.unpack(">HH", data[:4]) + self.flags = int(flags) + glyphID = int(glyphID) + self.glyphName = glyfTable.getGlyphName(int(glyphID)) + #print ">>", reprflag(self.flags) + data = data[4:] + + if self.flags & ARG_1_AND_2_ARE_WORDS: + if self.flags & ARGS_ARE_XY_VALUES: + self.x, self.y = struct.unpack(">hh", data[:4]) + else: + x, y = struct.unpack(">HH", data[:4]) + self.firstPt, self.secondPt = int(x), int(y) + data = data[4:] + else: + if self.flags & ARGS_ARE_XY_VALUES: + self.x, self.y = struct.unpack(">bb", data[:2]) + else: + x, y = struct.unpack(">BB", data[:2]) + self.firstPt, self.secondPt = int(x), int(y) + data = data[2:] + + if self.flags & WE_HAVE_A_SCALE: + scale, = struct.unpack(">h", data[:2]) + self.transform = [[fi2fl(scale,14), 0], [0, fi2fl(scale,14)]] # fixed 2.14 + data = data[2:] + elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE: + xscale, yscale = struct.unpack(">hh", data[:4]) + self.transform = [[fi2fl(xscale,14), 0], [0, fi2fl(yscale,14)]] # fixed 2.14 + data = data[4:] + elif self.flags & WE_HAVE_A_TWO_BY_TWO: + (xscale, scale01, + scale10, yscale) = struct.unpack(">hhhh", data[:8]) + self.transform = [[fi2fl(xscale,14), fi2fl(scale01,14)], + [fi2fl(scale10,14), fi2fl(yscale,14)]] # fixed 2.14 + data = data[8:] + more = self.flags & MORE_COMPONENTS + haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS + self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | + SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | + NON_OVERLAPPING) + return more, haveInstructions, data + + def compile(self, more, haveInstructions, glyfTable): + data = b"" + + # reset all flags we will calculate ourselves + flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | + SCALED_COMPONENT_OFFSET | 
UNSCALED_COMPONENT_OFFSET | + NON_OVERLAPPING) + if more: + flags = flags | MORE_COMPONENTS + if haveInstructions: + flags = flags | WE_HAVE_INSTRUCTIONS + + if hasattr(self, "firstPt"): + if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255): + data = data + struct.pack(">BB", self.firstPt, self.secondPt) + else: + data = data + struct.pack(">HH", self.firstPt, self.secondPt) + flags = flags | ARG_1_AND_2_ARE_WORDS + else: + flags = flags | ARGS_ARE_XY_VALUES + if (-128 <= self.x <= 127) and (-128 <= self.y <= 127): + data = data + struct.pack(">bb", self.x, self.y) + else: + data = data + struct.pack(">hh", self.x, self.y) + flags = flags | ARG_1_AND_2_ARE_WORDS + + if hasattr(self, "transform"): + transform = [[fl2fi(x,14) for x in row] for row in self.transform] + if transform[0][1] or transform[1][0]: + flags = flags | WE_HAVE_A_TWO_BY_TWO + data = data + struct.pack(">hhhh", + transform[0][0], transform[0][1], + transform[1][0], transform[1][1]) + elif transform[0][0] != transform[1][1]: + flags = flags | WE_HAVE_AN_X_AND_Y_SCALE + data = data + struct.pack(">hh", + transform[0][0], transform[1][1]) + else: + flags = flags | WE_HAVE_A_SCALE + data = data + struct.pack(">h", + transform[0][0]) + + glyphID = glyfTable.getGlyphID(self.glyphName) + return struct.pack(">HH", flags, glyphID) + data + + def toXML(self, writer, ttFont): + attrs = [("glyphName", self.glyphName)] + if not hasattr(self, "firstPt"): + attrs = attrs + [("x", self.x), ("y", self.y)] + else: + attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)] + + if hasattr(self, "transform"): + transform = self.transform + if transform[0][1] or transform[1][0]: + attrs = attrs + [ + ("scalex", transform[0][0]), ("scale01", transform[0][1]), + ("scale10", transform[1][0]), ("scaley", transform[1][1]), + ] + elif transform[0][0] != transform[1][1]: + attrs = attrs + [ + ("scalex", transform[0][0]), ("scaley", transform[1][1]), + ] + else: + attrs = attrs + [("scale", 
transform[0][0])] + attrs = attrs + [("flags", hex(self.flags))] + writer.simpletag("component", attrs) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.glyphName = attrs["glyphName"] + if "firstPt" in attrs: + self.firstPt = safeEval(attrs["firstPt"]) + self.secondPt = safeEval(attrs["secondPt"]) + else: + self.x = safeEval(attrs["x"]) + self.y = safeEval(attrs["y"]) + if "scale01" in attrs: + scalex = safeEval(attrs["scalex"]) + scale01 = safeEval(attrs["scale01"]) + scale10 = safeEval(attrs["scale10"]) + scaley = safeEval(attrs["scaley"]) + self.transform = [[scalex, scale01], [scale10, scaley]] + elif "scalex" in attrs: + scalex = safeEval(attrs["scalex"]) + scaley = safeEval(attrs["scaley"]) + self.transform = [[scalex, 0], [0, scaley]] + elif "scale" in attrs: + scale = safeEval(attrs["scale"]) + self.transform = [[scale, 0], [0, scale]] + self.flags = safeEval(attrs["flags"]) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ + +class GlyphCoordinates(object): + + def __init__(self, iterable=[]): + self._a = array.array("h") + self.extend(iterable) + + def isFloat(self): + return self._a.typecode == 'f' + + def _ensureFloat(self): + if self.isFloat(): + return + # The conversion to list() is to work around Jython bug + self._a = array.array("f", list(self._a)) + + def _checkFloat(self, p): + if any(isinstance(v, float) for v in p): + p = [int(v) if int(v) == v else v for v in p] + if any(isinstance(v, float) for v in p): + self._ensureFloat() + return p + + @staticmethod + def zeros(count): + return GlyphCoordinates([(0,0)] * count) + + def copy(self): + c = GlyphCoordinates() + c._a.extend(self._a) + return c + + def __len__(self): + return len(self._a) // 2 + + def __getitem__(self, k): + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + return [self[i] for i in indices] + 
return self._a[2*k],self._a[2*k+1] + + def __setitem__(self, k, v): + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + # XXX This only works if len(v) == len(indices) + # TODO Implement __delitem__ + for j,i in enumerate(indices): + self[i] = v[j] + return + v = self._checkFloat(v) + self._a[2*k],self._a[2*k+1] = v + + def __repr__(self): + return 'GlyphCoordinates(['+','.join(str(c) for c in self)+'])' + + def append(self, p): + p = self._checkFloat(p) + self._a.extend(tuple(p)) + + def extend(self, iterable): + for p in iterable: + p = self._checkFloat(p) + self._a.extend(p) + + def relativeToAbsolute(self): + a = self._a + x,y = 0,0 + for i in range(len(a) // 2): + a[2*i ] = x = a[2*i ] + x + a[2*i+1] = y = a[2*i+1] + y + + def absoluteToRelative(self): + a = self._a + x,y = 0,0 + for i in range(len(a) // 2): + dx = a[2*i ] - x + dy = a[2*i+1] - y + x = a[2*i ] + y = a[2*i+1] + a[2*i ] = dx + a[2*i+1] = dy + + def translate(self, p): + (x,y) = p + a = self._a + for i in range(len(a) // 2): + a[2*i ] += x + a[2*i+1] += y + + def transform(self, t): + a = self._a + for i in range(len(a) // 2): + x = a[2*i ] + y = a[2*i+1] + px = x * t[0][0] + y * t[1][0] + py = x * t[0][1] + y * t[1][1] + self[i] = (px, py) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self._a == other._a + + +def reprflag(flag): + bin = "" + if isinstance(flag, str): + flag = byteord(flag) + while flag: + if flag & 0x01: + bin = "1" + bin + else: + bin = "0" + bin + flag = flag >> 1 + bin = (14 - len(bin)) * "0" + bin + return bin diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/G_M_A_P_.py fonttools-3.0/Tools/fontTools/ttLib/tables/G_M_A_P_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/G_M_A_P_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/G_M_A_P_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,128 @@ +from __future__ 
import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable + +GMAPFormat = """ + > # big endian + tableVersionMajor: H + tableVersionMinor: H + flags: H + recordsCount: H + recordsOffset: H + fontNameLength: H +""" +# psFontName is a byte string which follows the record above. This is zero padded +# to the beginning of the records array. The recordsOffsst is 32 bit aligned. + +GMAPRecordFormat1 = """ + > # big endian + UV: L + cid: H + gid: H + ggid: H + name: 32s +""" + + +class GMAPRecord(object): + def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""): + self.UV = uv + self.cid = cid + self.gid = gid + self.ggid = ggid + self.name = name + + def toXML(self, writer, ttFont): + writer.begintag("GMAPRecord") + writer.newline() + writer.simpletag("UV", value=self.UV) + writer.newline() + writer.simpletag("cid", value=self.cid) + writer.newline() + writer.simpletag("gid", value=self.gid) + writer.newline() + writer.simpletag("glyphletGid", value=self.gid) + writer.newline() + writer.simpletag("GlyphletName", value=self.name) + writer.newline() + writer.endtag("GMAPRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name == "GlyphletName": + self.name = value + else: + setattr(self, name, safeEval(value)) + + def compile(self, ttFont): + if self.UV is None: + self.UV = 0 + nameLen = len(self.name) + if nameLen < 32: + self.name = self.name + "\0"*(32 - nameLen) + data = sstruct.pack(GMAPRecordFormat1, self) + return data + + def __repr__(self): + return "GMAPRecord[ UV: " + str(self.UV) + ", cid: " + str(self.cid) + ", gid: " + str(self.gid) + ", ggid: " + str(self.ggid) + ", Glyphlet Name: " + str(self.name) + " ]" + + +class table_G_M_A_P_(DefaultTable.DefaultTable): + + dependencies = [] + + def decompile(self, data, ttFont): + dummy, newData = sstruct.unpack2(GMAPFormat, 
data, self) + self.psFontName = tostr(newData[:self.fontNameLength]) + assert (self.recordsOffset % 4) == 0, "GMAP error: recordsOffset is not 32 bit aligned." + newData = data[self.recordsOffset:] + self.gmapRecords = [] + for i in range (self.recordsCount): + gmapRecord, newData = sstruct.unpack2(GMAPRecordFormat1, newData, GMAPRecord()) + gmapRecord.name = gmapRecord.name.strip('\0') + self.gmapRecords.append(gmapRecord) + + def compile(self, ttFont): + self.recordsCount = len(self.gmapRecords) + self.fontNameLength = len(self.psFontName) + self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4) + data = sstruct.pack(GMAPFormat, self) + data = data + tobytes(self.psFontName) + data = data + b"\0" * (self.recordsOffset - len(data)) + for record in self.gmapRecords: + data = data + record.compile(ttFont) + return data + + def toXML(self, writer, ttFont): + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(GMAPFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + writer.simpletag("PSFontName", value=self.psFontName) + writer.newline() + for gmapRecord in self.gmapRecords: + gmapRecord.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "GMAPRecord": + if not hasattr(self, "gmapRecords"): + self.gmapRecords = [] + gmapRecord = GMAPRecord() + self.gmapRecords.append(gmapRecord) + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + gmapRecord.fromXML(name, attrs, content, ttFont) + else: + value = attrs["value"] + if name == "PSFontName": + self.psFontName = value + else: + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/G_P_K_G_.py fonttools-3.0/Tools/fontTools/ttLib/tables/G_P_K_G_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/G_P_K_G_.py 1970-01-01 00:00:00.000000000 
+0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/G_P_K_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,129 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, readHex +from . import DefaultTable +import sys +import array + +GPKGFormat = """ + > # big endian + version: H + flags: H + numGMAPs: H + numGlyplets: H +""" +# psFontName is a byte string which follows the record above. This is zero padded +# to the beginning of the records array. The recordsOffsst is 32 bit aligned. + + +class table_G_P_K_G_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + dummy, newData = sstruct.unpack2(GPKGFormat, data, self) + + GMAPoffsets = array.array("I") + endPos = (self.numGMAPs+1) * 4 + GMAPoffsets.fromstring(newData[:endPos]) + if sys.byteorder != "big": + GMAPoffsets.byteswap() + self.GMAPs = [] + for i in range(self.numGMAPs): + start = GMAPoffsets[i] + end = GMAPoffsets[i+1] + self.GMAPs.append(data[start:end]) + pos = endPos + endPos = pos + (self.numGlyplets + 1)*4 + glyphletOffsets = array.array("I") + glyphletOffsets.fromstring(newData[pos:endPos]) + if sys.byteorder != "big": + glyphletOffsets.byteswap() + self.glyphlets = [] + for i in range(self.numGlyplets): + start = glyphletOffsets[i] + end = glyphletOffsets[i+1] + self.glyphlets.append(data[start:end]) + + def compile(self, ttFont): + self.numGMAPs = len(self.GMAPs) + self.numGlyplets = len(self.glyphlets) + GMAPoffsets = [0]*(self.numGMAPs + 1) + glyphletOffsets = [0]*(self.numGlyplets + 1) + + dataList =[ sstruct.pack(GPKGFormat, self)] + + pos = len(dataList[0]) + (self.numGMAPs + 1)*4 + (self.numGlyplets + 1)*4 + GMAPoffsets[0] = pos + for i in range(1, self.numGMAPs +1): + pos += len(self.GMAPs[i-1]) + GMAPoffsets[i] = pos + gmapArray = array.array("I", GMAPoffsets) + if sys.byteorder != "big": + gmapArray.byteswap() + 
dataList.append(gmapArray.tostring()) + + glyphletOffsets[0] = pos + for i in range(1, self.numGlyplets +1): + pos += len(self.glyphlets[i-1]) + glyphletOffsets[i] = pos + glyphletArray = array.array("I", glyphletOffsets) + if sys.byteorder != "big": + glyphletArray.byteswap() + dataList.append(glyphletArray.tostring()) + dataList += self.GMAPs + dataList += self.glyphlets + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(GPKGFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + + writer.begintag("GMAPs") + writer.newline() + for gmapData in self.GMAPs: + writer.begintag("hexdata") + writer.newline() + writer.dumphex(gmapData) + writer.endtag("hexdata") + writer.newline() + writer.endtag("GMAPs") + writer.newline() + + writer.begintag("glyphlets") + writer.newline() + for glyphletData in self.glyphlets: + writer.begintag("hexdata") + writer.newline() + writer.dumphex(glyphletData) + writer.endtag("hexdata") + writer.newline() + writer.endtag("glyphlets") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "GMAPs": + if not hasattr(self, "GMAPs"): + self.GMAPs = [] + for element in content: + if isinstance(element, basestring): + continue + itemName, itemAttrs, itemContent = element + if itemName == "hexdata": + self.GMAPs.append(readHex(itemContent)) + elif name == "glyphlets": + if not hasattr(self, "glyphlets"): + self.glyphlets = [] + for element in content: + if isinstance(element, basestring): + continue + itemName, itemAttrs, itemContent = element + if itemName == "hexdata": + self.glyphlets.append(readHex(itemContent)) + else: + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/G_P_O_S_.py fonttools-3.0/Tools/fontTools/ttLib/tables/G_P_O_S_.py 
--- fonttools-2.4/Tools/fontTools/ttLib/tables/G_P_O_S_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/G_P_O_S_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_G_P_O_S_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/G_S_U_B_.py fonttools-3.0/Tools/fontTools/ttLib/tables/G_S_U_B_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/G_S_U_B_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/G_S_U_B_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_G_S_U_B_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_g_v_a_r.py fonttools-3.0/Tools/fontTools/ttLib/tables/_g_v_a_r.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_g_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_g_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,717 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import array +import io +import sys +import struct + +# Apple's documentation of 'gvar': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html +# +# FreeType2 source code for parsing 'gvar': +# http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/src/truetype/ttgxvar.c + +GVAR_HEADER_FORMAT = """ + > # big endian + version: H + reserved: H + axisCount: H + sharedCoordCount: H + offsetToCoord: I + glyphCount: H + flags: H + offsetToData: I +""" + +GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT) + +TUPLES_SHARE_POINT_NUMBERS = 0x8000 +TUPLE_COUNT_MASK = 0x0fff + +EMBEDDED_TUPLE_COORD = 0x8000 +INTERMEDIATE_TUPLE = 0x4000 +PRIVATE_POINT_NUMBERS = 0x2000 +TUPLE_INDEX_MASK = 0x0fff + +DELTAS_ARE_ZERO = 0x80 +DELTAS_ARE_WORDS = 0x40 +DELTA_RUN_COUNT_MASK = 0x3f + +POINTS_ARE_WORDS = 0x80 +POINT_RUN_COUNT_MASK = 0x7f + + +class table__g_v_a_r(DefaultTable.DefaultTable): + + dependencies = ["fvar", "glyf"] + + def compile(self, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + + sharedCoords = self.compileSharedCoords_(axisTags) + sharedCoordIndices = {coord:i for i, coord in enumerate(sharedCoords)} + sharedCoordSize = sum([len(c) for c in sharedCoords]) + + compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedCoordIndices) + offset = 0 + offsets = [] + for glyph in compiledGlyphs: + offsets.append(offset) + offset += len(glyph) + offsets.append(offset) + compiledOffsets, tableFormat = self.compileOffsets_(offsets) + + header = {} + header["version"] = self.version + header["reserved"] = self.reserved + header["axisCount"] = len(axisTags) + header["sharedCoordCount"] = len(sharedCoords) + header["offsetToCoord"] = GVAR_HEADER_SIZE + len(compiledOffsets) + header["glyphCount"] = len(compiledGlyphs) + header["flags"] = tableFormat + header["offsetToData"] = header["offsetToCoord"] + sharedCoordSize + compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header) + + result = 
[compiledHeader, compiledOffsets] + result.extend(sharedCoords) + result.extend(compiledGlyphs) + return bytesjoin(result) + + def compileSharedCoords_(self, axisTags): + coordCount = {} + for variations in self.variations.values(): + for gvar in variations: + coord = gvar.compileCoord(axisTags) + coordCount[coord] = coordCount.get(coord, 0) + 1 + sharedCoords = [(count, coord) for (coord, count) in coordCount.items() if count > 1] + sharedCoords.sort(reverse=True) + MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1 + sharedCoords = sharedCoords[:MAX_NUM_SHARED_COORDS] + return [c[1] for c in sharedCoords] # Strip off counts. + + def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices): + result = [] + for glyphName in ttFont.getGlyphOrder(): + glyph = ttFont["glyf"][glyphName] + numPointsInGlyph = self.getNumPoints_(glyph) + result.append(self.compileGlyph_(glyphName, numPointsInGlyph, axisTags, sharedCoordIndices)) + return result + + def compileGlyph_(self, glyphName, numPointsInGlyph, axisTags, sharedCoordIndices): + variations = self.variations.get(glyphName, []) + variations = [v for v in variations if v.hasImpact()] + if len(variations) == 0: + return b"" + + # Each glyph variation tuples modifies a set of control points. To indicate + # which exact points are getting modified, a single tuple can either refer + # to a shared set of points, or the tuple can supply its private point numbers. + # Because the impact of sharing can be positive (no need for a private point list) + # or negative (need to supply 0,0 deltas for unused points), it is not obvious + # how to determine which tuples should take their points from the shared + # pool versus have their own. Perhaps we should resort to brute force, + # and try all combinations? However, if a glyph has n variation tuples, + # we would need to try 2^n combinations (because each tuple may or may not + # be part of the shared set). How many variations tuples do glyphs have? 
+ # + # Skia.ttf: {3: 1, 5: 11, 6: 41, 7: 62, 8: 387, 13: 1, 14: 3} + # JamRegular.ttf: {3: 13, 4: 122, 5: 1, 7: 4, 8: 1, 9: 1, 10: 1} + # BuffaloGalRegular.ttf: {1: 16, 2: 13, 4: 2, 5: 4, 6: 19, 7: 1, 8: 3, 9: 18} + # (Reading example: In Skia.ttf, 41 glyphs have 6 variation tuples). + # + # Is this even worth optimizing? If we never use a shared point list, + # the private lists will consume 112K for Skia, 5K for BuffaloGalRegular, + # and 15K for JamRegular. If we always use a shared point list, + # the shared lists will consume 16K for Skia, 3K for BuffaloGalRegular, + # and 10K for JamRegular. However, in the latter case the delta arrays + # will become larger, but I haven't yet measured by how much. From + # gut feeling (which may be wrong), the optimum is to share some but + # not all points; however, then we would need to try all combinations. + # + # For the time being, we try two variants and then pick the better one: + # (a) each tuple supplies its own private set of points; + # (b) all tuples refer to a shared set of points, which consists of + # "every control point in the glyph". + allPoints = set(range(numPointsInGlyph)) + tuples = [] + data = [] + someTuplesSharePoints = False + for gvar in variations: + privateTuple, privateData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) + sharedTuple, sharedData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=allPoints) + # TODO: If we use shared points, Apple MacOS X 10.9.5 cannot display our fonts. + # This is probably a problem with our code; find the problem and fix it. 
+ #if (len(sharedTuple) + len(sharedData)) < (len(privateTuple) + len(privateData)): + if False: + tuples.append(sharedTuple) + data.append(sharedData) + someTuplesSharePoints = True + else: + tuples.append(privateTuple) + data.append(privateData) + if someTuplesSharePoints: + data = bytechr(0) + bytesjoin(data) # 0x00 = "all points in glyph" + tupleCount = TUPLES_SHARE_POINT_NUMBERS | len(tuples) + else: + data = bytesjoin(data) + tupleCount = len(tuples) + tuples = bytesjoin(tuples) + result = struct.pack(">HH", tupleCount, 4 + len(tuples)) + tuples + data + if len(result) % 2 != 0: + result = result + b"\0" # padding + return result + + def decompile(self, data, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + glyphs = ttFont.getGlyphOrder() + sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self) + assert len(glyphs) == self.glyphCount + assert len(axisTags) == self.axisCount + offsets = self.decompileOffsets_(data[GVAR_HEADER_SIZE:], tableFormat=(self.flags & 1), glyphCount=self.glyphCount) + sharedCoords = self.decompileSharedCoords_(axisTags, data) + self.variations = {} + for i in range(self.glyphCount): + glyphName = glyphs[i] + glyph = ttFont["glyf"][glyphName] + numPointsInGlyph = self.getNumPoints_(glyph) + gvarData = data[self.offsetToData + offsets[i] : self.offsetToData + offsets[i + 1]] + self.variations[glyphName] = \ + self.decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData) + + def decompileSharedCoords_(self, axisTags, data): + result, _pos = GlyphVariation.decompileCoords_(axisTags, self.sharedCoordCount, data, self.offsetToCoord) + return result + + @staticmethod + def decompileOffsets_(data, tableFormat, glyphCount): + if tableFormat == 0: + # Short format: array of UInt16 + offsets = array.array("H") + offsetsSize = (glyphCount + 1) * 2 + else: + # Long format: array of UInt32 + offsets = array.array("I") + offsetsSize = (glyphCount + 1) * 4 + offsets.fromstring(data[0 : offsetsSize]) + if 
sys.byteorder != "big": + offsets.byteswap() + + # In the short format, offsets need to be multiplied by 2. + # This is not documented in Apple's TrueType specification, + # but can be inferred from the FreeType implementation, and + # we could verify it with two sample GX fonts. + if tableFormat == 0: + offsets = [off * 2 for off in offsets] + + return offsets + + @staticmethod + def compileOffsets_(offsets): + """Packs a list of offsets into a 'gvar' offset table. + + Returns a pair (bytestring, tableFormat). Bytestring is the + packed offset table. Format indicates whether the table + uses short (tableFormat=0) or long (tableFormat=1) integers. + The returned tableFormat should get packed into the flags field + of the 'gvar' header. + """ + assert len(offsets) >= 2 + for i in range(1, len(offsets)): + assert offsets[i - 1] <= offsets[i] + if max(offsets) <= 0xffff * 2: + packed = array.array("H", [n >> 1 for n in offsets]) + tableFormat = 0 + else: + packed = array.array("I", offsets) + tableFormat = 1 + if sys.byteorder != "big": + packed.byteswap() + return (packed.tostring(), tableFormat) + + def decompileGlyph_(self, numPointsInGlyph, sharedCoords, axisTags, data): + if len(data) < 4: + return [] + numAxes = len(axisTags) + tuples = [] + flags, offsetToData = struct.unpack(">HH", data[:4]) + pos = 4 + dataPos = offsetToData + if (flags & TUPLES_SHARE_POINT_NUMBERS) != 0: + sharedPoints, dataPos = GlyphVariation.decompilePoints_(numPointsInGlyph, data, dataPos) + else: + sharedPoints = [] + for _ in range(flags & TUPLE_COUNT_MASK): + dataSize, flags = struct.unpack(">HH", data[pos:pos+4]) + tupleSize = GlyphVariation.getTupleSize_(flags, numAxes) + tupleData = data[pos : pos + tupleSize] + pointDeltaData = data[dataPos : dataPos + dataSize] + tuples.append(self.decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, tupleData, pointDeltaData)) + pos += tupleSize + dataPos += dataSize + return tuples + + @staticmethod + def 
decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, data, tupleData): + flags = struct.unpack(">H", data[2:4])[0] + + pos = 4 + if (flags & EMBEDDED_TUPLE_COORD) == 0: + coord = sharedCoords[flags & TUPLE_INDEX_MASK] + else: + coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + if (flags & INTERMEDIATE_TUPLE) != 0: + minCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + maxCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + else: + minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) + axes = {} + for axis in axisTags: + coords = minCoord[axis], coord[axis], maxCoord[axis] + if coords != (0.0, 0.0, 0.0): + axes[axis] = coords + pos = 0 + if (flags & PRIVATE_POINT_NUMBERS) != 0: + points, pos = GlyphVariation.decompilePoints_(numPointsInGlyph, tupleData, pos) + else: + points = sharedPoints + deltas_x, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) + deltas_y, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) + deltas = [None] * numPointsInGlyph + for p, x, y in zip(points, deltas_x, deltas_y): + deltas[p] = (x, y) + return GlyphVariation(axes, deltas) + + @staticmethod + def computeMinMaxCoord_(coord): + minCoord = {} + maxCoord = {} + for (axis, value) in coord.items(): + minCoord[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + maxCoord[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + return (minCoord, maxCoord) + + def toXML(self, writer, ttFont, progress=None): + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("reserved", value=self.reserved) + writer.newline() + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + for glyphName in ttFont.getGlyphOrder(): + variations = self.variations.get(glyphName) + if not variations: + continue + writer.begintag("glyphVariations", glyph=glyphName) + writer.newline() + for gvar in variations: + gvar.toXML(writer, axisTags) + writer.endtag("glyphVariations") + 
writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.version = safeEval(attrs["value"]) + elif name == "reserved": + self.reserved = safeEval(attrs["value"]) + elif name == "glyphVariations": + if not hasattr(self, "variations"): + self.variations = {} + glyphName = attrs["glyph"] + glyph = ttFont["glyf"][glyphName] + numPointsInGlyph = self.getNumPoints_(glyph) + glyphVariations = [] + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + if name == "tuple": + gvar = GlyphVariation({}, [None] * numPointsInGlyph) + glyphVariations.append(gvar) + for tupleElement in content: + if isinstance(tupleElement, tuple): + tupleName, tupleAttrs, tupleContent = tupleElement + gvar.fromXML(tupleName, tupleAttrs, tupleContent) + self.variations[glyphName] = glyphVariations + + @staticmethod + def getNumPoints_(glyph): + NUM_PHANTOM_POINTS = 4 + if glyph.isComposite(): + return len(glyph.components) + NUM_PHANTOM_POINTS + else: + # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute. + return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS + + +class GlyphVariation(object): + def __init__(self, axes, coordinates): + self.axes = axes + self.coordinates = coordinates + + def __repr__(self): + axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])) + return "" % (axes, self.coordinates) + + def __eq__(self, other): + return self.coordinates == other.coordinates and self.axes == other.axes + + def getUsedPoints(self): + result = set() + for i, point in enumerate(self.coordinates): + if point is not None: + result.add(i) + return result + + def hasImpact(self): + """Returns True if this GlyphVariation has any visible impact. + + If the result is False, the GlyphVariation can be omitted from the font + without making any visible difference. 
+ """ + for c in self.coordinates: + if c is not None: + return True + return False + + def toXML(self, writer, axisTags): + writer.begintag("tuple") + writer.newline() + for axis in axisTags: + value = self.axes.get(axis) + if value is not None: + minValue, value, maxValue = value + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + if minValue == defaultMinValue and maxValue == defaultMaxValue: + writer.simpletag("coord", axis=axis, value=value) + else: + writer.simpletag("coord", axis=axis, value=value, min=minValue, max=maxValue) + writer.newline() + wrote_any_points = False + for i, point in enumerate(self.coordinates): + if point is not None: + writer.simpletag("delta", pt=i, x=point[0], y=point[1]) + writer.newline() + wrote_any_points = True + if not wrote_any_points: + writer.comment("no deltas") + writer.newline() + writer.endtag("tuple") + writer.newline() + + def fromXML(self, name, attrs, _content): + if name == "coord": + axis = attrs["axis"] + value = float(attrs["value"]) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + minValue = float(attrs.get("min", defaultMinValue)) + maxValue = float(attrs.get("max", defaultMaxValue)) + self.axes[axis] = (minValue, value, maxValue) + elif name == "delta": + point = safeEval(attrs["pt"]) + x = safeEval(attrs["x"]) + y = safeEval(attrs["y"]) + self.coordinates[point] = (x, y) + + def compile(self, axisTags, sharedCoordIndices, sharedPoints): + tupleData = [] + + coord = self.compileCoord(axisTags) + if coord in sharedCoordIndices: + flags = sharedCoordIndices[coord] + else: + flags = EMBEDDED_TUPLE_COORD + tupleData.append(coord) + + intermediateCoord = self.compileIntermediateCoord(axisTags) + if intermediateCoord is not None: + flags |= INTERMEDIATE_TUPLE + tupleData.append(intermediateCoord) + + if sharedPoints is not None: + auxData = 
self.compileDeltas(sharedPoints) + else: + flags |= PRIVATE_POINT_NUMBERS + points = self.getUsedPoints() + numPointsInGlyph = len(self.coordinates) + auxData = self.compilePoints(points, numPointsInGlyph) + self.compileDeltas(points) + + tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(tupleData) + return (tupleData, auxData) + + def compileCoord(self, axisTags): + result = [] + for axis in axisTags: + _minValue, value, _maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + result.append(struct.pack(">h", floatToFixed(value, 14))) + return bytesjoin(result) + + def compileIntermediateCoord(self, axisTags): + needed = False + for axis in axisTags: + minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + if (minValue != defaultMinValue) or (maxValue != defaultMaxValue): + needed = True + break + if not needed: + return None + minCoords = [] + maxCoords = [] + for axis in axisTags: + minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + minCoords.append(struct.pack(">h", floatToFixed(minValue, 14))) + maxCoords.append(struct.pack(">h", floatToFixed(maxValue, 14))) + return bytesjoin(minCoords + maxCoords) + + @staticmethod + def decompileCoord_(axisTags, data, offset): + coord = {} + pos = offset + for axis in axisTags: + coord[axis] = fixedToFloat(struct.unpack(">h", data[pos:pos+2])[0], 14) + pos += 2 + return coord, pos + + @staticmethod + def decompileCoords_(axisTags, numCoords, data, offset): + result = [] + pos = offset + for _ in range(numCoords): + coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) + result.append(coord) + return result, pos + + @staticmethod + def compilePoints(points, numPointsInGlyph): + # If the set consists of all points in the glyph, it gets encoded with + # a special encoding: a single zero byte. 
+ if len(points) == numPointsInGlyph: + return b"\0" + + # In the 'gvar' table, the packing of point numbers is a little surprising. + # It consists of multiple runs, each being a delta-encoded list of integers. + # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as + # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1. + # There are two types of runs, with values being either 8 or 16 bit unsigned + # integers. + points = list(points) + points.sort() + numPoints = len(points) + + # The binary representation starts with the total number of points in the set, + # encoded into one or two bytes depending on the value. + if numPoints < 0x80: + result = [bytechr(numPoints)] + else: + result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)] + + MAX_RUN_LENGTH = 127 + pos = 0 + while pos < numPoints: + run = io.BytesIO() + runLength = 0 + lastValue = 0 + useByteEncoding = (points[pos] <= 0xff) + while pos < numPoints and runLength <= MAX_RUN_LENGTH: + curValue = points[pos] + delta = curValue - lastValue + if useByteEncoding and delta > 0xff: + # we need to start a new run (which will not use byte encoding) + break + if useByteEncoding: + run.write(bytechr(delta)) + else: + run.write(bytechr(delta >> 8)) + run.write(bytechr(delta & 0xff)) + lastValue = curValue + pos += 1 + runLength += 1 + if useByteEncoding: + runHeader = bytechr(runLength - 1) + else: + runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS) + result.append(runHeader) + result.append(run.getvalue()) + + return bytesjoin(result) + + @staticmethod + def decompilePoints_(numPointsInGlyph, data, offset): + """(numPointsInGlyph, data, offset) --> ([point1, point2, ...], newOffset)""" + pos = offset + numPointsInData = byteord(data[pos]) + pos += 1 + if (numPointsInData & POINTS_ARE_WORDS) != 0: + numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | byteord(data[pos]) + pos += 1 + if numPointsInData == 0: + return 
(range(numPointsInGlyph), pos) + result = [] + while len(result) < numPointsInData: + runHeader = byteord(data[pos]) + pos += 1 + numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1 + point = 0 + if (runHeader & POINTS_ARE_WORDS) == 0: + for _ in range(numPointsInRun): + point += byteord(data[pos]) + pos += 1 + result.append(point) + else: + for _ in range(numPointsInRun): + point += struct.unpack(">H", data[pos:pos+2])[0] + pos += 2 + result.append(point) + if max(result) >= numPointsInGlyph: + raise TTLibError("malformed 'gvar' table") + return (result, pos) + + def compileDeltas(self, points): + deltaX = [] + deltaY = [] + for p in sorted(list(points)): + c = self.coordinates[p] + if c is not None: + deltaX.append(c[0]) + deltaY.append(c[1]) + return self.compileDeltaValues_(deltaX) + self.compileDeltaValues_(deltaY) + + @staticmethod + def compileDeltaValues_(deltas): + """[value1, value2, value3, ...] --> bytestring + + Emits a sequence of runs. Each run starts with a + byte-sized header whose 6 least significant bits + (header & 0x3F) indicate how many values are encoded + in this run. The stored length is the actual length + minus one; run lengths are thus in the range [1..64]. + If the header byte has its most significant bit (0x80) + set, all values in this run are zero, and no data + follows. Otherwise, the header byte is followed by + ((header & 0x3F) + 1) signed values. If (header & + 0x40) is clear, the delta values are stored as signed + bytes; if (header & 0x40) is set, the delta values are + signed 16-bit integers. + """ # Explaining the format because the 'gvar' spec is hard to understand. 
+ stream = io.BytesIO() + pos = 0 + while pos < len(deltas): + value = deltas[pos] + if value == 0: + pos = GlyphVariation.encodeDeltaRunAsZeroes_(deltas, pos, stream) + elif value >= -128 and value <= 127: + pos = GlyphVariation.encodeDeltaRunAsBytes_(deltas, pos, stream) + else: + pos = GlyphVariation.encodeDeltaRunAsWords_(deltas, pos, stream) + return stream.getvalue() + + @staticmethod + def encodeDeltaRunAsZeroes_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64 and deltas[pos] == 0: + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(DELTAS_ARE_ZERO | (runLength - 1))) + return pos + + @staticmethod + def encodeDeltaRunAsBytes_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64: + value = deltas[pos] + if value < -128 or value > 127: + break + # Within a byte-encoded run of deltas, a single zero + # is best stored literally as 0x00 value. However, + # if are two or more zeroes in a sequence, it is + # better to start a new run. For example, the sequence + # of deltas [15, 15, 0, 15, 15] becomes 6 bytes + # (04 0F 0F 00 0F 0F) when storing the zero value + # literally, but 7 bytes (01 0F 0F 80 01 0F 0F) + # when starting a new run. + if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0: + break + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(runLength - 1)) + for i in range(offset, pos): + stream.write(struct.pack('b', deltas[i])) + return pos + + @staticmethod + def encodeDeltaRunAsWords_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64: + value = deltas[pos] + # Within a word-encoded run of deltas, it is easiest + # to start a new run (with a different encoding) + # whenever we encounter a zero value. 
For example, + # the sequence [0x6666, 0, 0x7777] needs 7 bytes when + # storing the zero literally (42 66 66 00 00 77 77), + # and equally 7 bytes when starting a new run + # (40 66 66 80 40 77 77). + if value == 0: + break + + # Within a word-encoded run of deltas, a single value + # in the range (-128..127) should be encoded literally + # because it is more compact. For example, the sequence + # [0x6666, 2, 0x7777] becomes 7 bytes when storing + # the value literally (42 66 66 00 02 77 77), but 8 bytes + # when starting a new run (40 66 66 00 02 40 77 77). + isByteEncodable = lambda value: value >= -128 and value <= 127 + if isByteEncodable(value) and pos+1 < numDeltas and isByteEncodable(deltas[pos+1]): + break + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(DELTAS_ARE_WORDS | (runLength - 1))) + for i in range(offset, pos): + stream.write(struct.pack('>h', deltas[i])) + return pos + + @staticmethod + def decompileDeltas_(numDeltas, data, offset): + """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)""" + result = [] + pos = offset + while len(result) < numDeltas: + runHeader = byteord(data[pos]) + pos += 1 + numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1 + if (runHeader & DELTAS_ARE_ZERO) != 0: + result.extend([0] * numDeltasInRun) + elif (runHeader & DELTAS_ARE_WORDS) != 0: + for _ in range(numDeltasInRun): + result.append(struct.unpack(">h", data[pos:pos+2])[0]) + pos += 2 + else: + for _ in range(numDeltasInRun): + result.append(struct.unpack(">b", data[pos:pos+1])[0]) + pos += 1 + assert len(result) == numDeltas + return (result, pos) + + @staticmethod + def getTupleSize_(flags, axisCount): + size = 4 + if (flags & EMBEDDED_TUPLE_COORD) != 0: + size += axisCount * 2 + if (flags & INTERMEDIATE_TUPLE) != 0: + size += axisCount * 4 + return size diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_g_v_a_r_test.py fonttools-3.0/Tools/fontTools/ttLib/tables/_g_v_a_r_test.py --- 
fonttools-2.4/Tools/fontTools/ttLib/tables/_g_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_g_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,539 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r, GlyphVariation +import random +import unittest + +def hexencode(s): + h = hexStr(s).upper() + return ' '.join([h[i:i+2] for i in range(0, len(h), 2)]) + +# Glyph variation table of uppercase I in the Skia font, as printed in Apple's +# TrueType spec. The actual Skia font uses a different table for uppercase I. +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html +SKIA_GVAR_I = deHexStr( + "00 08 00 24 00 33 20 00 00 15 20 01 00 1B 20 02 " + "00 24 20 03 00 15 20 04 00 26 20 07 00 0D 20 06 " + "00 1A 20 05 00 40 01 01 01 81 80 43 FF 7E FF 7E " + "FF 7E FF 7E 00 81 45 01 01 01 03 01 04 01 04 01 " + "04 01 02 80 40 00 82 81 81 04 3A 5A 3E 43 20 81 " + "04 0E 40 15 45 7C 83 00 0D 9E F3 F2 F0 F0 F0 F0 " + "F3 9E A0 A1 A1 A1 9F 80 00 91 81 91 00 0D 0A 0A " + "09 0A 0A 0A 0A 0A 0A 0A 0A 0A 0A 0B 80 00 15 81 " + "81 00 C4 89 00 C4 83 00 0D 80 99 98 96 96 96 96 " + "99 80 82 83 83 83 81 80 40 FF 18 81 81 04 E6 F9 " + "10 21 02 81 04 E8 E5 EB 4D DA 83 00 0D CE D3 D4 " + "D3 D3 D3 D5 D2 CE CC CD CD CD CD 80 00 A1 81 91 " + "00 0D 07 03 04 02 02 02 03 03 07 07 08 08 08 07 " + "80 00 09 81 81 00 28 40 00 A4 02 24 24 66 81 04 " + "08 FA FA FA 28 83 00 82 02 FF FF FF 83 02 01 01 " + "01 84 91 00 80 06 07 08 08 08 08 0A 07 80 03 FE " + "FF FF FF 81 00 08 81 82 02 EE EE EE 8B 6D 00") + +# Shared coordinates in the Skia font, as printed in Apple's TrueType spec. 
+SKIA_SHARED_COORDS = deHexStr( + "40 00 00 00 C0 00 00 00 00 00 40 00 00 00 C0 00 " + "C0 00 C0 00 40 00 C0 00 40 00 40 00 C0 00 40 00") + + +class GlyphVariationTableTest(unittest.TestCase): + def test_compileOffsets_shortFormat(self): + self.assertEqual((deHexStr("00 00 00 02 FF C0"), 0), + table__g_v_a_r.compileOffsets_([0, 4, 0x1ff80])) + + def test_compileOffsets_longFormat(self): + self.assertEqual((deHexStr("00 00 00 00 00 00 00 04 CA FE BE EF"), 1), + table__g_v_a_r.compileOffsets_([0, 4, 0xCAFEBEEF])) + + def test_decompileOffsets_shortFormat(self): + decompileOffsets = table__g_v_a_r.decompileOffsets_ + data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") + self.assertEqual([2*0x0011, 2*0x2233, 2*0x4455, 2*0x6677, 2*0x8899, 2*0xaabb], + list(decompileOffsets(data, tableFormat=0, glyphCount=5))) + + def test_decompileOffsets_longFormat(self): + decompileOffsets = table__g_v_a_r.decompileOffsets_ + data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") + self.assertEqual([0x00112233, 0x44556677, 0x8899aabb], + list(decompileOffsets(data, tableFormat=1, glyphCount=2))) + + def test_compileGlyph_noVariations(self): + table = table__g_v_a_r() + table.variations = {} + self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) + + def test_compileGlyph_emptyVariations(self): + table = table__g_v_a_r() + table.variations = {"glyphname": []} + self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) + + def test_compileGlyph_onlyRedundantVariations(self): + table = table__g_v_a_r() + axes = {"wght": (0.3, 0.4, 0.5), "opsz": (0.7, 0.8, 0.9)} + table.variations = {"glyphname": [ + GlyphVariation(axes, [None] * 4), + GlyphVariation(axes, [None] * 4), + GlyphVariation(axes, [None] * 4) + ]} + self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) + + def test_compileGlyph_roundTrip(self): + table = table__g_v_a_r() + axisTags = ["wght", "wdth"] + numPointsInGlyph = 4 + glyphCoords = [(1,1), 
(2,2), (3,3), (4,4)] + gvar1 = GlyphVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) + gvar2 = GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) + table.variations = {"oslash": [gvar1, gvar2]} + data = table.compileGlyph_("oslash", numPointsInGlyph, axisTags, {}) + self.assertEqual([gvar1, gvar2], table.decompileGlyph_(numPointsInGlyph, {}, axisTags, data)) + + def test_compileSharedCoords(self): + table = table__g_v_a_r() + table.variations = {} + deltas = [None] * 4 + table.variations["A"] = [ + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.5, 0.7, 1.0)}, deltas) + ] + table.variations["B"] = [ + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.7, 1.0)}, deltas), + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.8, 1.0)}, deltas) + ] + table.variations["C"] = [ + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.7, 1.0)}, deltas), + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.8, 1.0)}, deltas), + GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.9, 1.0)}, deltas) + ] + # {"wght":1.0, "wdth":0.7} is shared 3 times; {"wght":1.0, "wdth":0.8} is shared twice. + # Min and max values are not part of the shared coordinate pool and should get ignored. 
+ result = table.compileSharedCoords_(["wght", "wdth"]) + self.assertEqual(["40 00 2C CD", "40 00 33 33"], [hexencode(c) for c in result]) + + def test_decompileSharedCoords_Skia(self): + table = table__g_v_a_r() + table.offsetToCoord = 0 + table.sharedCoordCount = 8 + sharedCoords = table.decompileSharedCoords_(["wght", "wdth"], SKIA_SHARED_COORDS) + self.assertEqual([ + {"wght": 1.0, "wdth": 0.0}, + {"wght": -1.0, "wdth": 0.0}, + {"wght": 0.0, "wdth": 1.0}, + {"wght": 0.0, "wdth": -1.0}, + {"wght": -1.0, "wdth": -1.0}, + {"wght": 1.0, "wdth": -1.0}, + {"wght": 1.0, "wdth": 1.0}, + {"wght": -1.0, "wdth": 1.0} + ], sharedCoords) + + def test_decompileSharedCoords_empty(self): + table = table__g_v_a_r() + table.offsetToCoord = 0 + table.sharedCoordCount = 0 + self.assertEqual([], table.decompileSharedCoords_(["wght"], b"")) + + def test_decompileGlyph_Skia_I(self): + axes = ["wght", "wdth"] + table = table__g_v_a_r() + table.offsetToCoord = 0 + table.sharedCoordCount = 8 + table.axisCount = len(axes) + sharedCoords = table.decompileSharedCoords_(axes, SKIA_SHARED_COORDS) + tuples = table.decompileGlyph_(18, sharedCoords, axes, SKIA_GVAR_I) + self.assertEqual(8, len(tuples)) + self.assertEqual({"wght": (0.0, 1.0, 1.0)}, tuples[0].axes) + self.assertEqual("257,0 -127,0 -128,58 -130,90 -130,62 -130,67 -130,32 -127,0 257,0 " + "259,14 260,64 260,21 260,69 258,124 0,0 130,0 0,0 0,0", + " ".join(["%d,%d" % c for c in tuples[0].coordinates])) + + def test_decompileGlyph_empty(self): + table = table__g_v_a_r() + self.assertEqual([], table.decompileGlyph_(numPointsInGlyph=5, sharedCoords=[], axisTags=[], data=b"")) + + def test_computeMinMaxCord(self): + coord = {"wght": -0.3, "wdth": 0.7} + minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) + self.assertEqual({"wght": -0.3, "wdth": 0.0}, minCoord) + self.assertEqual({"wght": 0.0, "wdth": 0.7}, maxCoord) + +class GlyphVariationTest(unittest.TestCase): + def test_equal(self): + gvar1 = GlyphVariation({"wght":(0.0, 
1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + self.assertEqual(gvar1, gvar2) + + def test_equal_differentAxes(self): + gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + gvar2 = GlyphVariation({"wght":(0.7, 0.8, 0.9)}, [(0,0), (9,8), (7,6)]) + self.assertNotEqual(gvar1, gvar2) + + def test_equal_differentCoordinates(self): + gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8)]) + self.assertNotEqual(gvar1, gvar2) + + def test_hasImpact_someDeltasNotZero(self): + axes = {"wght":(0.0, 1.0, 1.0)} + gvar = GlyphVariation(axes, [(0,0), (9,8), (7,6)]) + self.assertTrue(gvar.hasImpact()) + + def test_hasImpact_allDeltasZero(self): + axes = {"wght":(0.0, 1.0, 1.0)} + gvar = GlyphVariation(axes, [(0,0), (0,0), (0,0)]) + self.assertTrue(gvar.hasImpact()) + + def test_hasImpact_allDeltasNone(self): + axes = {"wght":(0.0, 1.0, 1.0)} + gvar = GlyphVariation(axes, [None, None, None]) + self.assertFalse(gvar.hasImpact()) + + def test_toXML(self): + writer = XMLWriter(BytesIO()) + axes = {"wdth":(0.3, 0.4, 0.5), "wght":(0.0, 1.0, 1.0), "opsz":(-0.7, -0.7, 0.0)} + g = GlyphVariation(axes, [(9,8), None, (7,6), (0,0), (-1,-2), None]) + g.toXML(writer, ["wdth", "wght", "opsz"]) + self.assertEqual([ + '', + '', + '', + '', + '', + '', + '', + '', + '' + ], GlyphVariationTest.xml_lines(writer)) + + def test_toXML_allDeltasNone(self): + writer = XMLWriter(BytesIO()) + axes = {"wght":(0.0, 1.0, 1.0)} + g = GlyphVariation(axes, [None] * 5) + g.toXML(writer, ["wght", "wdth"]) + self.assertEqual([ + '', + '', + '', + '' + ], GlyphVariationTest.xml_lines(writer)) + + def test_fromXML(self): + g = GlyphVariation({}, [None] * 4) + g.fromXML("coord", {"axis":"wdth", "min":"0.3", "value":"0.4", "max":"0.5"}, []) + g.fromXML("coord", {"axis":"wght", "value":"1.0"}, []) + g.fromXML("coord", {"axis":"opsz", "value":"-0.5"}, []) + 
g.fromXML("delta", {"pt":"1", "x":"33", "y":"44"}, []) + g.fromXML("delta", {"pt":"2", "x":"-2", "y":"170"}, []) + self.assertEqual({ + "wdth":( 0.3, 0.4, 0.5), + "wght":( 0.0, 1.0, 1.0), + "opsz":(-0.5, -0.5, 0.0) + }, g.axes) + self.assertEqual([None, (33, 44), (-2, 170), None], g.coordinates) + + def test_compile_sharedCoords_nonIntermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) + # len(data)=8; flags=None; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[] + self.assertEqual("00 08 00 77", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_sharedCoords_intermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.3, 0.5, 0.7), "wdth": (0.1, 0.8, 0.9)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) + # len(data)=8; flags=INTERMEDIATE_TUPLE; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[(0.3, 0.1), (0.7, 0.9)] + self.assertEqual("00 08 40 77 13 33 06 66 2C CD 39 9A", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_sharedCoords_nonIntermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[] + 
self.assertEqual("00 09 20 77", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_sharedCoords_intermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 1.0)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } + tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 + # embeddedCoord=[]; intermediateCoord=[(0.0, 0.0), (1.0, 1.0)] + self.assertEqual("00 09 60 77 00 00 00 00 40 00 40 00", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_nonIntermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) + # len(data)=8; flags=EMBEDDED_TUPLE_COORD + # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] + self.assertEqual("00 08 80 00 20 00 33 33", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_intermediate_sharedPoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) + # len(data)=8; flags=EMBEDDED_TUPLE_COORD + # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[(0.0, 0.0), (1.0, 0.8)] + self.assertEqual("00 08 C0 00 20 00 33 33 00 00 00 00 40 00 33 33", hexencode(tuple)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # 
deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_nonIntermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_TUPLE_COORD + # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] + self.assertEqual("00 09 A0 00 20 00 33 33", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compile_embeddedCoords_intermediate_privatePoints(self): + gvar = GlyphVariation({"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) + # len(data)=13; flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_TUPLE|EMBEDDED_TUPLE_COORD + # embeddedCoord=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)] + self.assertEqual("00 09 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A", hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(data)) + + def test_compileCoord(self): + gvar = GlyphVariation({"wght": (-1.0, -1.0, -1.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) + self.assertEqual("C0 00 20 00", hexencode(gvar.compileCoord(["wght", "wdth"]))) + self.assertEqual("20 00 C0 00", hexencode(gvar.compileCoord(["wdth", "wght"]))) + self.assertEqual("C0 00", hexencode(gvar.compileCoord(["wght"]))) + + def test_compileIntermediateCoord(self): + gvar = GlyphVariation({"wght": (-1.0, -1.0, 0.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) + self.assertEqual("C0 00 19 9A 00 00 26 66", hexencode(gvar.compileIntermediateCoord(["wght", "wdth"]))) + self.assertEqual("19 9A C0 00 26 66 00 00", 
hexencode(gvar.compileIntermediateCoord(["wdth", "wght"]))) + self.assertEqual(None, gvar.compileIntermediateCoord(["wght"])) + self.assertEqual("19 9A 26 66", hexencode(gvar.compileIntermediateCoord(["wdth"]))) + + def test_decompileCoord(self): + decompileCoord = GlyphVariation.decompileCoord_ + data = deHexStr("DE AD C0 00 20 00 DE AD") + self.assertEqual(({"wght": -1.0, "wdth": 0.5}, 6), decompileCoord(["wght", "wdth"], data, 2)) + + def test_decompileCoord_roundTrip(self): + # Make sure we are not affected by https://github.com/behdad/fonttools/issues/286 + data = deHexStr("7F B9 80 35") + values, _ = GlyphVariation.decompileCoord_(["wght", "wdth"], data, 0) + axisValues = {axis:(val, val, val) for axis, val in values.items()} + gvar = GlyphVariation(axisValues, [None] * 4) + self.assertEqual("7F B9 80 35", hexencode(gvar.compileCoord(["wght", "wdth"]))) + + def test_decompileCoords(self): + decompileCoords = GlyphVariation.decompileCoords_ + axes = ["wght", "wdth", "opsz"] + coords = [ + {"wght": 1.0, "wdth": 0.0, "opsz": 0.5}, + {"wght": -1.0, "wdth": 0.0, "opsz": 0.25}, + {"wght": 0.0, "wdth": -1.0, "opsz": 1.0} + ] + data = deHexStr("DE AD 40 00 00 00 20 00 C0 00 00 00 10 00 00 00 C0 00 40 00") + self.assertEqual((coords, 20), decompileCoords(axes, numCoords=3, data=data, offset=2)) + + def test_compilePoints(self): + compilePoints = lambda p: GlyphVariation.compilePoints(set(p), numPointsInGlyph=999) + self.assertEqual("00", hexencode(compilePoints(range(999)))) # all points in glyph + self.assertEqual("01 00 07", hexencode(compilePoints([7]))) + self.assertEqual("01 80 FF FF", hexencode(compilePoints([65535]))) + self.assertEqual("02 01 09 06", hexencode(compilePoints([9, 15]))) + self.assertEqual("06 05 07 01 F7 02 01 F2", hexencode(compilePoints([7, 8, 255, 257, 258, 500]))) + self.assertEqual("03 01 07 01 80 01 F4", hexencode(compilePoints([7, 8, 500]))) + self.assertEqual("04 01 07 01 81 BE EF 0C 0F", hexencode(compilePoints([7, 8, 0xBEEF, 0xCAFE]))) 
+ self.assertEqual("81 2C" + # 300 points (0x12c) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] + hexencode(compilePoints(range(300)))) + self.assertEqual("81 8F" + # 399 points (0x18f) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] + " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] + hexencode(compilePoints(range(399)))) + + def test_decompilePoints(self): + numPointsInGlyph = 65536 + allPoints = list(range(numPointsInGlyph)) + def decompilePoints(data, offset): + points, offset = GlyphVariation.decompilePoints_(numPointsInGlyph, deHexStr(data), offset) + # Conversion to list needed for Python 3. + return (list(points), offset) + # all points in glyph + self.assertEqual((allPoints, 1), decompilePoints("00", 0)) + # all points in glyph (in overly verbose encoding, not explicitly prohibited by spec) + self.assertEqual((allPoints, 2), decompilePoints("80 00", 0)) + # 2 points; first run: [9, 9+6] + self.assertEqual(([9, 15], 4), decompilePoints("02 01 09 06", 0)) + # 2 points; first run: [0xBEEF, 0xCAFE]. (0x0C0F = 0xCAFE - 0xBEEF) + self.assertEqual(([0xBEEF, 0xCAFE], 6), decompilePoints("02 81 BE EF 0C 0F", 0)) + # 1 point; first run: [7] + self.assertEqual(([7], 3), decompilePoints("01 00 07", 0)) + # 1 point; first run: [7] in overly verbose encoding + self.assertEqual(([7], 4), decompilePoints("01 80 00 07", 0)) + # 1 point; first run: [65535]; requires words to be treated as unsigned numbers + self.assertEqual(([65535], 4), decompilePoints("01 80 FF FF", 0)) + # 4 points; first run: [7, 8]; second run: [255, 257]. 
257 is stored in delta-encoded bytes (0xFF + 2). + self.assertEqual(([7, 8, 255, 257], 7), decompilePoints("04 01 07 01 01 FF 02", 0)) + # combination of all encodings, preceded and followed by 4 bytes of unused data + data = "DE AD DE AD 04 01 07 01 81 BE EF 0C 0F DE AD DE AD" + self.assertEqual(([7, 8, 0xBEEF, 0xCAFE], 13), decompilePoints(data, 4)) + self.assertSetEqual(set(range(300)), set(decompilePoints( + "81 2C" + # 300 points (0x12c) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] + 0)[0])) + self.assertSetEqual(set(range(399)), set(decompilePoints( + "81 8F" + # 399 points (0x18f) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] + " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] + " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] + 0)[0])) + + def test_decompilePoints_shouldGuardAgainstBadPointNumbers(self): + decompilePoints = GlyphVariation.decompilePoints_ + # 2 points; first run: [3, 9]. 
+ numPointsInGlyph = 8 + self.assertRaises(TTLibError, decompilePoints, numPointsInGlyph, deHexStr("02 01 03 06"), 0) + + def test_decompilePoints_roundTrip(self): + numPointsInGlyph = 500 # greater than 255, so we also exercise code path for 16-bit encoding + compile = lambda points: GlyphVariation.compilePoints(points, numPointsInGlyph) + decompile = lambda data: set(GlyphVariation.decompilePoints_(numPointsInGlyph, data, 0)[0]) + for i in range(50): + points = set(random.sample(range(numPointsInGlyph), 30)) + self.assertSetEqual(points, decompile(compile(points)), + "failed round-trip decompile/compilePoints; points=%s" % points) + allPoints = set(range(numPointsInGlyph)) + self.assertSetEqual(allPoints, decompile(compile(allPoints))) + + def test_compileDeltas(self): + gvar = GlyphVariation({}, [(0,0), (1, 0), (2, 0), (3, 3)]) + points = {1, 2} + # deltaX for points: [1, 2]; deltaY for points: [0, 0] + self.assertEqual("01 01 02 81", hexencode(gvar.compileDeltas(points))) + + def test_compileDeltaValues(self): + compileDeltaValues = lambda values: hexencode(GlyphVariation.compileDeltaValues_(values)) + # zeroes + self.assertEqual("80", compileDeltaValues([0])) + self.assertEqual("BF", compileDeltaValues([0] * 64)) + self.assertEqual("BF 80", compileDeltaValues([0] * 65)) + self.assertEqual("BF A3", compileDeltaValues([0] * 100)) + self.assertEqual("BF BF BF BF", compileDeltaValues([0] * 256)) + # bytes + self.assertEqual("00 01", compileDeltaValues([1])) + self.assertEqual("06 01 02 03 7F 80 FF FE", compileDeltaValues([1, 2, 3, 127, -128, -1, -2])) + self.assertEqual("3F" + (64 * " 7F"), compileDeltaValues([127] * 64)) + self.assertEqual("3F" + (64 * " 7F") + " 00 7F", compileDeltaValues([127] * 65)) + # words + self.assertEqual("40 66 66", compileDeltaValues([0x6666])) + self.assertEqual("43 66 66 7F FF FF FF 80 00", compileDeltaValues([0x6666, 32767, -1, -32768])) + self.assertEqual("7F" + (64 * " 11 22"), compileDeltaValues([0x1122] * 64)) + 
self.assertEqual("7F" + (64 * " 11 22") + " 40 11 22", compileDeltaValues([0x1122] * 65)) + # bytes, zeroes, bytes: a single zero is more compact when encoded as part of the bytes run + self.assertEqual("04 7F 7F 00 7F 7F", compileDeltaValues([127, 127, 0, 127, 127])) + self.assertEqual("01 7F 7F 81 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 127, 127])) + self.assertEqual("01 7F 7F 82 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 127, 127])) + self.assertEqual("01 7F 7F 83 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 0, 127, 127])) + # bytes, zeroes + self.assertEqual("01 01 00", compileDeltaValues([1, 0])) + self.assertEqual("00 01 81", compileDeltaValues([1, 0, 0])) + # words, bytes, words: a single byte is more compact when encoded as part of the words run + self.assertEqual("42 66 66 00 02 77 77", compileDeltaValues([0x6666, 2, 0x7777])) + self.assertEqual("40 66 66 01 02 02 40 77 77", compileDeltaValues([0x6666, 2, 2, 0x7777])) + # words, zeroes, words + self.assertEqual("40 66 66 80 40 77 77", compileDeltaValues([0x6666, 0, 0x7777])) + self.assertEqual("40 66 66 81 40 77 77", compileDeltaValues([0x6666, 0, 0, 0x7777])) + self.assertEqual("40 66 66 82 40 77 77", compileDeltaValues([0x6666, 0, 0, 0, 0x7777])) + # words, zeroes, bytes + self.assertEqual("40 66 66 80 02 01 02 03", compileDeltaValues([0x6666, 0, 1, 2, 3])) + self.assertEqual("40 66 66 81 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 1, 2, 3])) + self.assertEqual("40 66 66 82 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 0, 1, 2, 3])) + # words, zeroes + self.assertEqual("40 66 66 80", compileDeltaValues([0x6666, 0])) + self.assertEqual("40 66 66 81", compileDeltaValues([0x6666, 0, 0])) + + def test_decompileDeltas(self): + decompileDeltas = GlyphVariation.decompileDeltas_ + # 83 = zero values (0x80), count = 4 (1 + 0x83 & 0x3F) + self.assertEqual(([0, 0, 0, 0], 1), decompileDeltas(4, deHexStr("83"), 0)) + # 41 01 02 FF FF = signed 16-bit values (0x40), count = 2 (1 + 0x41 & 0x3F) 
+ self.assertEqual(([258, -1], 5), decompileDeltas(2, deHexStr("41 01 02 FF FF"), 0)) + # 01 81 07 = signed 8-bit values, count = 2 (1 + 0x01 & 0x3F) + self.assertEqual(([-127, 7], 3), decompileDeltas(2, deHexStr("01 81 07"), 0)) + # combination of all three encodings, preceded and followed by 4 bytes of unused data + data = deHexStr("DE AD BE EF 83 40 01 02 01 81 80 DE AD BE EF") + self.assertEqual(([0, 0, 0, 0, 258, -127, -128], 11), decompileDeltas(7, data, 4)) + + def test_decompileDeltas_roundTrip(self): + numDeltas = 30 + compile = GlyphVariation.compileDeltaValues_ + decompile = lambda data: GlyphVariation.decompileDeltas_(numDeltas, data, 0)[0] + for i in range(50): + deltas = random.sample(range(-128, 127), 10) + deltas.extend(random.sample(range(-32768, 32767), 10)) + deltas.extend([0] * 10) + random.shuffle(deltas) + self.assertListEqual(deltas, decompile(compile(deltas))) + + def test_getTupleSize(self): + getTupleSize = GlyphVariation.getTupleSize_ + numAxes = 3 + self.assertEqual(4 + numAxes * 2, getTupleSize(0x8042, numAxes)) + self.assertEqual(4 + numAxes * 4, getTupleSize(0x4077, numAxes)) + self.assertEqual(4, getTupleSize(0x2077, numAxes)) + self.assertEqual(4, getTupleSize(11, numAxes)) + + @staticmethod + def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in content.splitlines()][1:] + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_h_d_m_x.py fonttools-3.0/Tools/fontTools/ttLib/tables/_h_d_m_x.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_h_d_m_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_h_d_m_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,121 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . import DefaultTable +import array + +hdmxHeaderFormat = """ + > # big endian! 
+ version: H + numRecords: H + recordSize: l +""" + +try: + from collections.abc import Mapping +except: + from UserDict import DictMixin as Mapping + +class _GlyphnamedList(Mapping): + + def __init__(self, reverseGlyphOrder, data): + self._array = data + self._map = dict(reverseGlyphOrder) + + def __getitem__(self, k): + return self._array[self._map[k]] + + def __len__(self): + return len(self._map) + + def __iter__(self): + return iter(self._map) + + def keys(self): + return self._map.keys() + +class table__h_d_m_x(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs + glyphOrder = ttFont.getGlyphOrder() + dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self) + self.hdmx = {} + for i in range(self.numRecords): + ppem = byteord(data[0]) + maxSize = byteord(data[1]) + widths = _GlyphnamedList(ttFont.getReverseGlyphMap(), array.array("B", data[2:2+numGlyphs])) + self.hdmx[ppem] = widths + data = data[self.recordSize:] + assert len(data) == 0, "too much hdmx data" + + def compile(self, ttFont): + self.version = 0 + numGlyphs = ttFont['maxp'].numGlyphs + glyphOrder = ttFont.getGlyphOrder() + self.recordSize = 4 * ((2 + numGlyphs + 3) // 4) + pad = (self.recordSize - 2 - numGlyphs) * b"\0" + self.numRecords = len(self.hdmx) + data = sstruct.pack(hdmxHeaderFormat, self) + items = sorted(self.hdmx.items()) + for ppem, widths in items: + data = data + bytechr(ppem) + bytechr(max(widths.values())) + for glyphID in range(len(glyphOrder)): + width = widths[glyphOrder[glyphID]] + data = data + bytechr(width) + data = data + pad + return data + + def toXML(self, writer, ttFont): + writer.begintag("hdmxData") + writer.newline() + ppems = sorted(self.hdmx.keys()) + records = [] + format = "" + for ppem in ppems: + widths = self.hdmx[ppem] + records.append(widths) + format = format + "%4d" + glyphNames = ttFont.getGlyphOrder()[:] + glyphNames.sort() + maxNameLen = max(map(len, glyphNames)) + format = "%" + 
repr(maxNameLen) + 's:' + format + ' ;' + writer.write(format % (("ppem",) + tuple(ppems))) + writer.newline() + writer.newline() + for glyphName in glyphNames: + row = [] + for ppem in ppems: + widths = self.hdmx[ppem] + row.append(widths[glyphName]) + if ";" in glyphName: + glyphName = "\\x3b".join(glyphName.split(";")) + writer.write(format % ((glyphName,) + tuple(row))) + writer.newline() + writer.endtag("hdmxData") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name != "hdmxData": + return + content = strjoin(content) + lines = content.split(";") + topRow = lines[0].split() + assert topRow[0] == "ppem:", "illegal hdmx format" + ppems = list(map(int, topRow[1:])) + self.hdmx = hdmx = {} + for ppem in ppems: + hdmx[ppem] = {} + lines = (line.split() for line in lines[1:]) + for line in lines: + if not line: + continue + assert line[0][-1] == ":", "illegal hdmx format" + glyphName = line[0][:-1] + if "\\" in glyphName: + from fontTools.misc.textTools import safeEval + glyphName = safeEval('"""' + glyphName + '"""') + line = list(map(int, line[1:])) + assert len(line) == len(ppems), "illegal hdmx format" + for i in range(len(ppems)): + hdmx[ppems[i]][glyphName] = line[i] diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_h_e_a_d.py fonttools-3.0/Tools/fontTools/ttLib/tables/_h_e_a_d.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_h_e_a_d.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_h_e_a_d.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,92 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, num2binary, binary2num +from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow +from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat +from . 
import DefaultTable +import warnings + + +headFormat = """ + > # big endian + tableVersion: 16.16F + fontRevision: 16.16F + checkSumAdjustment: I + magicNumber: I + flags: H + unitsPerEm: H + created: Q + modified: Q + xMin: h + yMin: h + xMax: h + yMax: h + macStyle: H + lowestRecPPEM: H + fontDirectionHint: h + indexToLocFormat: h + glyphDataFormat: h +""" + +class table__h_e_a_d(DefaultTable.DefaultTable): + + dependencies = ['maxp', 'loca'] + + def decompile(self, data, ttFont): + dummy, rest = sstruct.unpack2(headFormat, data, self) + if rest: + # this is quite illegal, but there seem to be fonts out there that do this + warnings.warn("extra bytes at the end of 'head' table") + assert rest == "\0\0" + + # For timestamp fields, ignore the top four bytes. Some fonts have + # bogus values there. Since till 2038 those bytes only can be zero, + # ignore them. + # + # https://github.com/behdad/fonttools/issues/99#issuecomment-66776810 + for stamp in 'created', 'modified': + value = getattr(self, stamp) + if value > 0xFFFFFFFF: + warnings.warn("'%s' timestamp out of range; ignoring top bytes" % stamp) + value &= 0xFFFFFFFF + setattr(self, stamp, value) + if value < 0x7C259DC0: # January 1, 1970 00:00:00 + warnings.warn("'%s' timestamp seems very low; regarding as unix timestamp" % stamp) + value += 0x7C259DC0 + setattr(self, stamp, value) + + def compile(self, ttFont): + if ttFont.recalcTimestamp: + self.modified = timestampNow() + data = sstruct.pack(headFormat, self) + return data + + def toXML(self, writer, ttFont): + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(headFormat) + for name in names: + value = getattr(self, name) + if name in ("created", "modified"): + value = timestampToString(value) + if name in ("magicNumber", "checkSumAdjustment"): + if value < 0: + value = value + 0x100000000 + value = hex(value) + if value[-1:] == "L": + value = value[:-1] + elif name 
in ("macStyle", "flags"): + value = num2binary(value, 16) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name in ("created", "modified"): + value = timestampFromString(value) + elif name in ("macStyle", "flags"): + value = binary2num(value) + else: + value = safeEval(value) + setattr(self, name, value) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_h_h_e_a.py fonttools-3.0/Tools/fontTools/ttLib/tables/_h_h_e_a.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_h_h_e_a.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_h_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,91 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable + +hheaFormat = """ + > # big endian + tableVersion: 16.16F + ascent: h + descent: h + lineGap: h + advanceWidthMax: H + minLeftSideBearing: h + minRightSideBearing: h + xMaxExtent: h + caretSlopeRise: h + caretSlopeRun: h + caretOffset: h + reserved0: h + reserved1: h + reserved2: h + reserved3: h + metricDataFormat: h + numberOfHMetrics: H +""" + + +class table__h_h_e_a(DefaultTable.DefaultTable): + + # Note: Keep in sync with table__v_h_e_a + + dependencies = ['hmtx', 'glyf'] + + def decompile(self, data, ttFont): + sstruct.unpack(hheaFormat, data, self) + + def compile(self, ttFont): + if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + self.recalc(ttFont) + return sstruct.pack(hheaFormat, self) + + def recalc(self, ttFont): + hmtxTable = ttFont['hmtx'] + if 'glyf' in ttFont: + glyfTable = ttFont['glyf'] + INFINITY = 100000 + advanceWidthMax = 0 + minLeftSideBearing = +INFINITY # arbitrary big number + minRightSideBearing = +INFINITY # arbitrary big number + xMaxExtent = -INFINITY # arbitrary big negative number + + for name in 
ttFont.getGlyphOrder(): + width, lsb = hmtxTable[name] + advanceWidthMax = max(advanceWidthMax, width) + g = glyfTable[name] + if g.numberOfContours == 0: + continue + if g.numberOfContours < 0 and not hasattr(g, "xMax"): + # Composite glyph without extents set. + # Calculate those. + g.recalcBounds(glyfTable) + minLeftSideBearing = min(minLeftSideBearing, lsb) + rsb = width - lsb - (g.xMax - g.xMin) + minRightSideBearing = min(minRightSideBearing, rsb) + extent = lsb + (g.xMax - g.xMin) + xMaxExtent = max(xMaxExtent, extent) + + if xMaxExtent == -INFINITY: + # No glyph has outlines. + minLeftSideBearing = 0 + minRightSideBearing = 0 + xMaxExtent = 0 + + self.advanceWidthMax = advanceWidthMax + self.minLeftSideBearing = minLeftSideBearing + self.minRightSideBearing = minRightSideBearing + self.xMaxExtent = xMaxExtent + else: + # XXX CFF recalc... + pass + + def toXML(self, writer, ttFont): + formatstring, names, fixes = sstruct.getformat(hheaFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_h_m_t_x.py fonttools-3.0/Tools/fontTools/ttLib/tables/_h_m_t_x.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_h_m_t_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_h_m_t_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,101 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import sys +import array +import warnings + + +class table__h_m_t_x(DefaultTable.DefaultTable): + + headerTag = 'hhea' + advanceName = 'width' + sideBearingName = 'lsb' + numberOfMetricsName = 'numberOfHMetrics' + + def decompile(self, data, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs + numberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName)) + if numberOfMetrics > numGlyphs: + numberOfMetrics = numGlyphs # We warn later. + # Note: advanceWidth is unsigned, but we read/write as signed. + metrics = array.array("h", data[:4 * numberOfMetrics]) + if sys.byteorder != "big": + metrics.byteswap() + data = data[4 * numberOfMetrics:] + numberOfSideBearings = numGlyphs - numberOfMetrics + sideBearings = array.array("h", data[:2 * numberOfSideBearings]) + data = data[2 * numberOfSideBearings:] + + if sys.byteorder != "big": + sideBearings.byteswap() + if data: + warnings.warn("too much 'hmtx'/'vmtx' table data") + self.metrics = {} + glyphOrder = ttFont.getGlyphOrder() + for i in range(numberOfMetrics): + glyphName = glyphOrder[i] + self.metrics[glyphName] = list(metrics[i*2:i*2+2]) + lastAdvance = metrics[-2] + for i in range(numberOfSideBearings): + glyphName = glyphOrder[i + numberOfMetrics] + self.metrics[glyphName] = [lastAdvance, sideBearings[i]] + + def compile(self, ttFont): + metrics = [] + for glyphName in ttFont.getGlyphOrder(): + metrics.append(self.metrics[glyphName]) + lastAdvance = metrics[-1][0] + lastIndex = len(metrics) + while metrics[lastIndex-2][0] == lastAdvance: + lastIndex -= 1 + if lastIndex <= 1: + # all advances are equal + lastIndex = 1 + break + additionalMetrics = metrics[lastIndex:] + additionalMetrics = [sb for advance, sb in additionalMetrics] + metrics = metrics[:lastIndex] + setattr(ttFont[self.headerTag], self.numberOfMetricsName, len(metrics)) + + allMetrics = [] + for item in metrics: + allMetrics.extend(item) + allMetrics = array.array("h", allMetrics) + if sys.byteorder != "big": + 
allMetrics.byteswap() + data = allMetrics.tostring() + + additionalMetrics = array.array("h", additionalMetrics) + if sys.byteorder != "big": + additionalMetrics.byteswap() + data = data + additionalMetrics.tostring() + return data + + def toXML(self, writer, ttFont): + names = sorted(self.metrics.keys()) + for glyphName in names: + advance, sb = self.metrics[glyphName] + writer.simpletag("mtx", [ + ("name", glyphName), + (self.advanceName, advance), + (self.sideBearingName, sb), + ]) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "metrics"): + self.metrics = {} + if name == "mtx": + self.metrics[attrs["name"]] = [safeEval(attrs[self.advanceName]), + safeEval(attrs[self.sideBearingName])] + + def __delitem__(self, glyphName): + del self.metrics[glyphName] + + def __getitem__(self, glyphName): + return self.metrics[glyphName] + + def __setitem__(self, glyphName, advance_sb_pair): + self.metrics[glyphName] = tuple(advance_sb_pair) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/__init__.py fonttools-3.0/Tools/fontTools/ttLib/tables/__init__.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/__init__.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,74 @@ + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +# DON'T EDIT! This file is generated by MetaTools/buildTableList.py. +def _moduleFinderHint(): + """Dummy function to let modulefinder know what tables may be + dynamically imported. Generated by MetaTools/buildTableList.py. + + >>> _moduleFinderHint() + """ + from . import B_A_S_E_ + from . import C_B_D_T_ + from . import C_B_L_C_ + from . import C_F_F_ + from . import C_O_L_R_ + from . import C_P_A_L_ + from . import D_S_I_G_ + from . import E_B_D_T_ + from . import E_B_L_C_ + from . import F_F_T_M_ + from . import G_D_E_F_ + from . import G_M_A_P_ + from . 
import G_P_K_G_ + from . import G_P_O_S_ + from . import G_S_U_B_ + from . import J_S_T_F_ + from . import L_T_S_H_ + from . import M_A_T_H_ + from . import M_E_T_A_ + from . import O_S_2f_2 + from . import S_I_N_G_ + from . import S_V_G_ + from . import T_S_I_B_ + from . import T_S_I_D_ + from . import T_S_I_J_ + from . import T_S_I_P_ + from . import T_S_I_S_ + from . import T_S_I_V_ + from . import T_S_I__0 + from . import T_S_I__1 + from . import T_S_I__2 + from . import T_S_I__3 + from . import T_S_I__5 + from . import V_D_M_X_ + from . import V_O_R_G_ + from . import _a_v_a_r + from . import _c_m_a_p + from . import _c_v_t + from . import _f_e_a_t + from . import _f_p_g_m + from . import _f_v_a_r + from . import _g_a_s_p + from . import _g_l_y_f + from . import _g_v_a_r + from . import _h_d_m_x + from . import _h_e_a_d + from . import _h_h_e_a + from . import _h_m_t_x + from . import _k_e_r_n + from . import _l_o_c_a + from . import _l_t_a_g + from . import _m_a_x_p + from . import _m_e_t_a + from . import _n_a_m_e + from . import _p_o_s_t + from . import _p_r_e_p + from . import _s_b_i_x + from . import _v_h_e_a + from . 
import _v_m_t_x + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/J_S_T_F_.py fonttools-3.0/Tools/fontTools/ttLib/tables/J_S_T_F_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/J_S_T_F_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/J_S_T_F_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_J_S_T_F_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_k_e_r_n.py fonttools-3.0/Tools/fontTools/ttLib/tables/_k_e_r_n.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_k_e_r_n.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_k_e_r_n.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,200 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import getSearchRange +from fontTools.misc.textTools import safeEval, readHex +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from . import DefaultTable +import struct +import array +import warnings + + +class table__k_e_r_n(DefaultTable.DefaultTable): + + def getkern(self, format): + for subtable in self.kernTables: + if subtable.version == format: + return subtable + return None # not found + + def decompile(self, data, ttFont): + version, nTables = struct.unpack(">HH", data[:4]) + apple = False + if (len(data) >= 8) and (version == 1): + # AAT Apple's "new" format. Hm. 
+ version, nTables = struct.unpack(">LL", data[:8]) + self.version = fi2fl(version, 16) + data = data[8:] + apple = True + else: + self.version = version + data = data[4:] + tablesIndex = [] + self.kernTables = [] + for i in range(nTables): + if self.version == 1.0: + # Apple + length, coverage, tupleIndex = struct.unpack(">lHH", data[:8]) + version = coverage & 0xff + else: + version, length = struct.unpack(">HH", data[:4]) + length = int(length) + if version not in kern_classes: + subtable = KernTable_format_unkown(version) + else: + subtable = kern_classes[version]() + subtable.apple = apple + subtable.decompile(data[:length], ttFont) + self.kernTables.append(subtable) + data = data[length:] + + def compile(self, ttFont): + if hasattr(self, "kernTables"): + nTables = len(self.kernTables) + else: + nTables = 0 + if self.version == 1.0: + # AAT Apple's "new" format. + data = struct.pack(">ll", fl2fi(self.version, 16), nTables) + else: + data = struct.pack(">HH", self.version, nTables) + if hasattr(self, "kernTables"): + for subtable in self.kernTables: + data = data + subtable.compile(ttFont) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + for subtable in self.kernTables: + subtable.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.version = safeEval(attrs["value"]) + return + if name != "kernsubtable": + return + if not hasattr(self, "kernTables"): + self.kernTables = [] + format = safeEval(attrs["format"]) + if format not in kern_classes: + subtable = KernTable_format_unkown(format) + else: + subtable = kern_classes[format]() + self.kernTables.append(subtable) + subtable.fromXML(name, attrs, content, ttFont) + + +class KernTable_format_0(object): + + def decompile(self, data, ttFont): + version, length, coverage = (0,0,0) + if not self.apple: + version, length, coverage = struct.unpack(">HHH", data[:6]) + data = data[6:] + else: + 
version, length, coverage = struct.unpack(">LHH", data[:8]) + data = data[8:] + self.version, self.coverage = int(version), int(coverage) + + self.kernTable = kernTable = {} + + nPairs, searchRange, entrySelector, rangeShift = struct.unpack(">HHHH", data[:8]) + data = data[8:] + + nPairs = min(nPairs, len(data) // 6) + datas = array.array("H", data[:6 * nPairs]) + if sys.byteorder != "big": + datas.byteswap() + it = iter(datas) + glyphOrder = ttFont.getGlyphOrder() + for k in range(nPairs): + left, right, value = next(it), next(it), next(it) + if value >= 32768: value -= 65536 + try: + kernTable[(glyphOrder[left], glyphOrder[right])] = value + except IndexError: + # Slower, but will not throw an IndexError on an invalid glyph id. + kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = value + if len(data) > 6 * nPairs: + warnings.warn("excess data in 'kern' subtable: %d bytes" % len(data)) + + def compile(self, ttFont): + nPairs = len(self.kernTable) + searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6) + data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift) + + # yeehee! (I mean, turn names into indices) + try: + reverseOrder = ttFont.getReverseGlyphMap() + kernTable = sorted((reverseOrder[left], reverseOrder[right], value) for ((left,right),value) in self.kernTable.items()) + except KeyError: + # Slower, but will not throw KeyError on invalid glyph id. 
+ getGlyphID = ttFont.getGlyphID + kernTable = sorted((getGlyphID(left), getGlyphID(right), value) for ((left,right),value) in self.kernTable.items()) + + for left, right, value in kernTable: + data = data + struct.pack(">HHh", left, right, value) + return struct.pack(">HHH", self.version, len(data) + 6, self.coverage) + data + + def toXML(self, writer, ttFont): + writer.begintag("kernsubtable", coverage=self.coverage, format=0) + writer.newline() + items = sorted(self.kernTable.items()) + for (left, right), value in items: + writer.simpletag("pair", [ + ("l", left), + ("r", right), + ("v", value) + ]) + writer.newline() + writer.endtag("kernsubtable") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.coverage = safeEval(attrs["coverage"]) + self.version = safeEval(attrs["format"]) + if not hasattr(self, "kernTable"): + self.kernTable = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"]) + + def __getitem__(self, pair): + return self.kernTable[pair] + + def __setitem__(self, pair, value): + self.kernTable[pair] = value + + def __delitem__(self, pair): + del self.kernTable[pair] + + +class KernTable_format_unkown(object): + + def __init__(self, format): + self.format = format + + def decompile(self, data, ttFont): + self.data = data + + def compile(self, ttFont): + return self.data + + def toXML(self, writer, ttFont): + writer.begintag("kernsubtable", format=self.format) + writer.newline() + writer.comment("unknown 'kern' subtable format") + writer.newline() + writer.dumphex(self.data) + writer.endtag("kernsubtable") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.decompile(readHex(content), ttFont) + + +kern_classes = {0: KernTable_format_0} diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_k_e_r_n_test.py fonttools-3.0/Tools/fontTools/ttLib/tables/_k_e_r_n_test.py --- 
fonttools-2.4/Tools/fontTools/ttLib/tables/_k_e_r_n_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_k_e_r_n_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,29 @@ +from __future__ import print_function, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +import unittest +from ._k_e_r_n import KernTable_format_0 + +class MockFont(object): + + def getGlyphOrder(self): + return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"] + + def getGlyphName(self, glyphID): + return "glyph%.5d" % glyphID + +class KernTable_format_0_Test(unittest.TestCase): + + def test_decompileBadGlyphId(self): + subtable = KernTable_format_0() + subtable.apple = False + subtable.decompile( b'\x00' * 6 + + b'\x00' + b'\x02' + b'\x00' * 6 + + b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01' + + b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02', + MockFont()) + self.assertEqual(subtable[("glyph00001", "glyph00003")], 1) + self.assertEqual(subtable[("glyph00001", "glyph65535")], 2) + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_l_o_c_a.py fonttools-3.0/Tools/fontTools/ttLib/tables/_l_o_c_a.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_l_o_c_a.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_l_o_c_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,60 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import DefaultTable +import sys +import array +import warnings + +class table__l_o_c_a(DefaultTable.DefaultTable): + + dependencies = ['glyf'] + + def decompile(self, data, ttFont): + longFormat = ttFont['head'].indexToLocFormat + if longFormat: + format = "I" + else: + format = "H" + locations = array.array(format) + locations.fromstring(data) + if sys.byteorder != "big": + locations.byteswap() + if not longFormat: + l = array.array("I") + for i in range(len(locations)): + l.append(locations[i] * 2) + locations = l + if len(locations) < (ttFont['maxp'].numGlyphs + 1): + warnings.warn("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d" % (len(locations) - 1, ttFont['maxp'].numGlyphs)) + self.locations = locations + + def compile(self, ttFont): + try: + max_location = max(self.locations) + except AttributeError: + self.set([]) + max_location = 0 + if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations): + locations = array.array("H") + for i in range(len(self.locations)): + locations.append(self.locations[i] // 2) + ttFont['head'].indexToLocFormat = 0 + else: + locations = array.array("I", self.locations) + ttFont['head'].indexToLocFormat = 1 + if sys.byteorder != "big": + locations.byteswap() + return locations.tostring() + + def set(self, locations): + self.locations = array.array("I", locations) + + def toXML(self, writer, ttFont): + writer.comment("The 'loca' table will be calculated by the compiler") + writer.newline() + + def __getitem__(self, index): + return self.locations[index] + + def __len__(self): + return len(self.locations) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_l_t_a_g.py fonttools-3.0/Tools/fontTools/ttLib/tables/_l_t_a_g.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_l_t_a_g.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_l_t_a_g.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,50 @@ +from __future__ import print_function, division, absolute_import +from 
fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . import DefaultTable +import struct + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html + +class table__l_t_a_g(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.version, self.flags, numTags = struct.unpack(">LLL", data[:12]) + assert self.version == 1 + self.tags = [] + for i in range(numTags): + pos = 12 + i * 4 + offset, length = struct.unpack(">HH", data[pos:pos+4]) + tag = data[offset:offset+length].decode("ascii") + self.tags.append(tag) + + def compile(self, ttFont): + dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))] + stringPool = "" + for tag in self.tags: + offset = stringPool.find(tag) + if offset < 0: + offset = len(stringPool) + stringPool = stringPool + tag + offset = offset + 12 + len(self.tags) * 4 + dataList.append(struct.pack(">HH", offset, len(tag))) + dataList.append(stringPool) + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("flags", value=self.flags) + writer.newline() + for tag in self.tags: + writer.simpletag("LanguageTag", tag=tag) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "tags"): + self.tags = [] + if name == "LanguageTag": + self.tags.append(attrs["tag"]) + elif "value" in attrs: + value = safeEval(attrs["value"]) + setattr(self, name, value) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_l_t_a_g_test.py fonttools-3.0/Tools/fontTools/ttLib/tables/_l_t_a_g_test.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_l_t_a_g_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_l_t_a_g_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,49 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.xmlWriter import 
XMLWriter +import os +import struct +import unittest +from ._l_t_a_g import table__l_t_a_g + +class Test_l_t_a_g(unittest.TestCase): + + DATA_ = struct.pack(b">LLLHHHHHH", 1, 0, 3, 24 + 0, 2, 24 + 2, 7, 24 + 2, 2) + b"enzh-Hant" + TAGS_ = ["en", "zh-Hant", "zh"] + + def test_decompile_compile(self): + table = table__l_t_a_g() + table.decompile(self.DATA_, ttFont=None) + self.assertEqual(1, table.version) + self.assertEqual(0, table.flags) + self.assertEqual(self.TAGS_, table.tags) + self.assertEqual(self.DATA_, table.compile(ttFont=None)) + + def test_fromXML(self): + table = table__l_t_a_g() + table.fromXML("version", {"value": "1"}, content=None, ttFont=None) + table.fromXML("flags", {"value": "777"}, content=None, ttFont=None) + table.fromXML("LanguageTag", {"tag": "sr-Latn"}, content=None, ttFont=None) + table.fromXML("LanguageTag", {"tag": "fa"}, content=None, ttFont=None) + self.assertEqual(1, table.version) + self.assertEqual(777, table.flags) + self.assertEqual(["sr-Latn", "fa"], table.tags) + + def test_toXML(self): + writer = XMLWriter(BytesIO()) + table = table__l_t_a_g() + table.decompile(self.DATA_, ttFont=None) + table.toXML(writer, ttFont=None) + expected = os.linesep.join([ + '', + '', + '', + '', + '', + '' + ]) + os.linesep + self.assertEqual(expected.encode("utf_8"), writer.file.getvalue()) + + +if __name__ == '__main__': + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/L_T_S_H_.py fonttools-3.0/Tools/fontTools/ttLib/tables/L_T_S_H_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/L_T_S_H_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/L_T_S_H_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,50 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import struct +import array + +# XXX I've lowered the strictness, to make sure Apple's own Chicago +# XXX gets through. They're looking into it, I hope to raise the standards +# XXX back to normal eventually. + +class table_L_T_S_H_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + version, numGlyphs = struct.unpack(">HH", data[:4]) + data = data[4:] + assert version == 0, "unknown version: %s" % version + assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length" + # ouch: the assertion is not true in Chicago! + #assert numGlyphs == ttFont['maxp'].numGlyphs + yPels = array.array("B") + yPels.fromstring(data) + self.yPels = {} + for i in range(numGlyphs): + self.yPels[ttFont.getGlyphName(i)] = yPels[i] + + def compile(self, ttFont): + version = 0 + names = list(self.yPels.keys()) + numGlyphs = len(names) + yPels = [0] * numGlyphs + # ouch: the assertion is not true in Chicago! + #assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs + for name in names: + yPels[ttFont.getGlyphID(name)] = self.yPels[name] + yPels = array.array("B", yPels) + return struct.pack(">HH", version, numGlyphs) + yPels.tostring() + + def toXML(self, writer, ttFont): + names = sorted(self.yPels.keys()) + for name in names: + writer.simpletag("yPel", name=name, value=self.yPels[name]) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "yPels"): + self.yPels = {} + if name != "yPel": + return # ignore unknown tags + self.yPels[attrs["name"]] = safeEval(attrs["value"]) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/M_A_T_H_.py fonttools-3.0/Tools/fontTools/ttLib/tables/M_A_T_H_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/M_A_T_H_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/M_A_T_H_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * 
+from .otBase import BaseTTXConverter + + +class table_M_A_T_H_(BaseTTXConverter): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_m_a_x_p.py fonttools-3.0/Tools/fontTools/ttLib/tables/_m_a_x_p.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_m_a_x_p.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_m_a_x_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,139 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable + +maxpFormat_0_5 = """ + > # big endian + tableVersion: i + numGlyphs: H +""" + +maxpFormat_1_0_add = """ + > # big endian + maxPoints: H + maxContours: H + maxCompositePoints: H + maxCompositeContours: H + maxZones: H + maxTwilightPoints: H + maxStorage: H + maxFunctionDefs: H + maxInstructionDefs: H + maxStackElements: H + maxSizeOfInstructions: H + maxComponentElements: H + maxComponentDepth: H +""" + + +class table__m_a_x_p(DefaultTable.DefaultTable): + + dependencies = ['glyf'] + + def decompile(self, data, ttFont): + dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self) + self.numGlyphs = int(self.numGlyphs) + if self.tableVersion != 0x00005000: + dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self) + assert len(data) == 0 + + def compile(self, ttFont): + if 'glyf' in ttFont: + if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + self.recalc(ttFont) + else: + pass # CFF + self.numGlyphs = len(ttFont.getGlyphOrder()) + if self.tableVersion != 0x00005000: + self.tableVersion = 0x00010000 + data = sstruct.pack(maxpFormat_0_5, self) + if self.tableVersion == 0x00010000: + data = data + sstruct.pack(maxpFormat_1_0_add, self) + return data + + def recalc(self, ttFont): + """Recalculate the font bounding box, and most other maxp values except + for the TT instructions values. 
Also recalculate the value of bit 1 + of the flags field and the font bounding box of the 'head' table. + """ + glyfTable = ttFont['glyf'] + hmtxTable = ttFont['hmtx'] + headTable = ttFont['head'] + self.numGlyphs = len(glyfTable) + INFINITY = 100000 + xMin = +INFINITY + yMin = +INFINITY + xMax = -INFINITY + yMax = -INFINITY + maxPoints = 0 + maxContours = 0 + maxCompositePoints = 0 + maxCompositeContours = 0 + maxComponentElements = 0 + maxComponentDepth = 0 + allXMaxIsLsb = 1 + for glyphName in ttFont.getGlyphOrder(): + g = glyfTable[glyphName] + if g.numberOfContours: + if hmtxTable[glyphName][1] != g.xMin: + allXMaxIsLsb = 0 + xMin = min(xMin, g.xMin) + yMin = min(yMin, g.yMin) + xMax = max(xMax, g.xMax) + yMax = max(yMax, g.yMax) + if g.numberOfContours > 0: + nPoints, nContours = g.getMaxpValues() + maxPoints = max(maxPoints, nPoints) + maxContours = max(maxContours, nContours) + else: + nPoints, nContours, componentDepth = g.getCompositeMaxpValues(glyfTable) + maxCompositePoints = max(maxCompositePoints, nPoints) + maxCompositeContours = max(maxCompositeContours, nContours) + maxComponentElements = max(maxComponentElements, len(g.components)) + maxComponentDepth = max(maxComponentDepth, componentDepth) + if xMin == +INFINITY: + headTable.xMin = 0 + headTable.yMin = 0 + headTable.xMax = 0 + headTable.yMax = 0 + else: + headTable.xMin = xMin + headTable.yMin = yMin + headTable.xMax = xMax + headTable.yMax = yMax + self.maxPoints = maxPoints + self.maxContours = maxContours + self.maxCompositePoints = maxCompositePoints + self.maxCompositeContours = maxCompositeContours + self.maxComponentDepth = maxComponentDepth + if allXMaxIsLsb: + headTable.flags = headTable.flags | 0x2 + else: + headTable.flags = headTable.flags & ~0x2 + + def testrepr(self): + items = sorted(self.__dict__.items()) + print(". . . . . . . . .") + for combo in items: + print(" %s: %s" % combo) + print(". . . . . . . . 
.") + + def toXML(self, writer, ttFont): + if self.tableVersion != 0x00005000: + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5) + if self.tableVersion != 0x00005000: + formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add) + names = names + names_1_0 + for name in names: + value = getattr(self, name) + if name == "tableVersion": + value = hex(value) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_m_e_t_a.py fonttools-3.0/Tools/fontTools/ttLib/tables/_m_e_t_a.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_m_e_t_a.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_m_e_t_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,93 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import readHex +from fontTools.ttLib import TTLibError +from . import DefaultTable + +# Apple's documentation of 'meta': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6meta.html + +META_HEADER_FORMAT = """ + > # big endian + version: L + flags: L + dataOffset: L + numDataMaps: L +""" + +# According to Apple's spec, the dataMaps entries contain a dataOffset +# that is documented as "Offset from the beginning of the data section +# to the data for this tag". However, this is *not* the case with +# the fonts that Apple ships pre-installed on MacOS X Yosemite 10.10.4, +# and it also does not reflect how Apple's ftxdumperfuser tool is parsing +# the 'meta' table (tested ftxdumperfuser build 330, FontToolbox.framework +# build 187). 
Instead of what is claimed in the spec, the data maps contain +# a dataOffset relative to the very beginning of the 'meta' table. +# The dataOffset field of the 'meta' header apparently gets ignored. + +DATA_MAP_FORMAT = """ + > # big endian + tag: 4s + dataOffset: L + dataLength: L +""" + + +class table__m_e_t_a(DefaultTable.DefaultTable): + def __init__(self, tag="meta"): + DefaultTable.DefaultTable.__init__(self, tag) + self.data = {} + + def decompile(self, data, ttFont): + headerSize = sstruct.calcsize(META_HEADER_FORMAT) + header = sstruct.unpack(META_HEADER_FORMAT, data[0 : headerSize]) + if header["version"] != 1: + raise TTLibError("unsupported 'meta' version %d" % + header["version"]) + dataMapSize = sstruct.calcsize(DATA_MAP_FORMAT) + for i in range(header["numDataMaps"]): + dataMapOffset = headerSize + i * dataMapSize + dataMap = sstruct.unpack( + DATA_MAP_FORMAT, + data[dataMapOffset : dataMapOffset + dataMapSize]) + tag = dataMap["tag"] + offset = dataMap["dataOffset"] + self.data[tag] = data[offset : offset + dataMap["dataLength"]] + + def compile(self, ttFont): + keys = sorted(self.data.keys()) + headerSize = sstruct.calcsize(META_HEADER_FORMAT) + dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT) + header = sstruct.pack(META_HEADER_FORMAT, { + "version": 1, + "flags": 0, + "dataOffset": dataOffset, + "numDataMaps": len(keys) + }) + dataMaps = [] + dataBlocks = [] + for tag in keys: + data = self.data[tag] + dataMaps.append(sstruct.pack(DATA_MAP_FORMAT, { + "tag": tag, + "dataOffset": dataOffset, + "dataLength": len(data) + })) + dataBlocks.append(data) + dataOffset += len(data) + return bytesjoin([header] + dataMaps + dataBlocks) + + def toXML(self, writer, ttFont, progress=None): + for tag in sorted(self.data.keys()): + writer.begintag("hexdata", tag=tag) + writer.newline() + writer.dumphex(self.data[tag]) + writer.endtag("hexdata") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "hexdata": 
+ self.data[attrs["tag"]] = readHex(content) + else: + raise TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/M_E_T_A_.py fonttools-3.0/Tools/fontTools/ttLib/tables/M_E_T_A_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/M_E_T_A_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/M_E_T_A_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,305 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable +import pdb +import struct + + +METAHeaderFormat = """ + > # big endian + tableVersionMajor: H + tableVersionMinor: H + metaEntriesVersionMajor: H + metaEntriesVersionMinor: H + unicodeVersion: L + metaFlags: H + nMetaRecs: H +""" +# This record is followed by nMetaRecs of METAGlyphRecordFormat. +# This in turn is followd by as many METAStringRecordFormat entries +# as specified by the METAGlyphRecordFormat entries +# this is followed by the strings specifried in the METAStringRecordFormat +METAGlyphRecordFormat = """ + > # big endian + glyphID: H + nMetaEntry: H +""" +# This record is followd by a variable data length field: +# USHORT or ULONG hdrOffset +# Offset from start of META table to the beginning +# of this glyphs array of ns Metadata string entries. +# Size determined by metaFlags field +# METAGlyphRecordFormat entries must be sorted by glyph ID + +METAStringRecordFormat = """ + > # big endian + labelID: H + stringLen: H +""" +# This record is followd by a variable data length field: +# USHORT or ULONG stringOffset +# METAStringRecordFormat entries must be sorted in order of labelID +# There may be more than one entry with the same labelID +# There may be more than one strign with the same content. + +# Strings shall be Unicode UTF-8 encoded, and null-terminated. 
+ +METALabelDict = { + 0: "MojikumiX4051", # An integer in the range 1-20 + 1: "UNIUnifiedBaseChars", + 2: "BaseFontName", + 3: "Language", + 4: "CreationDate", + 5: "FoundryName", + 6: "FoundryCopyright", + 7: "OwnerURI", + 8: "WritingScript", + 10: "StrokeCount", + 11: "IndexingRadical", +} + + +def getLabelString(labelID): + try: + label = METALabelDict[labelID] + except KeyError: + label = "Unknown label" + return str(label) + + +class table_M_E_T_A_(DefaultTable.DefaultTable): + + dependencies = [] + + def decompile(self, data, ttFont): + dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self) + self.glyphRecords = [] + for i in range(self.nMetaRecs): + glyphRecord, newData = sstruct.unpack2(METAGlyphRecordFormat, newData, GlyphRecord()) + if self.metaFlags == 0: + [glyphRecord.offset] = struct.unpack(">H", newData[:2]) + newData = newData[2:] + elif self.metaFlags == 1: + [glyphRecord.offset] = struct.unpack(">H", newData[:4]) + newData = newData[4:] + else: + assert 0, "The metaFlags field in the META table header has a value other than 0 or 1 :" + str(self.metaFlags) + glyphRecord.stringRecs = [] + newData = data[glyphRecord.offset:] + for j in range(glyphRecord.nMetaEntry): + stringRec, newData = sstruct.unpack2(METAStringRecordFormat, newData, StringRecord()) + if self.metaFlags == 0: + [stringRec.offset] = struct.unpack(">H", newData[:2]) + newData = newData[2:] + else: + [stringRec.offset] = struct.unpack(">H", newData[:4]) + newData = newData[4:] + stringRec.string = data[stringRec.offset:stringRec.offset + stringRec.stringLen] + glyphRecord.stringRecs.append(stringRec) + self.glyphRecords.append(glyphRecord) + + def compile(self, ttFont): + offsetOK = 0 + self.nMetaRecs = len(self.glyphRecords) + count = 0 + while (offsetOK != 1): + count = count + 1 + if count > 4: + pdb.set_trace() + metaData = sstruct.pack(METAHeaderFormat, self) + stringRecsOffset = len(metaData) + self.nMetaRecs * (6 + 2*(self.metaFlags & 1)) + stringRecSize = (6 + 
2*(self.metaFlags & 1)) + for glyphRec in self.glyphRecords: + glyphRec.offset = stringRecsOffset + if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0): + self.metaFlags = self.metaFlags + 1 + offsetOK = -1 + break + metaData = metaData + glyphRec.compile(self) + stringRecsOffset = stringRecsOffset + (glyphRec.nMetaEntry * stringRecSize) + # this will be the String Record offset for the next GlyphRecord. + if offsetOK == -1: + offsetOK = 0 + continue + + # metaData now contains the header and all of the GlyphRecords. Its length should bw + # the offset to the first StringRecord. + stringOffset = stringRecsOffset + for glyphRec in self.glyphRecords: + assert (glyphRec.offset == len(metaData)), "Glyph record offset did not compile correctly! for rec:" + str(glyphRec) + for stringRec in glyphRec.stringRecs: + stringRec.offset = stringOffset + if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0): + self.metaFlags = self.metaFlags + 1 + offsetOK = -1 + break + metaData = metaData + stringRec.compile(self) + stringOffset = stringOffset + stringRec.stringLen + if offsetOK == -1: + offsetOK = 0 + continue + + if ((self.metaFlags & 1) == 1) and (stringOffset < 65536): + self.metaFlags = self.metaFlags - 1 + continue + else: + offsetOK = 1 + + # metaData now contains the header and all of the GlyphRecords and all of the String Records. + # Its length should be the offset to the first string datum. + for glyphRec in self.glyphRecords: + for stringRec in glyphRec.stringRecs: + assert (stringRec.offset == len(metaData)), "String offset did not compile correctly! 
for string:" + str(stringRec.string) + metaData = metaData + stringRec.string + + return metaData + + def toXML(self, writer, ttFont): + writer.comment("Lengths and number of entries in this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(METAHeaderFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + for glyphRec in self.glyphRecords: + glyphRec.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "GlyphRecord": + if not hasattr(self, "glyphRecords"): + self.glyphRecords = [] + glyphRec = GlyphRecord() + self.glyphRecords.append(glyphRec) + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + glyphRec.fromXML(name, attrs, content, ttFont) + glyphRec.offset = -1 + glyphRec.nMetaEntry = len(glyphRec.stringRecs) + else: + setattr(self, name, safeEval(attrs["value"])) + + +class GlyphRecord(object): + def __init__(self): + self.glyphID = -1 + self.nMetaEntry = -1 + self.offset = -1 + self.stringRecs = [] + + def toXML(self, writer, ttFont): + writer.begintag("GlyphRecord") + writer.newline() + writer.simpletag("glyphID", value=self.glyphID) + writer.newline() + writer.simpletag("nMetaEntry", value=self.nMetaEntry) + writer.newline() + for stringRec in self.stringRecs: + stringRec.toXML(writer, ttFont) + writer.endtag("GlyphRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "StringRecord": + stringRec = StringRecord() + self.stringRecs.append(stringRec) + for element in content: + if isinstance(element, basestring): + continue + stringRec.fromXML(name, attrs, content, ttFont) + stringRec.stringLen = len(stringRec.string) + else: + setattr(self, name, safeEval(attrs["value"])) + + def compile(self, parentTable): + data = sstruct.pack(METAGlyphRecordFormat, self) + if parentTable.metaFlags == 0: + datum = 
struct.pack(">H", self.offset) + elif parentTable.metaFlags == 1: + datum = struct.pack(">L", self.offset) + data = data + datum + return data + + def __repr__(self): + return "GlyphRecord[ glyphID: " + str(self.glyphID) + ", nMetaEntry: " + str(self.nMetaEntry) + ", offset: " + str(self.offset) + " ]" + +# XXX The following two functions are really broken around UTF-8 vs Unicode + +def mapXMLToUTF8(string): + uString = unicode() + strLen = len(string) + i = 0 + while i < strLen: + prefixLen = 0 + if (string[i:i+3] == "&#x"): + prefixLen = 3 + elif (string[i:i+7] == "&#x"): + prefixLen = 7 + if prefixLen: + i = i+prefixLen + j= i + while string[i] != ";": + i = i+1 + valStr = string[j:i] + + uString = uString + unichr(eval('0x' + valStr)) + else: + uString = uString + unichr(byteord(string[i])) + i = i +1 + + return uString.encode('utf_8') + + +def mapUTF8toXML(string): + uString = string.decode('utf_8') + string = "" + for uChar in uString: + i = ord(uChar) + if (i < 0x80) and (i > 0x1F): + string = string + uChar + else: + string = string + "&#x" + hex(i)[2:] + ";" + return string + + +class StringRecord(object): + + def toXML(self, writer, ttFont): + writer.begintag("StringRecord") + writer.newline() + writer.simpletag("labelID", value=self.labelID) + writer.comment(getLabelString(self.labelID)) + writer.newline() + writer.newline() + writer.simpletag("string", value=mapUTF8toXML(self.string)) + writer.newline() + writer.endtag("StringRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + value = attrs["value"] + if name == "string": + self.string = mapXMLToUTF8(value) + else: + setattr(self, name, safeEval(value)) + + def compile(self, parentTable): + data = sstruct.pack(METAStringRecordFormat, self) + if parentTable.metaFlags == 0: + datum = struct.pack(">H", self.offset) + elif parentTable.metaFlags == 1: + datum = 
struct.pack(">L", self.offset) + data = data + datum + return data + + def __repr__(self): + return "StringRecord [ labelID: " + str(self.labelID) + " aka " + getLabelString(self.labelID) \ + + ", offset: " + str(self.offset) + ", length: " + str(self.stringLen) + ", string: " +self.string + " ]" diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_m_e_t_a_test.py fonttools-3.0/Tools/fontTools/ttLib/tables/_m_e_t_a_test.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_m_e_t_a_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_m_e_t_a_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,54 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._m_e_t_a import table__m_e_t_a +import unittest + + +# From a real font on MacOS X, but substituted 'bild' tag by 'TEST', +# and shortened the payload. Note that from the 'meta' spec, one would +# expect that header.dataOffset is 0x0000001C (pointing to the beginning +# of the data section) and that dataMap[0].dataOffset should be 0 (relative +# to the beginning of the data section). However, in the fonts that Apple +# ships on MacOS X 10.10.4, dataMap[0].dataOffset is actually relative +# to the beginning of the 'meta' table, i.e. 0x0000001C again. While the +# following test data is invalid according to the 'meta' specification, +# it is reflecting the 'meta' table structure in all Apple-supplied fonts. 
+META_DATA = deHexStr( + "00 00 00 01 00 00 00 00 00 00 00 1C 00 00 00 01 " + "54 45 53 54 00 00 00 1C 00 00 00 04 CA FE BE EF") + + +class MetaTableTest(unittest.TestCase): + def test_decompile(self): + table = table__m_e_t_a() + table.decompile(META_DATA, ttFont={"meta": table}) + self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) + + def test_compile(self): + table = table__m_e_t_a() + table.data["TEST"] = b"\xCA\xFE\xBE\xEF" + self.assertEqual(META_DATA, table.compile(ttFont={"meta": table})) + + def test_toXML(self): + table = table__m_e_t_a() + table.data["TEST"] = b"\xCA\xFE\xBE\xEF" + writer = XMLWriter(BytesIO()) + table.toXML(writer, {"meta": table}) + xml = writer.file.getvalue().decode("utf-8") + self.assertEqual([ + '', + 'cafebeef', + '' + ], [line.strip() for line in xml.splitlines()][1:]) + + def test_fromXML(self): + table = table__m_e_t_a() + table.fromXML("hexdata", {"tag": "TEST"}, ['cafebeef'], ttFont=None) + self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_n_a_m_e.py fonttools-3.0/Tools/fontTools/ttLib/tables/_n_a_m_e.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_n_a_m_e.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_n_a_m_e.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,262 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from fontTools.misc.encodingTools import getEncoding +from . 
import DefaultTable +import struct + +nameRecordFormat = """ + > # big endian + platformID: H + platEncID: H + langID: H + nameID: H + length: H + offset: H +""" + +nameRecordSize = sstruct.calcsize(nameRecordFormat) + + +class table__n_a_m_e(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + format, n, stringOffset = struct.unpack(">HHH", data[:6]) + expectedStringOffset = 6 + n * nameRecordSize + if stringOffset != expectedStringOffset: + # XXX we need a warn function + print("Warning: 'name' table stringOffset incorrect. Expected: %s; Actual: %s" % (expectedStringOffset, stringOffset)) + stringData = data[stringOffset:] + data = data[6:] + self.names = [] + for i in range(n): + if len(data) < 12: + # compensate for buggy font + break + name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord()) + name.string = stringData[name.offset:name.offset+name.length] + assert len(name.string) == name.length + #if (name.platEncID, name.platformID) in ((0, 0), (1, 3)): + # if len(name.string) % 2: + # print "2-byte string doesn't have even length!" 
+ # print name.__dict__ + del name.offset, name.length + self.names.append(name) + + def compile(self, ttFont): + if not hasattr(self, "names"): + # only happens when there are NO name table entries read + # from the TTX file + self.names = [] + names = self.names + names.sort() # sort according to the spec; see NameRecord.__lt__() + stringData = b"" + format = 0 + n = len(names) + stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat) + data = struct.pack(">HHH", format, n, stringOffset) + lastoffset = 0 + done = {} # remember the data so we can reuse the "pointers" + for name in names: + string = name.toBytes() + if string in done: + name.offset, name.length = done[string] + else: + name.offset, name.length = done[string] = len(stringData), len(string) + stringData = bytesjoin([stringData, string]) + data = data + sstruct.pack(nameRecordFormat, name) + return data + stringData + + def toXML(self, writer, ttFont): + for name in self.names: + name.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name != "namerecord": + return # ignore unknown tags + if not hasattr(self, "names"): + self.names = [] + name = NameRecord() + self.names.append(name) + name.fromXML(name, attrs, content, ttFont) + + def getName(self, nameID, platformID, platEncID, langID=None): + for namerecord in self.names: + if ( namerecord.nameID == nameID and + namerecord.platformID == platformID and + namerecord.platEncID == platEncID): + if langID is None or namerecord.langID == langID: + return namerecord + return None # not found + + def getDebugName(self, nameID): + englishName = someName = None + for name in self.names: + if name.nameID != nameID: + continue + try: + unistr = name.toUnicode() + except UnicodeDecodeError: + continue + + someName = unistr + if (name.platformID, name.langID) in ((1, 0), (3, 0x409)): + englishName = unistr + break + if englishName: + return englishName + elif someName: + return someName + else: + return None + +class 
NameRecord(object): + + def getEncoding(self, default='ascii'): + """Returns the Python encoding name for this name entry based on its platformID, + platEncID, and langID. If encoding for these values is not known, by default + 'ascii' is returned. That can be overriden by passing a value to the default + argument. + """ + return getEncoding(self.platformID, self.platEncID, self.langID, default) + + def encodingIsUnicodeCompatible(self): + return self.getEncoding(None) in ['utf_16_be', 'ucs2be', 'ascii', 'latin1'] + + def __str__(self): + try: + return self.toUnicode() + except UnicodeDecodeError: + return str(self.string) + + def isUnicode(self): + return (self.platformID == 0 or + (self.platformID == 3 and self.platEncID in [0, 1, 10])) + + def toUnicode(self, errors='strict'): + """ + If self.string is a Unicode string, return it; otherwise try decoding the + bytes in self.string to a Unicode string using the encoding of this + entry as returned by self.getEncoding(); Note that self.getEncoding() + returns 'ascii' if the encoding is unknown to the library. + + Certain heuristics are performed to recover data from bytes that are + ill-formed in the chosen encoding, or that otherwise look misencoded + (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE + but marked otherwise). If the bytes are ill-formed and the heuristics fail, + the error is handled according to the errors parameter to this function, which is + passed to the underlying decode() function; by default it throws a + UnicodeDecodeError exception. + + Note: The mentioned heuristics mean that roundtripping a font to XML and back + to binary might recover some misencoded data whereas just loading the font + and saving it back will not change them. 
+ """ + def isascii(b): + return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D] + encoding = self.getEncoding() + string = self.string + + if encoding == 'utf_16_be' and len(string) % 2 == 1: + # Recover badly encoded UTF-16 strings that have an odd number of bytes: + # - If the last byte is zero, drop it. Otherwise, + # - If all the odd bytes are zero and all the even bytes are ASCII, + # prepend one zero byte. Otherwise, + # - If first byte is zero and all other bytes are ASCII, insert zero + # bytes between consecutive ASCII bytes. + # + # (Yes, I've seen all of these in the wild... sigh) + if byteord(string[-1]) == 0: + string = string[:-1] + elif all(byteord(b) == 0 if i % 2 else isascii(byteord(b)) for i,b in enumerate(string)): + string = b'\0' + string + elif byteord(string[0]) == 0 and all(isascii(byteord(b)) for b in string[1:]): + string = bytesjoin(b'\0'+bytechr(byteord(b)) for b in string[1:]) + + string = tounicode(string, encoding=encoding, errors=errors) + + # If decoded strings still looks like UTF-16BE, it suggests a double-encoding. + # Fix it up. + if all(ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i,c in enumerate(string)): + # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text, + # narrow it down. + string = ''.join(c for c in string[1::2]) + + return string + + def toBytes(self, errors='strict'): + """ If self.string is a bytes object, return it; otherwise try encoding + the Unicode string in self.string to bytes using the encoding of this + entry as returned by self.getEncoding(); Note that self.getEncoding() + returns 'ascii' if the encoding is unknown to the library. + + If the Unicode string cannot be encoded to bytes in the chosen encoding, + the error is handled according to the errors parameter to this function, + which is passed to the underlying encode() function; by default it throws a + UnicodeEncodeError exception. 
+ """ + return tobytes(self.string, encoding=self.getEncoding(), errors=errors) + + def toXML(self, writer, ttFont): + try: + unistr = self.toUnicode() + except UnicodeDecodeError: + unistr = None + attrs = [ + ("nameID", self.nameID), + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("langID", hex(self.langID)), + ] + + if unistr is None or not self.encodingIsUnicodeCompatible(): + attrs.append(("unicode", unistr is not None)) + + writer.begintag("namerecord", attrs) + writer.newline() + if unistr is not None: + writer.write(unistr) + else: + writer.write8bit(self.string) + writer.newline() + writer.endtag("namerecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.nameID = safeEval(attrs["nameID"]) + self.platformID = safeEval(attrs["platformID"]) + self.platEncID = safeEval(attrs["platEncID"]) + self.langID = safeEval(attrs["langID"]) + s = strjoin(content).strip() + encoding = self.getEncoding() + if self.encodingIsUnicodeCompatible() or safeEval(attrs.get("unicode", "False")): + self.string = s.encode(encoding) + else: + # This is the inverse of write8bit... + self.string = s.encode("latin1") + + def __lt__(self, other): + if type(self) != type(other): + return NotImplemented + + # implemented so that list.sort() sorts according to the spec. 
+ selfTuple = ( + getattr(self, "platformID", None), + getattr(self, "platEncID", None), + getattr(self, "langID", None), + getattr(self, "nameID", None), + getattr(self, "string", None), + ) + otherTuple = ( + getattr(other, "platformID", None), + getattr(other, "platEncID", None), + getattr(other, "langID", None), + getattr(other, "nameID", None), + getattr(other, "string", None), + ) + return selfTuple < otherTuple + + def __repr__(self): + return "" % ( + self.nameID, self.platformID, self.langID) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_n_a_m_e_test.py fonttools-3.0/Tools/fontTools/ttLib/tables/_n_a_m_e_test.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_n_a_m_e_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_n_a_m_e_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.xmlWriter import XMLWriter +import unittest +from ._n_a_m_e import table__n_a_m_e, NameRecord + + +def makeName(text, nameID, platformID, platEncID, langID): + name = NameRecord() + name.nameID, name.platformID, name.platEncID, name.langID = ( + nameID, platformID, platEncID, langID) + name.string = tobytes(text, encoding=name.getEncoding()) + return name + + +class NameTableTest(unittest.TestCase): + + def test_getDebugName(self): + table = table__n_a_m_e() + table.names = [ + makeName("Bold", 258, 1, 0, 0), # Mac, MacRoman, English + makeName("Gras", 258, 1, 0, 1), # Mac, MacRoman, French + makeName("Fett", 258, 1, 0, 2), # Mac, MacRoman, German + makeName("Sem Fracções", 292, 1, 0, 8) # Mac, MacRoman, Portuguese + ] + self.assertEqual("Bold", table.getDebugName(258)) + self.assertEqual("Sem Fracções", table.getDebugName(292)) + self.assertEqual(None, table.getDebugName(999)) + + +class NameRecordTest(unittest.TestCase): + + def test_toUnicode_utf16be(self): + 
name = makeName("Foo Bold", 111, 0, 2, 7) + self.assertEqual("utf_16_be", name.getEncoding()) + self.assertEqual("Foo Bold", name.toUnicode()) + + def test_toUnicode_macroman(self): + name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman + self.assertEqual("mac_roman", name.getEncoding()) + self.assertEqual("Foo Italic", name.toUnicode()) + + def test_toUnicode_macromanian(self): + name = makeName(b"Foo Italic\xfb", 222, 1, 0, 37) # Mac Romanian + self.assertEqual("mac_romanian", name.getEncoding()) + self.assertEqual("Foo Italic"+unichr(0x02DA), name.toUnicode()) + + def test_toUnicode_UnicodeDecodeError(self): + name = makeName(b"\1", 111, 0, 2, 7) + self.assertEqual("utf_16_be", name.getEncoding()) + self.assertRaises(UnicodeDecodeError, name.toUnicode) + + def toXML(self, name): + writer = XMLWriter(BytesIO()) + name.toXML(writer, ttFont=None) + xml = writer.file.getvalue().decode("utf_8").strip() + return xml.split(writer.newlinestr.decode("utf_8"))[1:] + + def test_toXML_utf16be(self): + name = makeName("Foo Bold", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Foo Bold', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_odd_length1(self): + name = makeName(b"\0F\0o\0o\0", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Foo', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_odd_length2(self): + name = makeName(b"\0Fooz", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Fooz', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_double_encoded(self): + name = makeName(b"\0\0\0F\0\0\0o", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Fo', + '' + ], self.toXML(name)) + + def test_toXML_macroman(self): + name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman + self.assertEqual([ + '', + ' Foo Italic', + '' + ], self.toXML(name)) + + def test_toXML_macroman_actual_utf16be(self): + name = makeName("\0F\0o\0o", 222, 1, 0, 7) + self.assertEqual([ + '', + ' Foo', + '' + ], self.toXML(name)) + + def test_toXML_unknownPlatEncID_nonASCII(self): + name = 
makeName(b"B\x8arli", 333, 1, 9876, 7) # Unknown Mac encodingID + self.assertEqual([ + '', + ' BŠrli', + '' + ], self.toXML(name)) + + def test_toXML_unknownPlatEncID_ASCII(self): + name = makeName(b"Barli", 333, 1, 9876, 7) # Unknown Mac encodingID + self.assertEqual([ + '', + ' Barli', + '' + ], self.toXML(name)) + + def test_encoding_macroman_misc(self): + name = makeName('', 123, 1, 0, 17) # Mac Turkish + self.assertEqual(name.getEncoding(), "mac_turkish") + name.langID = 37 + self.assertEqual(name.getEncoding(), "mac_romanian") + name.langID = 45 # Other + self.assertEqual(name.getEncoding(), "mac_roman") + + def test_extended_mac_encodings(self): + name = makeName(b'\xfe', 123, 1, 1, 0) # Mac Japanese + self.assertEqual(name.toUnicode(), unichr(0x2122)) + + def test_extended_unknown(self): + name = makeName(b'\xfe', 123, 10, 11, 12) + self.assertEqual(name.getEncoding(), "ascii") + self.assertEqual(name.getEncoding(None), None) + self.assertEqual(name.getEncoding(default=None), None) + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/O_S_2f_2.py fonttools-3.0/Tools/fontTools/ttLib/tables/O_S_2f_2.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/O_S_2f_2.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/O_S_2f_2.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,230 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, num2binary, binary2num +from . 
import DefaultTable +import warnings + + +# panose classification + +panoseFormat = """ + bFamilyType: B + bSerifStyle: B + bWeight: B + bProportion: B + bContrast: B + bStrokeVariation: B + bArmStyle: B + bLetterForm: B + bMidline: B + bXHeight: B +""" + +class Panose(object): + + def toXML(self, writer, ttFont): + formatstring, names, fixes = sstruct.getformat(panoseFormat) + for name in names: + writer.simpletag(name, value=getattr(self, name)) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) + + +# 'sfnt' OS/2 and Windows Metrics table - 'OS/2' + +OS2_format_0 = """ + > # big endian + version: H # version + xAvgCharWidth: h # average character width + usWeightClass: H # degree of thickness of strokes + usWidthClass: H # aspect ratio + fsType: h # type flags + ySubscriptXSize: h # subscript horizontal font size + ySubscriptYSize: h # subscript vertical font size + ySubscriptXOffset: h # subscript x offset + ySubscriptYOffset: h # subscript y offset + ySuperscriptXSize: h # superscript horizontal font size + ySuperscriptYSize: h # superscript vertical font size + ySuperscriptXOffset: h # superscript x offset + ySuperscriptYOffset: h # superscript y offset + yStrikeoutSize: h # strikeout size + yStrikeoutPosition: h # strikeout position + sFamilyClass: h # font family class and subclass + panose: 10s # panose classification number + ulUnicodeRange1: L # character range + ulUnicodeRange2: L # character range + ulUnicodeRange3: L # character range + ulUnicodeRange4: L # character range + achVendID: 4s # font vendor identification + fsSelection: H # font selection flags + usFirstCharIndex: H # first unicode character index + usLastCharIndex: H # last unicode character index + sTypoAscender: h # typographic ascender + sTypoDescender: h # typographic descender + sTypoLineGap: h # typographic line gap + usWinAscent: H # Windows ascender + usWinDescent: H # Windows descender +""" + 
+OS2_format_1_addition = """ + ulCodePageRange1: L + ulCodePageRange2: L +""" + +OS2_format_2_addition = OS2_format_1_addition + """ + sxHeight: h + sCapHeight: h + usDefaultChar: H + usBreakChar: H + usMaxContext: H +""" + +OS2_format_5_addition = OS2_format_2_addition + """ + usLowerOpticalPointSize: H + usUpperOpticalPointSize: H +""" + +bigendian = " > # big endian\n" + +OS2_format_1 = OS2_format_0 + OS2_format_1_addition +OS2_format_2 = OS2_format_0 + OS2_format_2_addition +OS2_format_5 = OS2_format_0 + OS2_format_5_addition +OS2_format_1_addition = bigendian + OS2_format_1_addition +OS2_format_2_addition = bigendian + OS2_format_2_addition +OS2_format_5_addition = bigendian + OS2_format_5_addition + + +class table_O_S_2f_2(DefaultTable.DefaultTable): + + """the OS/2 table""" + + def decompile(self, data, ttFont): + dummy, data = sstruct.unpack2(OS2_format_0, data, self) + + if self.version == 1: + dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self) + elif self.version in (2, 3, 4): + dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self) + elif self.version == 5: + dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self) + self.usLowerOpticalPointSize /= 20 + self.usUpperOpticalPointSize /= 20 + elif self.version != 0: + from fontTools import ttLib + raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version) + if len(data): + warnings.warn("too much 'OS/2' table data") + + self.panose = sstruct.unpack(panoseFormat, self.panose, Panose()) + + def compile(self, ttFont): + self.updateFirstAndLastCharIndex(ttFont) + panose = self.panose + self.panose = sstruct.pack(panoseFormat, self.panose) + if self.version == 0: + data = sstruct.pack(OS2_format_0, self) + elif self.version == 1: + data = sstruct.pack(OS2_format_1, self) + elif self.version in (2, 3, 4): + data = sstruct.pack(OS2_format_2, self) + elif self.version == 5: + d = self.__dict__.copy() + d['usLowerOpticalPointSize'] = 
int(round(self.usLowerOpticalPointSize * 20)) + d['usUpperOpticalPointSize'] = int(round(self.usUpperOpticalPointSize * 20)) + data = sstruct.pack(OS2_format_5, d) + else: + from fontTools import ttLib + raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version) + self.panose = panose + return data + + def toXML(self, writer, ttFont): + writer.comment( + "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n" + "will be recalculated by the compiler") + writer.newline() + if self.version == 1: + format = OS2_format_1 + elif self.version in (2, 3, 4): + format = OS2_format_2 + elif self.version == 5: + format = OS2_format_5 + else: + format = OS2_format_0 + formatstring, names, fixes = sstruct.getformat(format) + for name in names: + value = getattr(self, name) + if name=="panose": + writer.begintag("panose") + writer.newline() + value.toXML(writer, ttFont) + writer.endtag("panose") + elif name in ("ulUnicodeRange1", "ulUnicodeRange2", + "ulUnicodeRange3", "ulUnicodeRange4", + "ulCodePageRange1", "ulCodePageRange2"): + writer.simpletag(name, value=num2binary(value)) + elif name in ("fsType", "fsSelection"): + writer.simpletag(name, value=num2binary(value, 16)) + elif name == "achVendID": + writer.simpletag(name, value=repr(value)[1:-1]) + else: + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "panose": + self.panose = panose = Panose() + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + panose.fromXML(name, attrs, content, ttFont) + elif name in ("ulUnicodeRange1", "ulUnicodeRange2", + "ulUnicodeRange3", "ulUnicodeRange4", + "ulCodePageRange1", "ulCodePageRange2", + "fsType", "fsSelection"): + setattr(self, name, binary2num(attrs["value"])) + elif name == "achVendID": + setattr(self, name, safeEval("'''" + attrs["value"] + "'''")) + else: + setattr(self, name, safeEval(attrs["value"])) + + def updateFirstAndLastCharIndex(self, 
ttFont): + codes = set() + for table in ttFont['cmap'].tables: + if table.isUnicode(): + codes.update(table.cmap.keys()) + if codes: + minCode = min(codes) + maxCode = max(codes) + # USHORT cannot hold codepoints greater than 0xFFFF + self.usFirstCharIndex = 0xFFFF if minCode > 0xFFFF else minCode + self.usLastCharIndex = 0xFFFF if maxCode > 0xFFFF else maxCode + + # misspelled attributes kept for legacy reasons + + @property + def usMaxContex(self): + return self.usMaxContext + + @usMaxContex.setter + def usMaxContex(self, value): + self.usMaxContext = value + + @property + def fsFirstCharIndex(self): + return self.usFirstCharIndex + + @fsFirstCharIndex.setter + def fsFirstCharIndex(self, value): + self.usFirstCharIndex = value + + @property + def fsLastCharIndex(self): + return self.usLastCharIndex + + @fsLastCharIndex.setter + def fsLastCharIndex(self, value): + self.usLastCharIndex = value diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/otBase.py fonttools-3.0/Tools/fontTools/ttLib/tables/otBase.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/otBase.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/otBase.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,901 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .DefaultTable import DefaultTable +import struct + +class OverflowErrorRecord(object): + def __init__(self, overflowTuple): + self.tableType = overflowTuple[0] + self.LookupListIndex = overflowTuple[1] + self.SubTableIndex = overflowTuple[2] + self.itemName = overflowTuple[3] + self.itemIndex = overflowTuple[4] + + def __repr__(self): + return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex)) + +class OTLOffsetOverflowError(Exception): + def __init__(self, overflowErrorRecord): + self.value = overflowErrorRecord + + def __str__(self): + return 
repr(self.value) + + +class BaseTTXConverter(DefaultTable): + + """Generic base class for TTX table converters. It functions as an + adapter between the TTX (ttLib actually) table model and the model + we use for OpenType tables, which is necessarily subtly different. + """ + + def decompile(self, data, font): + from . import otTables + cachingStats = None if True else {} + class GlobalState(object): + def __init__(self, tableType, cachingStats): + self.tableType = tableType + self.cachingStats = cachingStats + globalState = GlobalState(tableType=self.tableTag, + cachingStats=cachingStats) + reader = OTTableReader(data, globalState) + tableClass = getattr(otTables, self.tableTag) + self.table = tableClass() + self.table.decompile(reader, font) + if cachingStats: + stats = sorted([(v, k) for k, v in cachingStats.items()]) + stats.reverse() + print("cachingsstats for ", self.tableTag) + for v, k in stats: + if v < 2: + break + print(v, k) + print("---", len(stats)) + + def compile(self, font): + """ Create a top-level OTFWriter for the GPOS/GSUB table. + Call the compile method for the the table + for each 'converter' record in the table converter list + call converter's write method for each item in the value. + - For simple items, the write method adds a string to the + writer's self.items list. + - For Struct/Table/Subtable items, it add first adds new writer to the + to the writer's self.items, then calls the item's compile method. + This creates a tree of writers, rooted at the GUSB/GPOS writer, with + each writer representing a table, and the writer.items list containing + the child data strings and writers. + call the getAllData method + call _doneWriting, which removes duplicates + call _gatherTables. 
This traverses the tables, adding unique occurences to a flat list of tables + Traverse the flat list of tables, calling getDataLength on each to update their position + Traverse the flat list of tables again, calling getData each get the data in the table, now that + pos's and offset are known. + + If a lookup subtable overflows an offset, we have to start all over. + """ + class GlobalState(object): + def __init__(self, tableType): + self.tableType = tableType + globalState = GlobalState(tableType=self.tableTag) + overflowRecord = None + + while True: + try: + writer = OTTableWriter(globalState) + self.table.compile(writer, font) + return writer.getAllData() + + except OTLOffsetOverflowError as e: + + if overflowRecord == e.value: + raise # Oh well... + + overflowRecord = e.value + print("Attempting to fix OTLOffsetOverflowError", e) + lastItem = overflowRecord + + ok = 0 + if overflowRecord.itemName is None: + from .otTables import fixLookupOverFlows + ok = fixLookupOverFlows(font, overflowRecord) + else: + from .otTables import fixSubTableOverFlows + ok = fixSubTableOverFlows(font, overflowRecord) + if not ok: + raise + + def toXML(self, writer, font): + self.table.toXML2(writer, font) + + def fromXML(self, name, attrs, content, font): + from . 
import otTables + if not hasattr(self, "table"): + tableClass = getattr(otTables, self.tableTag) + self.table = tableClass() + self.table.fromXML(name, attrs, content, font) + + +class OTTableReader(object): + + """Helper class to retrieve data from an OpenType table.""" + + __slots__ = ('data', 'offset', 'pos', 'globalState', 'localState') + + def __init__(self, data, globalState={}, localState=None, offset=0): + self.data = data + self.offset = offset + self.pos = offset + self.globalState = globalState + self.localState = localState + + def advance(self, count): + self.pos += count + def seek(self, pos): + self.pos = pos + + def copy(self): + other = self.__class__(self.data, self.globalState, self.localState, self.offset) + other.pos = self.pos + return other + + def getSubReader(self, offset): + offset = self.offset + offset + cachingStats = self.globalState.cachingStats + if cachingStats is not None: + cachingStats[offset] = cachingStats.get(offset, 0) + 1 + return self.__class__(self.data, self.globalState, self.localState, offset) + + def readUShort(self): + pos = self.pos + newpos = pos + 2 + value, = struct.unpack(">H", self.data[pos:newpos]) + self.pos = newpos + return value + + def readShort(self): + pos = self.pos + newpos = pos + 2 + value, = struct.unpack(">h", self.data[pos:newpos]) + self.pos = newpos + return value + + def readLong(self): + pos = self.pos + newpos = pos + 4 + value, = struct.unpack(">l", self.data[pos:newpos]) + self.pos = newpos + return value + + def readUInt24(self): + pos = self.pos + newpos = pos + 3 + value, = struct.unpack(">l", b'\0'+self.data[pos:newpos]) + self.pos = newpos + return value + + def readULong(self): + pos = self.pos + newpos = pos + 4 + value, = struct.unpack(">L", self.data[pos:newpos]) + self.pos = newpos + return value + + def readTag(self): + pos = self.pos + newpos = pos + 4 + value = Tag(self.data[pos:newpos]) + assert len(value) == 4 + self.pos = newpos + return value + + def readData(self, count): 
+ pos = self.pos + newpos = pos + count + value = self.data[pos:newpos] + self.pos = newpos + return value + + def __setitem__(self, name, value): + state = self.localState.copy() if self.localState else dict() + state[name] = value + self.localState = state + + def __getitem__(self, name): + return self.localState and self.localState[name] + + def __contains__(self, name): + return self.localState and name in self.localState + + +class OTTableWriter(object): + + """Helper class to gather and assemble data for OpenType tables.""" + + def __init__(self, globalState, localState=None): + self.items = [] + self.pos = None + self.globalState = globalState + self.localState = localState + self.longOffset = False + self.parent = None + + def __setitem__(self, name, value): + state = self.localState.copy() if self.localState else dict() + state[name] = value + self.localState = state + + def __getitem__(self, name): + return self.localState[name] + + # assembler interface + + def getAllData(self): + """Assemble all data, including all subtables.""" + self._doneWriting() + tables, extTables = self._gatherTables() + tables.reverse() + extTables.reverse() + # Gather all data in two passes: the absolute positions of all + # subtable are needed before the actual data can be assembled. 
+ pos = 0 + for table in tables: + table.pos = pos + pos = pos + table.getDataLength() + + for table in extTables: + table.pos = pos + pos = pos + table.getDataLength() + + data = [] + for table in tables: + tableData = table.getData() + data.append(tableData) + + for table in extTables: + tableData = table.getData() + data.append(tableData) + + return bytesjoin(data) + + def getDataLength(self): + """Return the length of this table in bytes, without subtables.""" + l = 0 + for item in self.items: + if hasattr(item, "getData") or hasattr(item, "getCountData"): + if item.longOffset: + l = l + 4 # sizeof(ULong) + else: + l = l + 2 # sizeof(UShort) + else: + l = l + len(item) + return l + + def getData(self): + """Assemble the data for this writer/table, without subtables.""" + items = list(self.items) # make a shallow copy + pos = self.pos + numItems = len(items) + for i in range(numItems): + item = items[i] + + if hasattr(item, "getData"): + if item.longOffset: + items[i] = packULong(item.pos - pos) + else: + try: + items[i] = packUShort(item.pos - pos) + except struct.error: + # provide data to fix overflow problem. + # If the overflow is to a lookup, or from a lookup to a subtable, + # just report the current item. Otherwise... + if self.name not in [ 'LookupList', 'Lookup']: + # overflow is within a subTable. Life is more complicated. + # If we split the sub-table just before the current item, we may still suffer overflow. + # This is because duplicate table merging is done only within an Extension subTable tree; + # when we split the subtable in two, some items may no longer be duplicates. + # Get worst case by adding up all the item lengths, depth first traversal. + # and then report the first item that overflows a short. 
+ def getDeepItemLength(table): + if hasattr(table, "getDataLength"): + length = 0 + for item in table.items: + length = length + getDeepItemLength(item) + else: + length = len(table) + return length + + length = self.getDataLength() + if hasattr(self, "sortCoverageLast") and item.name == "Coverage": + # Coverage is first in the item list, but last in the table list, + # The original overflow is really in the item list. Skip the Coverage + # table in the following test. + items = items[i+1:] + + for j in range(len(items)): + item = items[j] + length = length + getDeepItemLength(item) + if length > 65535: + break + overflowErrorRecord = self.getOverflowErrorRecord(item) + + raise OTLOffsetOverflowError(overflowErrorRecord) + + return bytesjoin(items) + + def __hash__(self): + # only works after self._doneWriting() has been called + return hash(self.items) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.items == other.items + + def _doneWriting(self, internedTables=None): + # Convert CountData references to data string items + # collapse duplicate table references to a unique entry + # "tables" are OTTableWriter objects. + + # For Extension Lookup types, we can + # eliminate duplicates only within the tree under the Extension Lookup, + # as offsets may exceed 64K even between Extension LookupTable subtables. 
+ if internedTables is None: + internedTables = {} + items = self.items + iRange = list(range(len(items))) + + if hasattr(self, "Extension"): + newTree = 1 + else: + newTree = 0 + for i in iRange: + item = items[i] + if hasattr(item, "getCountData"): + items[i] = item.getCountData() + elif hasattr(item, "getData"): + if newTree: + item._doneWriting() + else: + item._doneWriting(internedTables) + internedItem = internedTables.get(item) + if internedItem: + items[i] = item = internedItem + else: + internedTables[item] = item + self.items = tuple(items) + + def _gatherTables(self, tables=None, extTables=None, done=None): + # Convert table references in self.items tree to a flat + # list of tables in depth-first traversal order. + # "tables" are OTTableWriter objects. + # We do the traversal in reverse order at each level, in order to + # resolve duplicate references to be the last reference in the list of tables. + # For extension lookups, duplicate references can be merged only within the + # writer tree under the extension lookup. + if tables is None: # init call for first time. + tables = [] + extTables = [] + done = {} + + done[self] = 1 + + numItems = len(self.items) + iRange = list(range(numItems)) + iRange.reverse() + + if hasattr(self, "Extension"): + appendExtensions = 1 + else: + appendExtensions = 0 + + # add Coverage table if it is sorted last. 
+ sortCoverageLast = 0 + if hasattr(self, "sortCoverageLast"): + # Find coverage table + for i in range(numItems): + item = self.items[i] + if hasattr(item, "name") and (item.name == "Coverage"): + sortCoverageLast = 1 + break + if item not in done: + item._gatherTables(tables, extTables, done) + else: + # We're a new parent of item + pass + + for i in iRange: + item = self.items[i] + if not hasattr(item, "getData"): + continue + + if sortCoverageLast and (i==1) and item.name == 'Coverage': + # we've already 'gathered' it above + continue + + if appendExtensions: + assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables" + newDone = {} + item._gatherTables(extTables, None, newDone) + + elif item not in done: + item._gatherTables(tables, extTables, done) + else: + # We're a new parent of item + pass + + tables.append(self) + return tables, extTables + + # interface for gathering data, as used by table.compile() + + def getSubWriter(self): + subwriter = self.__class__(self.globalState, self.localState) + subwriter.parent = self # because some subtables have idential values, we discard + # the duplicates under the getAllData method. Hence some + # subtable writers can have more than one parent writer. + # But we just care about first one right now. 
+ return subwriter + + def writeUShort(self, value): + assert 0 <= value < 0x10000 + self.items.append(struct.pack(">H", value)) + + def writeShort(self, value): + self.items.append(struct.pack(">h", value)) + + def writeUInt24(self, value): + assert 0 <= value < 0x1000000 + b = struct.pack(">L", value) + self.items.append(b[1:]) + + def writeLong(self, value): + self.items.append(struct.pack(">l", value)) + + def writeULong(self, value): + self.items.append(struct.pack(">L", value)) + + def writeTag(self, tag): + tag = Tag(tag).tobytes() + assert len(tag) == 4 + self.items.append(tag) + + def writeSubTable(self, subWriter): + self.items.append(subWriter) + + def writeCountReference(self, table, name): + ref = CountReference(table, name) + self.items.append(ref) + return ref + + def writeStruct(self, format, values): + data = struct.pack(*(format,) + values) + self.items.append(data) + + def writeData(self, data): + self.items.append(data) + + def getOverflowErrorRecord(self, item): + LookupListIndex = SubTableIndex = itemName = itemIndex = None + if self.name == 'LookupList': + LookupListIndex = item.repeatIndex + elif self.name == 'Lookup': + LookupListIndex = self.repeatIndex + SubTableIndex = item.repeatIndex + else: + itemName = item.name + if hasattr(item, 'repeatIndex'): + itemIndex = item.repeatIndex + if self.name == 'SubTable': + LookupListIndex = self.parent.repeatIndex + SubTableIndex = self.repeatIndex + elif self.name == 'ExtSubTable': + LookupListIndex = self.parent.parent.repeatIndex + SubTableIndex = self.parent.repeatIndex + else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable. 
+ itemName = ".".join([self.name, item.name]) + p1 = self.parent + while p1 and p1.name not in ['ExtSubTable', 'SubTable']: + itemName = ".".join([p1.name, item.name]) + p1 = p1.parent + if p1: + if p1.name == 'ExtSubTable': + LookupListIndex = p1.parent.parent.repeatIndex + SubTableIndex = p1.parent.repeatIndex + else: + LookupListIndex = p1.parent.repeatIndex + SubTableIndex = p1.repeatIndex + + return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) ) + + +class CountReference(object): + """A reference to a Count value, not a count of references.""" + def __init__(self, table, name): + self.table = table + self.name = name + def setValue(self, value): + table = self.table + name = self.name + if table[name] is None: + table[name] = value + else: + assert table[name] == value, (name, table[name], value) + def getCountData(self): + return packUShort(self.table[self.name]) + + +def packUShort(value): + return struct.pack(">H", value) + + +def packULong(value): + assert 0 <= value < 0x100000000, value + return struct.pack(">L", value) + + +class BaseTable(object): + + """Generic base class for all OpenType (sub)tables.""" + + def __getattr__(self, attr): + reader = self.__dict__.get("reader") + if reader: + del self.reader + font = self.font + del self.font + self.decompile(reader, font) + return getattr(self, attr) + + raise AttributeError(attr) + + def ensureDecompiled(self): + reader = self.__dict__.get("reader") + if reader: + del self.reader + font = self.font + del self.font + self.decompile(reader, font) + + @classmethod + def getRecordSize(cls, reader): + totalSize = 0 + for conv in cls.converters: + size = conv.getRecordSize(reader) + if size is NotImplemented: return NotImplemented + countValue = 1 + if conv.repeat: + if conv.repeat in reader: + countValue = reader[conv.repeat] + else: + return NotImplemented + totalSize += size * countValue + return totalSize + + def getConverters(self): + return 
self.converters + + def getConverterByName(self, name): + return self.convertersByName[name] + + def decompile(self, reader, font): + self.readFormat(reader) + table = {} + self.__rawTable = table # for debugging + converters = self.getConverters() + for conv in converters: + if conv.name == "SubTable": + conv = conv.getConverter(reader.globalState.tableType, + table["LookupType"]) + if conv.name == "ExtSubTable": + conv = conv.getConverter(reader.globalState.tableType, + table["ExtensionLookupType"]) + if conv.name == "FeatureParams": + conv = conv.getConverter(reader["FeatureTag"]) + if conv.repeat: + if conv.repeat in table: + countValue = table[conv.repeat] + else: + # conv.repeat is a propagated count + countValue = reader[conv.repeat] + countValue += conv.aux + table[conv.name] = conv.readArray(reader, font, table, countValue) + else: + if conv.aux and not eval(conv.aux, None, table): + continue + table[conv.name] = conv.read(reader, font, table) + if conv.isPropagated: + reader[conv.name] = table[conv.name] + + self.postRead(table, font) + + del self.__rawTable # succeeded, get rid of debugging info + + def compile(self, writer, font): + self.ensureDecompiled() + table = self.preWrite(font) + + if hasattr(self, 'sortCoverageLast'): + writer.sortCoverageLast = 1 + + if hasattr(self.__class__, 'LookupType'): + writer['LookupType'].setValue(self.__class__.LookupType) + + self.writeFormat(writer) + for conv in self.getConverters(): + value = table.get(conv.name) + if conv.repeat: + if value is None: + value = [] + countValue = len(value) - conv.aux + if conv.repeat in table: + CountReference(table, conv.repeat).setValue(countValue) + else: + # conv.repeat is a propagated count + writer[conv.repeat].setValue(countValue) + conv.writeArray(writer, font, table, value) + elif conv.isCount: + # Special-case Count values. + # Assumption: a Count field will *always* precede + # the actual array(s). 
+ # We need a default value, as it may be set later by a nested + # table. We will later store it here. + # We add a reference: by the time the data is assembled + # the Count value will be filled in. + ref = writer.writeCountReference(table, conv.name) + table[conv.name] = None + if conv.isPropagated: + writer[conv.name] = ref + elif conv.isLookupType: + ref = writer.writeCountReference(table, conv.name) + table[conv.name] = None + writer['LookupType'] = ref + else: + if conv.aux and not eval(conv.aux, None, table): + continue + conv.write(writer, font, table, value) + if conv.isPropagated: + writer[conv.name] = value + + def readFormat(self, reader): + pass + + def writeFormat(self, writer): + pass + + def postRead(self, table, font): + self.__dict__.update(table) + + def preWrite(self, font): + return self.__dict__.copy() + + def toXML(self, xmlWriter, font, attrs=None, name=None): + tableName = name if name else self.__class__.__name__ + if attrs is None: + attrs = [] + if hasattr(self, "Format"): + attrs = attrs + [("Format", self.Format)] + xmlWriter.begintag(tableName, attrs) + xmlWriter.newline() + self.toXML2(xmlWriter, font) + xmlWriter.endtag(tableName) + xmlWriter.newline() + + def toXML2(self, xmlWriter, font): + # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB). + # This is because in TTX our parent writes our main tag, and in otBase.py we + # do it ourselves. I think I'm getting schizophrenic... 
+ for conv in self.getConverters(): + if conv.repeat: + value = getattr(self, conv.name) + for i in range(len(value)): + item = value[i] + conv.xmlWrite(xmlWriter, font, item, conv.name, + [("index", i)]) + else: + if conv.aux and not eval(conv.aux, None, vars(self)): + continue + value = getattr(self, conv.name) + conv.xmlWrite(xmlWriter, font, value, conv.name, []) + + def fromXML(self, name, attrs, content, font): + try: + conv = self.getConverterByName(name) + except KeyError: + raise # XXX on KeyError, raise nice error + value = conv.xmlRead(attrs, content, font) + if conv.repeat: + seq = getattr(self, conv.name, None) + if seq is None: + seq = [] + setattr(self, conv.name, seq) + seq.append(value) + else: + setattr(self, conv.name, value) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + + self.ensureDecompiled() + other.ensureDecompiled() + + return self.__dict__ == other.__dict__ + + +class FormatSwitchingBaseTable(BaseTable): + + """Minor specialization of BaseTable, for tables that have multiple + formats, eg. CoverageFormat1 vs. CoverageFormat2.""" + + @classmethod + def getRecordSize(cls, reader): + return NotImplemented + + def getConverters(self): + return self.converters[self.Format] + + def getConverterByName(self, name): + return self.convertersByName[self.Format][name] + + def readFormat(self, reader): + self.Format = reader.readUShort() + assert self.Format != 0, (self, reader.pos, len(reader.data)) + + def writeFormat(self, writer): + writer.writeUShort(self.Format) + + def toXML(self, xmlWriter, font, attrs=None, name=None): + BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) + + +# +# Support for ValueRecords +# +# This data type is so different from all other OpenType data types that +# it requires quite a bit of code for itself. It even has special support +# in OTTableReader and OTTableWriter... 
+# + +valueRecordFormat = [ +# Mask Name isDevice signed + (0x0001, "XPlacement", 0, 1), + (0x0002, "YPlacement", 0, 1), + (0x0004, "XAdvance", 0, 1), + (0x0008, "YAdvance", 0, 1), + (0x0010, "XPlaDevice", 1, 0), + (0x0020, "YPlaDevice", 1, 0), + (0x0040, "XAdvDevice", 1, 0), + (0x0080, "YAdvDevice", 1, 0), +# reserved: + (0x0100, "Reserved1", 0, 0), + (0x0200, "Reserved2", 0, 0), + (0x0400, "Reserved3", 0, 0), + (0x0800, "Reserved4", 0, 0), + (0x1000, "Reserved5", 0, 0), + (0x2000, "Reserved6", 0, 0), + (0x4000, "Reserved7", 0, 0), + (0x8000, "Reserved8", 0, 0), +] + +def _buildDict(): + d = {} + for mask, name, isDevice, signed in valueRecordFormat: + d[name] = mask, isDevice, signed + return d + +valueRecordFormatDict = _buildDict() + + +class ValueRecordFactory(object): + + """Given a format code, this object convert ValueRecords.""" + + def __init__(self, valueFormat): + format = [] + for mask, name, isDevice, signed in valueRecordFormat: + if valueFormat & mask: + format.append((name, isDevice, signed)) + self.format = format + + def __len__(self): + return len(self.format) + + def readValueRecord(self, reader, font): + format = self.format + if not format: + return None + valueRecord = ValueRecord() + for name, isDevice, signed in format: + if signed: + value = reader.readShort() + else: + value = reader.readUShort() + if isDevice: + if value: + from . 
import otTables + subReader = reader.getSubReader(value) + value = getattr(otTables, name)() + value.decompile(subReader, font) + else: + value = None + setattr(valueRecord, name, value) + return valueRecord + + def writeValueRecord(self, writer, font, valueRecord): + for name, isDevice, signed in self.format: + value = getattr(valueRecord, name, 0) + if isDevice: + if value: + subWriter = writer.getSubWriter() + writer.writeSubTable(subWriter) + value.compile(subWriter, font) + else: + writer.writeUShort(0) + elif signed: + writer.writeShort(value) + else: + writer.writeUShort(value) + + +class ValueRecord(object): + + # see ValueRecordFactory + + def getFormat(self): + format = 0 + for name in self.__dict__.keys(): + format = format | valueRecordFormatDict[name][0] + return format + + def toXML(self, xmlWriter, font, valueName, attrs=None): + if attrs is None: + simpleItems = [] + else: + simpleItems = list(attrs) + for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values + if hasattr(self, name): + simpleItems.append((name, getattr(self, name))) + deviceItems = [] + for mask, name, isDevice, format in valueRecordFormat[4:8]: # device records + if hasattr(self, name): + device = getattr(self, name) + if device is not None: + deviceItems.append((name, device)) + if deviceItems: + xmlWriter.begintag(valueName, simpleItems) + xmlWriter.newline() + for name, deviceRecord in deviceItems: + if deviceRecord is not None: + deviceRecord.toXML(xmlWriter, font) + xmlWriter.endtag(valueName) + xmlWriter.newline() + else: + xmlWriter.simpletag(valueName, simpleItems) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + from . 
import otTables + for k, v in attrs.items(): + setattr(self, k, int(v)) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + value = getattr(otTables, name)() + for elem2 in content: + if not isinstance(elem2, tuple): + continue + name2, attrs2, content2 = elem2 + value.fromXML(name2, attrs2, content2, font) + setattr(self, name, value) + + def __ne__(self, other): + return not self.__eq__(other) + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/otConverters.py fonttools-3.0/Tools/fontTools/ttLib/tables/otConverters.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/otConverters.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/otConverters.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,481 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from .otBase import ValueRecordFactory +import array + + +def buildConverters(tableSpec, tableNamespace): + """Given a table spec from otData.py, build a converter object for each + field of the table. 
This is called for each table in otData.py, and + the results are assigned to the corresponding class in otTables.py.""" + converters = [] + convertersByName = {} + for tp, name, repeat, aux, descr in tableSpec: + tableName = name + if name.startswith("ValueFormat"): + assert tp == "uint16" + converterClass = ValueFormat + elif name.endswith("Count") or name.endswith("LookupType"): + assert tp == "uint16" + converterClass = ComputedUShort + elif name == "SubTable": + converterClass = SubTable + elif name == "ExtSubTable": + converterClass = ExtSubTable + elif name == "FeatureParams": + converterClass = FeatureParams + else: + if not tp in converterMapping: + tableName = tp + converterClass = Struct + else: + converterClass = converterMapping[tp] + tableClass = tableNamespace.get(tableName) + conv = converterClass(name, repeat, aux, tableClass) + if name in ["SubTable", "ExtSubTable"]: + conv.lookupTypes = tableNamespace['lookupTypes'] + # also create reverse mapping + for t in conv.lookupTypes.values(): + for cls in t.values(): + convertersByName[cls.__name__] = Table(name, repeat, aux, cls) + if name == "FeatureParams": + conv.featureParamTypes = tableNamespace['featureParamTypes'] + conv.defaultFeatureParams = tableNamespace['FeatureParams'] + for cls in conv.featureParamTypes.values(): + convertersByName[cls.__name__] = Table(name, repeat, aux, cls) + converters.append(conv) + assert name not in convertersByName, name + convertersByName[name] = conv + return converters, convertersByName + + +class _MissingItem(tuple): + __slots__ = () + +try: + from collections import UserList +except: + from UserList import UserList + +class _LazyList(UserList): + + def __getslice__(self, i, j): + return self.__getitem__(slice(i, j)) + def __getitem__(self, k): + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + return [self[i] for i in indices] + item = self.data[k] + if isinstance(item, _MissingItem): + self.reader.seek(self.pos + item[0] * self.recordSize) 
+ item = self.conv.read(self.reader, self.font, {}) + self.data[k] = item + return item + +class BaseConverter(object): + + """Base class for converter objects. Apart from the constructor, this + is an abstract class.""" + + def __init__(self, name, repeat, aux, tableClass): + self.name = name + self.repeat = repeat + self.aux = aux + self.tableClass = tableClass + self.isCount = name.endswith("Count") + self.isLookupType = name.endswith("LookupType") + self.isPropagated = name in ["ClassCount", "Class2Count", "FeatureTag", "SettingsCount", "AxisCount"] + + def readArray(self, reader, font, tableDict, count): + """Read an array of values from the reader.""" + lazy = font.lazy and count > 8 + if lazy: + recordSize = self.getRecordSize(reader) + if recordSize is NotImplemented: + lazy = False + if not lazy: + l = [] + for i in range(count): + l.append(self.read(reader, font, tableDict)) + return l + else: + l = _LazyList() + l.reader = reader.copy() + l.pos = l.reader.pos + l.font = font + l.conv = self + l.recordSize = recordSize + l.extend(_MissingItem([i]) for i in range(count)) + reader.advance(count * recordSize) + return l + + def getRecordSize(self, reader): + if hasattr(self, 'staticSize'): return self.staticSize + return NotImplemented + + def read(self, reader, font, tableDict): + """Read a value from the reader.""" + raise NotImplementedError(self) + + def writeArray(self, writer, font, tableDict, values): + for i in range(len(values)): + self.write(writer, font, tableDict, values[i], i) + + def write(self, writer, font, tableDict, value, repeatIndex=None): + """Write a value to the writer.""" + raise NotImplementedError(self) + + def xmlRead(self, attrs, content, font): + """Read a value from XML.""" + raise NotImplementedError(self) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + """Write a value to XML.""" + raise NotImplementedError(self) + + +class SimpleValue(BaseConverter): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + 
xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + def xmlRead(self, attrs, content, font): + return attrs["value"] + +class IntValue(SimpleValue): + def xmlRead(self, attrs, content, font): + return int(attrs["value"], 0) + +class Long(IntValue): + staticSize = 4 + def read(self, reader, font, tableDict): + return reader.readLong() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeLong(value) + +class ULong(IntValue): + staticSize = 4 + def read(self, reader, font, tableDict): + return reader.readULong() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeULong(value) + +class Short(IntValue): + staticSize = 2 + def read(self, reader, font, tableDict): + return reader.readShort() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeShort(value) + +class UShort(IntValue): + staticSize = 2 + def read(self, reader, font, tableDict): + return reader.readUShort() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUShort(value) + +class UInt24(IntValue): + staticSize = 3 + def read(self, reader, font, tableDict): + return reader.readUInt24() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUInt24(value) + +class ComputedUShort(UShort): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.comment("%s=%s" % (name, value)) + xmlWriter.newline() + +class Tag(SimpleValue): + staticSize = 4 + def read(self, reader, font, tableDict): + return reader.readTag() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeTag(value) + +class GlyphID(SimpleValue): + staticSize = 2 + def readArray(self, reader, font, tableDict, count): + glyphOrder = font.getGlyphOrder() + gids = array.array("H", reader.readData(2 * count)) + if sys.byteorder != "big": + gids.byteswap() + try: + l = [glyphOrder[gid] for gid in gids] + except IndexError: + # Slower, but will 
not throw an IndexError on an invalid glyph id. + l = [font.getGlyphName(gid) for gid in gids] + return l + def read(self, reader, font, tableDict): + return font.getGlyphName(reader.readUShort()) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUShort(font.getGlyphID(value)) + +class FloatValue(SimpleValue): + def xmlRead(self, attrs, content, font): + return float(attrs["value"]) + +class DeciPoints(FloatValue): + staticSize = 2 + def read(self, reader, font, tableDict): + return reader.readUShort() / 10 + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUShort(int(round(value * 10))) + +class Fixed(FloatValue): + staticSize = 4 + def read(self, reader, font, tableDict): + return fi2fl(reader.readLong(), 16) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeLong(fl2fi(value, 16)) + +class Version(BaseConverter): + staticSize = 4 + def read(self, reader, font, tableDict): + value = reader.readLong() + assert (value >> 16) == 1, "Unsupported version 0x%08x" % value + return fi2fl(value, 16) + def write(self, writer, font, tableDict, value, repeatIndex=None): + if value < 0x10000: + value = fl2fi(value, 16) + value = int(round(value)) + assert (value >> 16) == 1, "Unsupported version 0x%08x" % value + writer.writeLong(value) + def xmlRead(self, attrs, content, font): + value = attrs["value"] + value = float(int(value, 0)) if value.startswith("0") else float(value) + if value >= 0x10000: + value = fi2fl(value, 16) + return value + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value >= 0x10000: + value = fi2fl(value, 16) + if value % 1 != 0: + # Write as hex + value = "0x%08x" % fl2fi(value, 16) + xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + + +class Struct(BaseConverter): + + def getRecordSize(self, reader): + return self.tableClass and self.tableClass.getRecordSize(reader) + + def read(self, reader, font, tableDict): + 
table = self.tableClass() + table.decompile(reader, font) + return table + + def write(self, writer, font, tableDict, value, repeatIndex=None): + value.compile(writer, font) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value is None: + if attrs: + # If there are attributes (probably index), then + # don't drop this even if it's NULL. It will mess + # up the array indices of the containing element. + xmlWriter.simpletag(name, attrs + [("empty", 1)]) + xmlWriter.newline() + else: + pass # NULL table, ignore + else: + value.toXML(xmlWriter, font, attrs, name=name) + + def xmlRead(self, attrs, content, font): + if "empty" in attrs and safeEval(attrs["empty"]): + return None + table = self.tableClass() + Format = attrs.get("Format") + if Format is not None: + table.Format = int(Format) + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + table.fromXML(name, attrs, content, font) + else: + pass + return table + + def __repr__(self): + return "Struct of " + repr(self.tableClass) + + +class Table(Struct): + + longOffset = False + staticSize = 2 + + def readOffset(self, reader): + return reader.readUShort() + + def writeNullOffset(self, writer): + if self.longOffset: + writer.writeULong(0) + else: + writer.writeUShort(0) + + def read(self, reader, font, tableDict): + offset = self.readOffset(reader) + if offset == 0: + return None + if offset <= 3: + # XXX hack to work around buggy pala.ttf + print("*** Warning: offset is not 0, yet suspiciously low (%s). 
table: %s" \ + % (offset, self.tableClass.__name__)) + return None + table = self.tableClass() + reader = reader.getSubReader(offset) + if font.lazy: + table.reader = reader + table.font = font + else: + table.decompile(reader, font) + return table + + def write(self, writer, font, tableDict, value, repeatIndex=None): + if value is None: + self.writeNullOffset(writer) + else: + subWriter = writer.getSubWriter() + subWriter.longOffset = self.longOffset + subWriter.name = self.name + if repeatIndex is not None: + subWriter.repeatIndex = repeatIndex + writer.writeSubTable(subWriter) + value.compile(subWriter, font) + +class LTable(Table): + + longOffset = True + staticSize = 4 + + def readOffset(self, reader): + return reader.readULong() + + +class SubTable(Table): + def getConverter(self, tableType, lookupType): + tableClass = self.lookupTypes[tableType][lookupType] + return self.__class__(self.name, self.repeat, self.aux, tableClass) + + +class ExtSubTable(LTable, SubTable): + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.Extension = 1 # actually, mere presence of the field flags it as an Ext Subtable writer. 
+ Table.write(self, writer, font, tableDict, value, repeatIndex) + +class FeatureParams(Table): + def getConverter(self, featureTag): + tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams) + return self.__class__(self.name, self.repeat, self.aux, tableClass) + + +class ValueFormat(IntValue): + staticSize = 2 + def __init__(self, name, repeat, aux, tableClass): + BaseConverter.__init__(self, name, repeat, aux, tableClass) + self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1") + def read(self, reader, font, tableDict): + format = reader.readUShort() + reader[self.which] = ValueRecordFactory(format) + return format + def write(self, writer, font, tableDict, format, repeatIndex=None): + writer.writeUShort(format) + writer[self.which] = ValueRecordFactory(format) + + +class ValueRecord(ValueFormat): + def getRecordSize(self, reader): + return 2 * len(reader[self.which]) + def read(self, reader, font, tableDict): + return reader[self.which].readValueRecord(reader, font) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer[self.which].writeValueRecord(writer, font, value) + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value is None: + pass # NULL table, ignore + else: + value.toXML(xmlWriter, font, self.name, attrs) + def xmlRead(self, attrs, content, font): + from .otBase import ValueRecord + value = ValueRecord() + value.fromXML(None, attrs, content, font) + return value + + +class DeltaValue(BaseConverter): + + def read(self, reader, font, tableDict): + StartSize = tableDict["StartSize"] + EndSize = tableDict["EndSize"] + DeltaFormat = tableDict["DeltaFormat"] + assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" + nItems = EndSize - StartSize + 1 + nBits = 1 << DeltaFormat + minusOffset = 1 << nBits + mask = (1 << nBits) - 1 + signMask = 1 << (nBits - 1) + + DeltaValue = [] + tmp, shift = 0, 0 + for i in range(nItems): + if shift == 0: + tmp, shift = reader.readUShort(), 16 + shift = 
shift - nBits + value = (tmp >> shift) & mask + if value & signMask: + value = value - minusOffset + DeltaValue.append(value) + return DeltaValue + + def write(self, writer, font, tableDict, value, repeatIndex=None): + StartSize = tableDict["StartSize"] + EndSize = tableDict["EndSize"] + DeltaFormat = tableDict["DeltaFormat"] + DeltaValue = value + assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" + nItems = EndSize - StartSize + 1 + nBits = 1 << DeltaFormat + assert len(DeltaValue) == nItems + mask = (1 << nBits) - 1 + + tmp, shift = 0, 16 + for value in DeltaValue: + shift = shift - nBits + tmp = tmp | ((value & mask) << shift) + if shift == 0: + writer.writeUShort(tmp) + tmp, shift = 0, 16 + if shift != 16: + writer.writeUShort(tmp) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + return safeEval(attrs["value"]) + + +converterMapping = { + # type class + "int16": Short, + "uint16": UShort, + "uint24": UInt24, + "uint32": ULong, + "Version": Version, + "Tag": Tag, + "GlyphID": GlyphID, + "DeciPoints": DeciPoints, + "Fixed": Fixed, + "struct": Struct, + "Offset": Table, + "LOffset": LTable, + "ValueRecord": ValueRecord, + "DeltaValue": DeltaValue, +} diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/otData.py fonttools-3.0/Tools/fontTools/ttLib/tables/otData.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/otData.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/otData.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1025 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +otData = [ + + # + # common + # + + ('LookupOrder', []), + + ('ScriptList', [ + ('uint16', 'ScriptCount', None, None, 'Number of ScriptRecords'), + ('struct', 'ScriptRecord', 'ScriptCount', 0, 'Array of ScriptRecords -listed alphabetically by ScriptTag'), + ]), 
+ + ('ScriptRecord', [ + ('Tag', 'ScriptTag', None, None, '4-byte ScriptTag identifier'), + ('Offset', 'Script', None, None, 'Offset to Script table-from beginning of ScriptList'), + ]), + + ('Script', [ + ('Offset', 'DefaultLangSys', None, None, 'Offset to DefaultLangSys table-from beginning of Script table-may be NULL'), + ('uint16', 'LangSysCount', None, None, 'Number of LangSysRecords for this script-excluding the DefaultLangSys'), + ('struct', 'LangSysRecord', 'LangSysCount', 0, 'Array of LangSysRecords-listed alphabetically by LangSysTag'), + ]), + + ('LangSysRecord', [ + ('Tag', 'LangSysTag', None, None, '4-byte LangSysTag identifier'), + ('Offset', 'LangSys', None, None, 'Offset to LangSys table-from beginning of Script table'), + ]), + + ('LangSys', [ + ('Offset', 'LookupOrder', None, None, '= NULL (reserved for an offset to a reordering table)'), + ('uint16', 'ReqFeatureIndex', None, None, 'Index of a feature required for this language system- if no required features = 0xFFFF'), + ('uint16', 'FeatureCount', None, None, 'Number of FeatureIndex values for this language system-excludes the required feature'), + ('uint16', 'FeatureIndex', 'FeatureCount', 0, 'Array of indices into the FeatureList-in arbitrary order'), + ]), + + ('FeatureList', [ + ('uint16', 'FeatureCount', None, None, 'Number of FeatureRecords in this table'), + ('struct', 'FeatureRecord', 'FeatureCount', 0, 'Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag'), + ]), + + ('FeatureRecord', [ + ('Tag', 'FeatureTag', None, None, '4-byte feature identification tag'), + ('Offset', 'Feature', None, None, 'Offset to Feature table-from beginning of FeatureList'), + ]), + + ('Feature', [ + ('Offset', 'FeatureParams', None, None, '= NULL (reserved for offset to FeatureParams)'), + ('uint16', 'LookupCount', None, None, 'Number of LookupList indices for this feature'), + ('uint16', 'LookupListIndex', 'LookupCount', 0, 'Array of LookupList indices 
for this feature -zero-based (first lookup is LookupListIndex = 0)'), + ]), + + ('FeatureParams', [ + ]), + + ('FeatureParamsSize', [ + ('DeciPoints', 'DesignSize', None, None, 'The design size in 720/inch units (decipoints).'), + ('uint16', 'SubfamilyID', None, None, 'Serves as an identifier that associates fonts in a subfamily.'), + ('uint16', 'SubfamilyNameID', None, None, 'Subfamily NameID.'), + ('DeciPoints', 'RangeStart', None, None, 'Small end of recommended usage range (exclusive) in 720/inch units.'), + ('DeciPoints', 'RangeEnd', None, None, 'Large end of recommended usage range (inclusive) in 720/inch units.'), + ]), + + ('FeatureParamsStylisticSet', [ + ('uint16', 'Version', None, None, 'Set to 0.'), + ('uint16', 'UINameID', None, None, 'UI NameID.'), + ]), + + ('FeatureParamsCharacterVariants', [ + ('uint16', 'Format', None, None, 'Set to 0.'), + ('uint16', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'), + ('uint16', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'), + ('uint16', 'SampleTextNameID', None, None, 'Sample text NameID.'), + ('uint16', 'NumNamedParameters', None, None, 'Number of named parameters.'), + ('uint16', 'FirstParamUILabelNameID', None, None, 'First NameID of UI feature parameters.'), + ('uint16', 'CharCount', None, None, 'Count of characters this feature provides glyph variants for.'), + ('uint24', 'Character', 'CharCount', 0, 'Unicode characters for which this feature provides glyph variants.'), + ]), + + ('LookupList', [ + ('uint16', 'LookupCount', None, None, 'Number of lookups in this table'), + ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'), + ]), + + ('Lookup', [ + ('uint16', 'LookupType', None, None, 'Different enumerations for GSUB and GPOS'), + ('uint16', 'LookupFlag', None, None, 'Lookup qualifiers'), + ('uint16', 'SubTableCount', None, None, 'Number of SubTables for this lookup'), 
+ ('Offset', 'SubTable', 'SubTableCount', 0, 'Array of offsets to SubTables-from beginning of Lookup table'), + ('uint16', 'MarkFilteringSet', None, 'LookupFlag & 0x0010', 'If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.'), + ]), + + ('CoverageFormat1', [ + ('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 1'), + ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the GlyphArray'), + ('GlyphID', 'GlyphArray', 'GlyphCount', 0, 'Array of GlyphIDs-in numerical order'), + ]), + + ('CoverageFormat2', [ + ('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 2'), + ('uint16', 'RangeCount', None, None, 'Number of RangeRecords'), + ('struct', 'RangeRecord', 'RangeCount', 0, 'Array of glyph ranges-ordered by Start GlyphID'), + ]), + + ('RangeRecord', [ + ('GlyphID', 'Start', None, None, 'First GlyphID in the range'), + ('GlyphID', 'End', None, None, 'Last GlyphID in the range'), + ('uint16', 'StartCoverageIndex', None, None, 'Coverage Index of first GlyphID in range'), + ]), + + ('ClassDefFormat1', [ + ('uint16', 'ClassFormat', None, None, 'Format identifier-format = 1'), + ('GlyphID', 'StartGlyph', None, None, 'First GlyphID of the ClassValueArray'), + ('uint16', 'GlyphCount', None, None, 'Size of the ClassValueArray'), + ('uint16', 'ClassValueArray', 'GlyphCount', 0, 'Array of Class Values-one per GlyphID'), + ]), + + ('ClassDefFormat2', [ + ('uint16', 'ClassFormat', None, None, 'Format identifier-format = 2'), + ('uint16', 'ClassRangeCount', None, None, 'Number of ClassRangeRecords'), + ('struct', 'ClassRangeRecord', 'ClassRangeCount', 0, 'Array of ClassRangeRecords-ordered by Start GlyphID'), + ]), + + ('ClassRangeRecord', [ + ('GlyphID', 'Start', None, None, 'First GlyphID in the range'), + ('GlyphID', 'End', None, None, 'Last GlyphID in the range'), + ('uint16', 'Class', None, None, 'Applied to all 
glyphs in the range'), + ]), + + ('Device', [ + ('uint16', 'StartSize', None, None, 'Smallest size to correct-in ppem'), + ('uint16', 'EndSize', None, None, 'Largest size to correct-in ppem'), + ('uint16', 'DeltaFormat', None, None, 'Format of DeltaValue array data: 1, 2, or 3'), + ('DeltaValue', 'DeltaValue', '', 0, 'Array of compressed data'), + ]), + + + # + # gpos + # + + ('GPOS', [ + ('Version', 'Version', None, None, 'Version of the GPOS table-initially = 0x00010000'), + ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GPOS table'), + ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GPOS table'), + ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GPOS table'), + ]), + + ('SinglePosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'), + ('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'), + ('ValueRecord', 'Value', None, None, 'Defines positioning value(s)-applied to all glyphs in the Coverage table'), + ]), + + ('SinglePosFormat2', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'), + ('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'), + ('uint16', 'ValueCount', None, None, 'Number of ValueRecords'), + ('ValueRecord', 'Value', 'ValueCount', 0, 'Array of ValueRecords-positioning values applied to glyphs'), + ]), + + ('PairPosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair'), + ('uint16', 'ValueFormat1', None, None, 'Defines the types of data in ValueRecord1-for the first 
glyph in the pair -may be zero (0)'), + ('uint16', 'ValueFormat2', None, None, 'Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)'), + ('uint16', 'PairSetCount', None, None, 'Number of PairSet tables'), + ('Offset', 'PairSet', 'PairSetCount', 0, 'Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index'), + ]), + + ('PairSet', [ + ('uint16', 'PairValueCount', None, None, 'Number of PairValueRecords'), + ('struct', 'PairValueRecord', 'PairValueCount', 0, 'Array of PairValueRecords-ordered by GlyphID of the second glyph'), + ]), + + ('PairValueRecord', [ + ('GlyphID', 'SecondGlyph', None, None, 'GlyphID of second glyph in the pair-first glyph is listed in the Coverage table'), + ('ValueRecord', 'Value1', None, None, 'Positioning data for the first glyph in the pair'), + ('ValueRecord', 'Value2', None, None, 'Positioning data for the second glyph in the pair'), + ]), + + ('PairPosFormat2', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair'), + ('uint16', 'ValueFormat1', None, None, 'ValueRecord definition-for the first glyph of the pair-may be zero (0)'), + ('uint16', 'ValueFormat2', None, None, 'ValueRecord definition-for the second glyph of the pair-may be zero (0)'), + ('Offset', 'ClassDef1', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair'), + ('Offset', 'ClassDef2', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair'), + ('uint16', 'Class1Count', None, None, 'Number of classes in ClassDef1 table-includes Class0'), + ('uint16', 'Class2Count', None, None, 'Number of classes in ClassDef2 table-includes Class0'), + ('struct', 'Class1Record', 'Class1Count', 0, 'Array of Class1 records-ordered by Class1'), + ]), + + 
('Class1Record', [ + ('struct', 'Class2Record', 'Class2Count', 0, 'Array of Class2 records-ordered by Class2'), + ]), + + ('Class2Record', [ + ('ValueRecord', 'Value1', None, None, 'Positioning for first glyph-empty if ValueFormat1 = 0'), + ('ValueRecord', 'Value2', None, None, 'Positioning for second glyph-empty if ValueFormat2 = 0'), + ]), + + ('CursivePosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of CursivePos subtable'), + ('uint16', 'EntryExitCount', None, None, 'Number of EntryExit records'), + ('struct', 'EntryExitRecord', 'EntryExitCount', 0, 'Array of EntryExit records-in Coverage Index order'), + ]), + + ('EntryExitRecord', [ + ('Offset', 'EntryAnchor', None, None, 'Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL'), + ('Offset', 'ExitAnchor', None, None, 'Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL'), + ]), + + ('MarkBasePosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'MarkCoverage', None, None, 'Offset to MarkCoverage table-from beginning of MarkBasePos subtable'), + ('Offset', 'BaseCoverage', None, None, 'Offset to BaseCoverage table-from beginning of MarkBasePos subtable'), + ('uint16', 'ClassCount', None, None, 'Number of classes defined for marks'), + ('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkBasePos subtable'), + ('Offset', 'BaseArray', None, None, 'Offset to BaseArray table-from beginning of MarkBasePos subtable'), + ]), + + ('BaseArray', [ + ('uint16', 'BaseCount', None, None, 'Number of BaseRecords'), + ('struct', 'BaseRecord', 'BaseCount', 0, 'Array of BaseRecords-in order of BaseCoverage Index'), + ]), + + ('BaseRecord', [ + ('Offset', 'BaseAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of BaseArray table-ordered by 
class-zero-based'), + ]), + + ('MarkLigPosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'MarkCoverage', None, None, 'Offset to Mark Coverage table-from beginning of MarkLigPos subtable'), + ('Offset', 'LigatureCoverage', None, None, 'Offset to Ligature Coverage table-from beginning of MarkLigPos subtable'), + ('uint16', 'ClassCount', None, None, 'Number of defined mark classes'), + ('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkLigPos subtable'), + ('Offset', 'LigatureArray', None, None, 'Offset to LigatureArray table-from beginning of MarkLigPos subtable'), + ]), + + ('LigatureArray', [ + ('uint16', 'LigatureCount', None, None, 'Number of LigatureAttach table offsets'), + ('Offset', 'LigatureAttach', 'LigatureCount', 0, 'Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index'), + ]), + + ('LigatureAttach', [ + ('uint16', 'ComponentCount', None, None, 'Number of ComponentRecords in this ligature'), + ('struct', 'ComponentRecord', 'ComponentCount', 0, 'Array of Component records-ordered in writing direction'), + ]), + + ('ComponentRecord', [ + ('Offset', 'LigatureAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array'), + ]), + + ('MarkMarkPosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Mark1Coverage', None, None, 'Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable'), + ('Offset', 'Mark2Coverage', None, None, 'Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable'), + ('uint16', 'ClassCount', None, None, 'Number of Combining Mark classes defined'), + ('Offset', 'Mark1Array', None, None, 'Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable'), + 
('Offset', 'Mark2Array', None, None, 'Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable'), + ]), + + ('Mark2Array', [ + ('uint16', 'Mark2Count', None, None, 'Number of Mark2 records'), + ('struct', 'Mark2Record', 'Mark2Count', 0, 'Array of Mark2 records-in Coverage order'), + ]), + + ('Mark2Record', [ + ('Offset', 'Mark2Anchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array'), + ]), + + ('PosLookupRecord', [ + ('uint16', 'SequenceIndex', None, None, 'Index to input glyph sequence-first glyph = 0'), + ('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'), + ]), + + ('ContextPosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'), + ('uint16', 'PosRuleSetCount', None, None, 'Number of PosRuleSet tables'), + ('Offset', 'PosRuleSet', 'PosRuleSetCount', 0, 'Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'), + ]), + + ('PosRuleSet', [ + ('uint16', 'PosRuleCount', None, None, 'Number of PosRule tables'), + ('Offset', 'PosRule', 'PosRuleCount', 0, 'Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference'), + ]), + + ('PosRule', [ + ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the Input glyph sequence'), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-starting with the second glyph'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'), + ]), + + ('ContextPosFormat2', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'), + ('Offset', 'ClassDef', None, None, 'Offset to 
ClassDef table-from beginning of ContextPos subtable'), + ('uint16', 'PosClassSetCount', None, None, 'Number of PosClassSet tables'), + ('Offset', 'PosClassSet', 'PosClassSetCount', 0, 'Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL'), + ]), + + ('PosClassSet', [ + ('uint16', 'PosClassRuleCount', None, None, 'Number of PosClassRule tables'), + ('Offset', 'PosClassRule', 'PosClassRuleCount', 0, 'Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference'), + ]), + + ('PosClassRule', [ + ('uint16', 'GlyphCount', None, None, 'Number of glyphs to be matched'), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph sequence'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'), + ]), + + ('ContextPosFormat3', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'), + ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input sequence'), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage tables-from beginning of ContextPos subtable'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'), + ]), + + ('ChainContextPosFormat1', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'), + ('uint16', 'ChainPosRuleSetCount', None, None, 'Number of ChainPosRuleSet tables'), + ('Offset', 'ChainPosRuleSet', 'ChainPosRuleSetCount', 0, 'Array of offsets to ChainPosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'), + ]), + + ('ChainPosRuleSet', [ + ('uint16', 'ChainPosRuleCount', None, None, 'Number of ChainPosRule 
tables'), + ('Offset', 'ChainPosRule', 'ChainPosRuleCount', 0, 'Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference'), + ]), + + ('ChainPosRule', [ + ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), + ('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, "Array of backtracking GlyphID's (to be matched before the input sequence)"), + ('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'), + ('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'), + ('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, "Array of lookahead GlyphID's (to be matched after the input sequence)"), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'), + ]), + + ('ChainContextPosFormat2', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ChainContextPos subtable'), + ('Offset', 'BacktrackClassDef', None, None, 'Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable'), + ('Offset', 'InputClassDef', None, None, 'Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable'), + ('Offset', 'LookAheadClassDef', None, None, 'Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable'), + ('uint16', 'ChainPosClassSetCount', None, None, 'Number of ChainPosClassSet tables'), + ('Offset', 'ChainPosClassSet', 'ChainPosClassSetCount', 0, 'Array of offsets to 
ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL'), + ]), + + ('ChainPosClassSet', [ + ('uint16', 'ChainPosClassRuleCount', None, None, 'Number of ChainPosClassRule tables'), + ('Offset', 'ChainPosClassRule', 'ChainPosClassRuleCount', 0, 'Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference'), + ]), + + ('ChainPosClassRule', [ + ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), + ('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes(to be matched before the input sequence)'), + ('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'), + ('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes(start with second class; to be matched with the input glyph sequence)'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'), + ('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes(to be matched after the input sequence)'), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'), + ]), + + ('ChainContextPosFormat3', [ + ('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'), + ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'), + ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'), + ('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'), + ('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence 
order'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'), + ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'), + ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), + ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords,in design order'), + ]), + + ('ExtensionPosFormat1', [ + ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'), + ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'), + ('LOffset', 'ExtSubTable', None, None, 'Offset to SubTable'), + ]), + + ('ValueRecord', [ + ('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'), + ('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'), + ('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'), + ('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'), + ('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'), + ('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'), + ('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'), + ('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'), + ]), + + ('AnchorFormat1', [ + ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 1'), + ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'), + ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'), + ]), + + 
('AnchorFormat2', [ + ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 2'), + ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'), + ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'), + ('uint16', 'AnchorPoint', None, None, 'Index to glyph contour point'), + ]), + + ('AnchorFormat3', [ + ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 3'), + ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'), + ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'), + ('Offset', 'XDeviceTable', None, None, 'Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)'), + ('Offset', 'YDeviceTable', None, None, 'Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)'), + ]), + + ('MarkArray', [ + ('uint16', 'MarkCount', None, None, 'Number of MarkRecords'), + ('struct', 'MarkRecord', 'MarkCount', 0, 'Array of MarkRecords-in Coverage order'), + ]), + + ('MarkRecord', [ + ('uint16', 'Class', None, None, 'Class defined for this mark'), + ('Offset', 'MarkAnchor', None, None, 'Offset to Anchor table-from beginning of MarkArray table'), + ]), + + + # + # gsub + # + + ('GSUB', [ + ('Version', 'Version', None, None, 'Version of the GSUB table-initially set to 0x00010000'), + ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GSUB table'), + ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GSUB table'), + ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GSUB table'), + ]), + + ('SingleSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'DeltaGlyphID', None, None, 'Add to original GlyphID modulo 65536 to get substitute GlyphID'), + ]), + + ('SingleSubstFormat2', [ + 
('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'), + ('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage Index'), + ]), + + ('MultipleSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'SequenceCount', None, None, 'Number of Sequence table offsets in the Sequence array'), + ('Offset', 'Sequence', 'SequenceCount', 0, 'Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index'), + ]), + + ('Sequence', [ + ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array. This should always be greater than 0.'), + ('GlyphID', 'Substitute', 'GlyphCount', 0, 'String of GlyphIDs to substitute'), + ]), + + ('AlternateSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'AlternateSetCount', None, None, 'Number of AlternateSet tables'), + ('Offset', 'AlternateSet', 'AlternateSetCount', 0, 'Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index'), + ]), + + ('AlternateSet', [ + ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Alternate array'), + ('GlyphID', 'Alternate', 'GlyphCount', 0, 'Array of alternate GlyphIDs-in arbitrary order'), + ]), + + ('LigatureSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'LigSetCount', None, None, 'Number of LigatureSet 
tables'), + ('Offset', 'LigatureSet', 'LigSetCount', 0, 'Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index'), + ]), + + ('LigatureSet', [ + ('uint16', 'LigatureCount', None, None, 'Number of Ligature tables'), + ('Offset', 'Ligature', 'LigatureCount', 0, 'Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference'), + ]), + + ('Ligature', [ + ('GlyphID', 'LigGlyph', None, None, 'GlyphID of ligature to substitute'), + ('uint16', 'CompCount', None, None, 'Number of components in the ligature'), + ('GlyphID', 'Component', 'CompCount', -1, 'Array of component GlyphIDs-start with the second component-ordered in writing direction'), + ]), + + ('SubstLookupRecord', [ + ('uint16', 'SequenceIndex', None, None, 'Index into current glyph sequence-first glyph = 0'), + ('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'), + ]), + + ('ContextSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'SubRuleSetCount', None, None, 'Number of SubRuleSet tables-must equal GlyphCount in Coverage table'), + ('Offset', 'SubRuleSet', 'SubRuleSetCount', 0, 'Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'), + ]), + + ('SubRuleSet', [ + ('uint16', 'SubRuleCount', None, None, 'Number of SubRule tables'), + ('Offset', 'SubRule', 'SubRuleCount', 0, 'Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference'), + ]), + + ('SubRule', [ + ('uint16', 'GlyphCount', None, None, 'Total number of glyphs in input glyph sequence-includes the first glyph'), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-start with second glyph'), + ('struct', 'SubstLookupRecord', 
'SubstCount', 0, 'Array of SubstLookupRecords-in design order'), + ]), + + ('ContextSubstFormat2', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('Offset', 'ClassDef', None, None, 'Offset to glyph ClassDef table-from beginning of Substitution table'), + ('uint16', 'SubClassSetCount', None, None, 'Number of SubClassSet tables'), + ('Offset', 'SubClassSet', 'SubClassSetCount', 0, 'Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL'), + ]), + + ('SubClassSet', [ + ('uint16', 'SubClassRuleCount', None, None, 'Number of SubClassRule tables'), + ('Offset', 'SubClassRule', 'SubClassRuleCount', 0, 'Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference'), + ]), + + ('SubClassRule', [ + ('uint16', 'GlyphCount', None, None, 'Total number of classes specified for the context in the rule-includes the first class'), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph class sequence'), + ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of Substitution lookups-in design order'), + ]), + + ('ContextSubstFormat3', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 3'), + ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input glyph sequence'), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order'), + ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords-in design order'), + ]), + + ('ChainContextSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 
'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('uint16', 'ChainSubRuleSetCount', None, None, 'Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table'), + ('Offset', 'ChainSubRuleSet', 'ChainSubRuleSetCount', 0, 'Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'), + ]), + + ('ChainSubRuleSet', [ + ('uint16', 'ChainSubRuleCount', None, None, 'Number of ChainSubRule tables'), + ('Offset', 'ChainSubRule', 'ChainSubRuleCount', 0, 'Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference'), + ]), + + ('ChainSubRule', [ + ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), + ('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, "Array of backtracking GlyphID's (to be matched before the input sequence)"), + ('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'), + ('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'), + ('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, "Array of lookahead GlyphID's (to be matched after the input sequence)"), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'), + ]), + + ('ChainContextSubstFormat2', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'), + ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), + ('Offset', 'BacktrackClassDef', None, None, 'Offset to glyph ClassDef table containing backtrack sequence data-from beginning 
of Substitution table'), + ('Offset', 'InputClassDef', None, None, 'Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table'), + ('Offset', 'LookAheadClassDef', None, None, 'Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table'), + ('uint16', 'ChainSubClassSetCount', None, None, 'Number of ChainSubClassSet tables'), + ('Offset', 'ChainSubClassSet', 'ChainSubClassSetCount', 0, 'Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL'), + ]), + + ('ChainSubClassSet', [ + ('uint16', 'ChainSubClassRuleCount', None, None, 'Number of ChainSubClassRule tables'), + ('Offset', 'ChainSubClassRule', 'ChainSubClassRuleCount', 0, 'Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference'), + ]), + + ('ChainSubClassRule', [ + ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), + ('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes(to be matched before the input sequence)'), + ('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'), + ('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes(start with second class; to be matched with the input glyph sequence)'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'), + ('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes(to be matched after the input sequence)'), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'), + ]), + + ('ChainContextSubstFormat3', [ + ('uint16', 'SubstFormat', None, 
None, 'Format identifier-format = 3'), + ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'), + ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'), + ('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'), + ('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence order'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'), + ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'), + ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), + ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords, in design order'), + ]), + + ('ExtensionSubstFormat1', [ + ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'), + ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. 
the extension subtable).'), + ('LOffset', 'ExtSubTable', None, None, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'), + ]), + + ('ReverseChainSingleSubstFormat1', [ + ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), + ('Offset', 'Coverage', None, 0, 'Offset to Coverage table - from beginning of Substitution table'), + ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'), + ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'), + ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'), + ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'), + ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'), + ('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage index'), + ]), + + # + # gdef + # + + ('GDEF', [ + ('Version', 'Version', None, None, 'Version of the GDEF table-initially 0x00010000'), + ('Offset', 'GlyphClassDef', None, None, 'Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)'), + ('Offset', 'AttachList', None, None, 'Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)'), + ('Offset', 'LigCaretList', None, None, 'Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)'), + ('Offset', 'MarkAttachClassDef', None, None, 'Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)'), + ('Offset', 'MarkGlyphSetsDef', None, 'int(round(Version*0x10000)) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), + ]), + + ('AttachList', [ + ('Offset', 
'Coverage', None, None, 'Offset to Coverage table - from beginning of AttachList table'), + ('uint16', 'GlyphCount', None, None, 'Number of glyphs with attachment points'), + ('Offset', 'AttachPoint', 'GlyphCount', 0, 'Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order'), + ]), + + ('AttachPoint', [ + ('uint16', 'PointCount', None, None, 'Number of attachment points on this glyph'), + ('uint16', 'PointIndex', 'PointCount', 0, 'Array of contour point indices -in increasing numerical order'), + ]), + + ('LigCaretList', [ + ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from beginning of LigCaretList table'), + ('uint16', 'LigGlyphCount', None, None, 'Number of ligature glyphs'), + ('Offset', 'LigGlyph', 'LigGlyphCount', 0, 'Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order'), + ]), + + ('LigGlyph', [ + ('uint16', 'CaretCount', None, None, 'Number of CaretValues for this ligature (components - 1)'), + ('Offset', 'CaretValue', 'CaretCount', 0, 'Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order'), + ]), + + ('CaretValueFormat1', [ + ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 1'), + ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), + ]), + + ('CaretValueFormat2', [ + ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 2'), + ('uint16', 'CaretValuePoint', None, None, 'Contour point index on glyph'), + ]), + + ('CaretValueFormat3', [ + ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 3'), + ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), + ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value-from beginning of CaretValue table'), + ]), + + ('MarkGlyphSetsDef', [ + ('uint16', 'MarkSetTableFormat', None, None, 'Format identifier == 1'), + ('uint16', 'MarkSetCount', None, None, 
'Number of mark sets defined'), + ('LOffset', 'Coverage', 'MarkSetCount', 0, 'Array of offsets to mark set coverage tables.'), + ]), + + # + # base + # + + ('BASE', [ + ('Version', 'Version', None, None, 'Version of the BASE table-initially 0x00010000'), + ('Offset', 'HorizAxis', None, None, 'Offset to horizontal Axis table-from beginning of BASE table-may be NULL'), + ('Offset', 'VertAxis', None, None, 'Offset to vertical Axis table-from beginning of BASE table-may be NULL'), + ]), + + ('Axis', [ + ('Offset', 'BaseTagList', None, None, 'Offset to BaseTagList table-from beginning of Axis table-may be NULL'), + ('Offset', 'BaseScriptList', None, None, 'Offset to BaseScriptList table-from beginning of Axis table'), + ]), + + ('BaseTagList', [ + ('uint16', 'BaseTagCount', None, None, 'Number of baseline identification tags in this text direction-may be zero (0)'), + ('Tag', 'BaselineTag', 'BaseTagCount', 0, 'Array of 4-byte baseline identification tags-must be in alphabetical order'), + ]), + + ('BaseScriptList', [ + ('uint16', 'BaseScriptCount', None, None, 'Number of BaseScriptRecords defined'), + ('struct', 'BaseScriptRecord', 'BaseScriptCount', 0, 'Array of BaseScriptRecords-in alphabetical order by BaseScriptTag'), + ]), + + ('BaseScriptRecord', [ + ('Tag', 'BaseScriptTag', None, None, '4-byte script identification tag'), + ('Offset', 'BaseScript', None, None, 'Offset to BaseScript table-from beginning of BaseScriptList'), + ]), + + ('BaseScript', [ + ('Offset', 'BaseValues', None, None, 'Offset to BaseValues table-from beginning of BaseScript table-may be NULL'), + ('Offset', 'DefaultMinMax', None, None, 'Offset to MinMax table- from beginning of BaseScript table-may be NULL'), + ('uint16', 'BaseLangSysCount', None, None, 'Number of BaseLangSysRecords defined-may be zero (0)'), + ('struct', 'BaseLangSysRecord', 'BaseLangSysCount', 0, 'Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag'), + ]), + + ('BaseLangSysRecord', [ + ('Tag', 
'BaseLangSysTag', None, None, '4-byte language system identification tag'), + ('Offset', 'MinMax', None, None, 'Offset to MinMax table-from beginning of BaseScript table'), + ]), + + ('BaseValues', [ + ('uint16', 'DefaultIndex', None, None, 'Index number of default baseline for this script-equals index position of baseline tag in BaselineArray of the BaseTagList'), + ('uint16', 'BaseCoordCount', None, None, 'Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList'), + ('Offset', 'BaseCoord', 'BaseCoordCount', 0, 'Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList'), + ]), + + ('MinMax', [ + ('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL'), + ('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL'), + ('uint16', 'FeatMinMaxCount', None, None, 'Number of FeatMinMaxRecords-may be zero (0)'), + ('struct', 'FeatMinMaxRecord', 'FeatMinMaxCount', 0, 'Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag'), + ]), + + ('FeatMinMaxRecord', [ + ('Tag', 'FeatureTableTag', None, None, '4-byte feature identification tag-must match FeatureTag in FeatureList'), + ('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL'), + ('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL'), + ]), + + ('BaseCoordFormat1', [ + ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 1'), + ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), + ]), + + ('BaseCoordFormat2', [ + ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 2'), + ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), + ('GlyphID', 
'ReferenceGlyph', None, None, 'GlyphID of control glyph'), + ('uint16', 'BaseCoordPoint', None, None, 'Index of contour point on the ReferenceGlyph'), + ]), + + ('BaseCoordFormat3', [ + ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 3'), + ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), + ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value'), + ]), + + + # + # jstf + # + + ('JSTF', [ + ('Version', 'Version', None, None, 'Version of the JSTF table-initially set to 0x00010000'), + ('uint16', 'JstfScriptCount', None, None, 'Number of JstfScriptRecords in this table'), + ('struct', 'JstfScriptRecord', 'JstfScriptCount', 0, 'Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag'), + ]), + + ('JstfScriptRecord', [ + ('Tag', 'JstfScriptTag', None, None, '4-byte JstfScript identification'), + ('Offset', 'JstfScript', None, None, 'Offset to JstfScript table-from beginning of JSTF Header'), + ]), + + ('JstfScript', [ + ('Offset', 'ExtenderGlyph', None, None, 'Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL'), + ('Offset', 'DefJstfLangSys', None, None, 'Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL'), + ('uint16', 'JstfLangSysCount', None, None, 'Number of JstfLangSysRecords in this table- may be zero (0)'), + ('struct', 'JstfLangSysRecord', 'JstfLangSysCount', 0, 'Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag'), + ]), + + ('JstfLangSysRecord', [ + ('Tag', 'JstfLangSysTag', None, None, '4-byte JstfLangSys identifier'), + ('Offset', 'JstfLangSys', None, None, 'Offset to JstfLangSys table-from beginning of JstfScript table'), + ]), + + ('ExtenderGlyph', [ + ('uint16', 'GlyphCount', None, None, 'Number of Extender Glyphs in this script'), + ('GlyphID', 'ExtenderGlyph', 'GlyphCount', 0, 'GlyphIDs-in increasing numerical order'), + ]), + + ('JstfLangSys', [ + ('uint16', 'JstfPriorityCount', None, None, 'Number 
of JstfPriority tables'), + ('Offset', 'JstfPriority', 'JstfPriorityCount', 0, 'Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order'), + ]), + + ('JstfPriority', [ + ('Offset', 'ShrinkageEnableGSUB', None, None, 'Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ShrinkageDisableGSUB', None, None, 'Offset to Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ShrinkageEnableGPOS', None, None, 'Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ShrinkageDisableGPOS', None, None, 'Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ShrinkageJstfMax', None, None, 'Offset to Shrinkage JstfMax table-from beginning of JstfPriority table -may be NULL'), + ('Offset', 'ExtensionEnableGSUB', None, None, 'Offset to Extension Enable JstfGSUBModList table-may be NULL'), + ('Offset', 'ExtensionDisableGSUB', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ExtensionEnableGPOS', None, None, 'Offset to Extension Enable JstfGSUBModList table-may be NULL'), + ('Offset', 'ExtensionDisableGPOS', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), + ('Offset', 'ExtensionJstfMax', None, None, 'Offset to Extension JstfMax table-from beginning of JstfPriority table -may be NULL'), + ]), + + ('JstfGSUBModList', [ + ('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'), + ('uint16', 'GSUBLookupIndex', 'LookupCount', 0, 'Array of LookupIndex identifiers in GSUB-in increasing numerical order'), + ]), + + ('JstfGPOSModList', [ + ('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'), + ('uint16', 'GPOSLookupIndex', 
'LookupCount', 0, 'Array of LookupIndex identifiers in GPOS-in increasing numerical order'), + ]), + + ('JstfMax', [ + ('uint16', 'LookupCount', None, None, 'Number of lookup Indices for this modification'), + ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order'), + ]), + + # + # math + # + + ('MATH', [ + ('Version', 'Version', None, None, 'Version of the MATH table-initially set to 0x00010000.'), + ('Offset', 'MathConstants', None, None, 'Offset to MathConstants table - from the beginning of MATH table.'), + ('Offset', 'MathGlyphInfo', None, None, 'Offset to MathGlyphInfo table - from the beginning of MATH table.'), + ('Offset', 'MathVariants', None, None, 'Offset to MathVariants table - from the beginning of MATH table.'), + ]), + + ('MathValueRecord', [ + ('int16', 'Value', None, None, 'The X or Y value in design units.'), + ('Offset', 'DeviceTable', None, None, 'Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.'), + ]), + + ('MathConstants', [ + ('int16', 'ScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 1. Suggested value: 80%.'), + ('int16', 'ScriptScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.'), + ('uint16', 'DelimitedSubFormulaMinHeight', None, None, 'Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.'), + ('uint16', 'DisplayOperatorMinHeight', None, None, 'Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.'), + ('MathValueRecord', 'MathLeading', None, None, 'White space to be left between math formulas to ensure proper line spacing. 
For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.'), + ('MathValueRecord', 'AxisHeight', None, None, 'Axis height of the font.'), + ('MathValueRecord', 'AccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.'), + ('MathValueRecord', 'FlattenedAccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).'), + ('MathValueRecord', 'SubscriptShiftDown', None, None, 'The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.'), + ('MathValueRecord', 'SubscriptTopMax', None, None, 'Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.'), + ('MathValueRecord', 'SubscriptBaselineDropMin', None, None, 'Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.'), + ('MathValueRecord', 'SuperscriptShiftUp', None, None, 'Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.'), + ('MathValueRecord', 'SuperscriptShiftUpCramped', None, None, 'Standard shift of superscripts relative to the base, in cramped style.'), + ('MathValueRecord', 'SuperscriptBottomMin', None, None, 'Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. 
Suggested: 1/4 x-height.'), + ('MathValueRecord', 'SuperscriptBaselineDropMax', None, None, 'Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.'), + ('MathValueRecord', 'SubSuperscriptGapMin', None, None, 'Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.'), + ('MathValueRecord', 'SuperscriptBottomMaxWithSubscript', None, None, 'The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.'), + ('MathValueRecord', 'SpaceAfterScript', None, None, 'Extra white space to be added after each subscript and superscript. Suggested: 0.5pt for a 12 pt font.'), + ('MathValueRecord', 'UpperLimitGapMin', None, None, 'Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.'), + ('MathValueRecord', 'UpperLimitBaselineRiseMin', None, None, 'Minimum distance between baseline of upper limit and (ink) top of the base operator.'), + ('MathValueRecord', 'LowerLimitGapMin', None, None, 'Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.'), + ('MathValueRecord', 'LowerLimitBaselineDropMin', None, None, 'Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.'), + ('MathValueRecord', 'StackTopShiftUp', None, None, 'Standard shift up applied to the top element of a stack.'), + ('MathValueRecord', 'StackTopDisplayStyleShiftUp', None, None, 'Standard shift up applied to the top element of a stack in display style.'), + ('MathValueRecord', 'StackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack. 
Positive for moving in the downward direction.'), + ('MathValueRecord', 'StackBottomDisplayStyleShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.'), + ('MathValueRecord', 'StackGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'StackDisplayStyleGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.'), + ('MathValueRecord', 'StretchStackTopShiftUp', None, None, 'Standard shift up applied to the top element of the stretch stack.'), + ('MathValueRecord', 'StretchStackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.'), + ('MathValueRecord', 'StretchStackGapAboveMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin'), + ('MathValueRecord', 'StretchStackGapBelowMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.'), + ('MathValueRecord', 'FractionNumeratorShiftUp', None, None, 'Standard shift up applied to the numerator.'), + ('MathValueRecord', 'FractionNumeratorDisplayStyleShiftUp', None, None, 'Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.'), + ('MathValueRecord', 'FractionDenominatorShiftDown', None, None, 'Standard shift down applied to the denominator. Positive for moving in the downward direction.'), + ('MathValueRecord', 'FractionDenominatorDisplayStyleShiftDown', None, None, 'Standard shift down applied to the denominator in display style. 
Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.'), + ('MathValueRecord', 'FractionNumeratorGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness'), + ('MathValueRecord', 'FractionNumDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'FractionRuleThickness', None, None, 'Thickness of the fraction bar. Suggested: default rule thickness.'), + ('MathValueRecord', 'FractionDenominatorGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness'), + ('MathValueRecord', 'FractionDenomDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'SkewedFractionHorizontalGap', None, None, 'Horizontal distance between the top and bottom elements of a skewed fraction.'), + ('MathValueRecord', 'SkewedFractionVerticalGap', None, None, 'Vertical distance between the ink of the top and bottom elements of a skewed fraction.'), + ('MathValueRecord', 'OverbarVerticalGap', None, None, 'Distance between the overbar and the (ink) top of he base. Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'OverbarRuleThickness', None, None, 'Thickness of overbar. Suggested: default rule thickness.'), + ('MathValueRecord', 'OverbarExtraAscender', None, None, 'Extra white space reserved above the overbar. Suggested: default rule thickness.'), + ('MathValueRecord', 'UnderbarVerticalGap', None, None, 'Distance between underbar and (ink) bottom of the base. 
Suggested: 3x default rule thickness.'), + ('MathValueRecord', 'UnderbarRuleThickness', None, None, 'Thickness of underbar. Suggested: default rule thickness.'), + ('MathValueRecord', 'UnderbarExtraDescender', None, None, 'Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.'), + ('MathValueRecord', 'RadicalVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.'), + ('MathValueRecord', 'RadicalDisplayStyleVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.'), + ('MathValueRecord', 'RadicalRuleThickness', None, None, 'Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.'), + ('MathValueRecord', 'RadicalExtraAscender', None, None, 'Extra white space reserved above the radical. Suggested: RadicalRuleThickness.'), + ('MathValueRecord', 'RadicalKernBeforeDegree', None, None, 'Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.'), + ('MathValueRecord', 'RadicalKernAfterDegree', None, None, 'Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.'), + ('uint16', 'RadicalDegreeBottomRaisePercent', None, None, 'Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.'), + ]), + + ('MathGlyphInfo', [ + ('Offset', 'MathItalicsCorrectionInfo', None, None, 'Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.'), + ('Offset', 'MathTopAccentAttachment', None, None, 'Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.'), + ('Offset', 'ExtendedShapeCoverage', None, None, 'Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table. 
When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.'), + ('Offset', 'MathKernInfo', None, None, 'Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.'), + ]), + + ('MathItalicsCorrectionInfo', [ + ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.'), + ('uint16', 'ItalicsCorrectionCount', None, None, 'Number of italics correction values. Should coincide with the number of covered glyphs.'), + ('MathValueRecord', 'ItalicsCorrection', 'ItalicsCorrectionCount', 0, 'Array of MathValueRecords defining italics correction values for each covered glyph.'), + ]), + + ('MathTopAccentAttachment', [ + ('Offset', 'TopAccentCoverage', None, None, 'Offset to Coverage table - from the beginning of MathTopAccentAttachment table.'), + ('uint16', 'TopAccentAttachmentCount', None, None, 'Number of top accent attachment point values. Should coincide with the number of covered glyphs'), + ('MathValueRecord', 'TopAccentAttachment', 'TopAccentAttachmentCount', 0, 'Array of MathValueRecords defining top accent attachment points for each covered glyph'), + ]), + + ('MathKernInfo', [ + ('Offset', 'MathKernCoverage', None, None, 'Offset to Coverage table - from the beginning of the MathKernInfo table.'), + ('uint16', 'MathKernCount', None, None, 'Number of MathKernInfoRecords.'), + ('MathKernInfoRecord', 'MathKernInfoRecords', 'MathKernCount', 0, 'Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.'), + ]), + + ('MathKernInfoRecord', [ + ('Offset', 'TopRightMathKern', None, None, 'Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. 
May be NULL.'), + ('Offset', 'TopLeftMathKern', None, None, 'Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.'), + ('Offset', 'BottomRightMathKern', None, None, 'Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.'), + ('Offset', 'BottomLeftMathKern', None, None, 'Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.'), + ]), + + ('MathKern', [ + ('uint16', 'HeightCount', None, None, 'Number of heights on which the kern value changes.'), + ('MathValueRecord', 'CorrectionHeight', 'HeightCount', 0, 'Array of correction heights at which the kern value changes. Sorted by the height value in design units.'), + ('MathValueRecord', 'KernValue', 'HeightCount', 1, 'Array of kern values corresponding to heights. First value is the kern value for all heights less or equal than the first height in this table.Last value is the value to be applied for all heights greater than the last height in this table. 
Negative values are interpreted as move glyphs closer to each other.'), + ]), + + ('MathVariants', [ + ('uint16', 'MinConnectorOverlap', None, None, 'Minimum overlap of connecting glyphs during glyph construction, in design units.'), + ('Offset', 'VertGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'), + ('Offset', 'HorizGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'), + ('uint16', 'VertGlyphCount', None, None, 'Number of glyphs for which information is provided for vertically growing variants.'), + ('uint16', 'HorizGlyphCount', None, None, 'Number of glyphs for which information is provided for horizontally growing variants.'), + ('Offset', 'VertGlyphConstruction', 'VertGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.'), + ('Offset', 'HorizGlyphConstruction', 'HorizGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.'), + ]), + + ('MathGlyphConstruction', [ + ('Offset', 'GlyphAssembly', None, None, 'Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. May be NULL'), + ('uint16', 'VariantCount', None, None, 'Count of glyph growing variants for this glyph.'), + ('MathGlyphVariantRecord', 'MathGlyphVariantRecord', 'VariantCount', 0, 'MathGlyphVariantRecords for alternative variants of the glyphs.'), + ]), + + ('MathGlyphVariantRecord', [ + ('GlyphID', 'VariantGlyph', None, None, 'Glyph ID for the variant.'), + ('uint16', 'AdvanceMeasurement', None, None, 'Advance width/height, in design units, of the variant, in the direction of requested glyph extension.'), + ]), + + ('GlyphAssembly', [ + ('MathValueRecord', 'ItalicsCorrection', None, None, 'Italics correction of this GlyphAssembly. 
Should not depend on the assembly size.'), + ('uint16', 'PartCount', None, None, 'Number of parts in this assembly.'), + ('GlyphPartRecord', 'PartRecords', 'PartCount', 0, 'Array of part records, from left to right and bottom to top.'), + ]), + + ('GlyphPartRecord', [ + ('GlyphID', 'glyph', None, None, 'Glyph ID for the part.'), + ('uint16', 'StartConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the beginning of the glyph, in the direction of the extension.'), + ('uint16', 'EndConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the end of the glyph, in the direction of the extension.'), + ('uint16', 'FullAdvance', None, None, 'Full advance width/height for this part, in the direction of the extension. In design units.'), + ('uint16', 'PartFlags', None, None, 'Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 
0xFFFE Reserved'), + ]), + + + ## + ## Apple Advanced Typography (AAT) tables + ## + + # + # feat + # + + ('feat', [ + ('Version', 'Version', None, None, 'Version of the feat table-initially set to 0x00010000.'), + ('FeatureNames', 'FeatureNames', None, None, 'The feature names.'), + ]), + + ('FeatureNames', [ + ('uint16', 'FeatureNameCount', None, None, 'Number of entries in the feature name array.'), + ('uint16', 'Reserved1', None, None, 'Reserved (set to zero).'), + ('uint32', 'Reserved2', None, None, 'Reserved (set to zero).'), + ('FeatureName', 'FeatureName', 'FeatureNameCount', 0, 'The feature name array.'), + ]), + + ('FeatureName', [ + ('uint16', 'FeatureType', None, None, 'Feature type.'), + ('uint16', 'SettingsCount', None, None, 'The number of records in the setting name array.'), + ('LOffset', 'Settings', None, None, 'Offset to setting table for this feature.'), + ('uint16', 'FeatureFlags', None, None, 'Single-bit flags associated with the feature type.'), + ('uint16', 'FeatureNameID', None, None, 'The name table index for the feature name.'), + ]), + + ('Settings', [ + ('Setting', 'Setting', 'SettingsCount', 0, 'The setting array.'), + ]), + + ('Setting', [ + ('uint16', 'SettingValue', None, None, 'The setting.'), + ('uint16', 'SettingNameID', None, None, 'The name table index for the setting name.'), + ]), + +] diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/otTables.py fonttools-3.0/Tools/fontTools/ttLib/tables/otTables.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/otTables.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/otTables.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,774 @@ +"""fontTools.ttLib.tables.otTables -- A collection of classes representing the various +OpenType subtables. + +Most are constructed upon import from data in otData.py, all are populated with +converter objects from otConverters.py. 
+""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTable, FormatSwitchingBaseTable +import operator +import warnings + + +class FeatureParams(BaseTable): + + def compile(self, writer, font): + assert featureParamTypes.get(writer['FeatureTag']) == self.__class__, "Wrong FeatureParams type for feature '%s': %s" % (writer['FeatureTag'], self.__class__.__name__) + BaseTable.compile(self, writer, font) + + def toXML(self, xmlWriter, font, attrs=None, name=None): + BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) + +class FeatureParamsSize(FeatureParams): + pass + +class FeatureParamsStylisticSet(FeatureParams): + pass + +class FeatureParamsCharacterVariants(FeatureParams): + pass + +class Coverage(FormatSwitchingBaseTable): + + # manual implementation to get rid of glyphID dependencies + + def postRead(self, rawTable, font): + if self.Format == 1: + # TODO only allow glyphs that are valid? + self.glyphs = rawTable["GlyphArray"] + elif self.Format == 2: + glyphs = self.glyphs = [] + ranges = rawTable["RangeRecord"] + glyphOrder = font.getGlyphOrder() + # Some SIL fonts have coverage entries that don't have sorted + # StartCoverageIndex. If it is so, fixup and warn. We undo + # this when writing font out. + sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex) + if ranges != sorted_ranges: + warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") + ranges = sorted_ranges + del sorted_ranges + for r in ranges: + assert r.StartCoverageIndex == len(glyphs), \ + (r.StartCoverageIndex, len(glyphs)) + start = r.Start + end = r.End + try: + startID = font.getGlyphID(start, requireReal=True) + except KeyError: + warnings.warn("Coverage table has start glyph ID out of range: %s." 
% start) + continue + try: + endID = font.getGlyphID(end, requireReal=True) + 1 + except KeyError: + # Apparently some tools use 65535 to "match all" the range + if end != 'glyph65535': + warnings.warn("Coverage table has end glyph ID out of range: %s." % end) + # NOTE: We clobber out-of-range things here. There are legit uses for those, + # but none that we have seen in the wild. + endID = len(glyphOrder) + glyphs.extend(glyphOrder[glyphID] for glyphID in range(startID, endID)) + else: + assert 0, "unknown format: %s" % self.Format + del self.Format # Don't need this anymore + + def preWrite(self, font): + glyphs = getattr(self, "glyphs", None) + if glyphs is None: + glyphs = self.glyphs = [] + format = 1 + rawTable = {"GlyphArray": glyphs} + getGlyphID = font.getGlyphID + if glyphs: + # find out whether Format 2 is more compact or not + glyphIDs = [getGlyphID(glyphName) for glyphName in glyphs ] + brokenOrder = sorted(glyphIDs) != glyphIDs + + last = glyphIDs[0] + ranges = [[last]] + for glyphID in glyphIDs[1:]: + if glyphID != last + 1: + ranges[-1].append(last) + ranges.append([glyphID]) + last = glyphID + ranges[-1].append(last) + + if brokenOrder or len(ranges) * 3 < len(glyphs): # 3 words vs. 
1 word + # Format 2 is more compact + index = 0 + for i in range(len(ranges)): + start, end = ranges[i] + r = RangeRecord() + r.StartID = start + r.Start = font.getGlyphName(start) + r.End = font.getGlyphName(end) + r.StartCoverageIndex = index + ranges[i] = r + index = index + end - start + 1 + if brokenOrder: + warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") + ranges.sort(key=lambda a: a.StartID) + for r in ranges: + del r.StartID + format = 2 + rawTable = {"RangeRecord": ranges} + #else: + # fallthrough; Format 1 is more compact + self.Format = format + return rawTable + + def toXML2(self, xmlWriter, font): + for glyphName in getattr(self, "glyphs", []): + xmlWriter.simpletag("Glyph", value=glyphName) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + glyphs = getattr(self, "glyphs", None) + if glyphs is None: + glyphs = [] + self.glyphs = glyphs + glyphs.append(attrs["value"]) + + +class SingleSubst(FormatSwitchingBaseTable): + + def postRead(self, rawTable, font): + mapping = {} + input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) + lenMapping = len(input) + if self.Format == 1: + delta = rawTable["DeltaGlyphID"] + inputGIDS = [ font.getGlyphID(name) for name in input ] + outGIDS = [ (glyphID + delta) % 65536 for glyphID in inputGIDS ] + outNames = [ font.getGlyphName(glyphID) for glyphID in outGIDS ] + list(map(operator.setitem, [mapping]*lenMapping, input, outNames)) + elif self.Format == 2: + assert len(input) == rawTable["GlyphCount"], \ + "invalid SingleSubstFormat2 table" + subst = rawTable["Substitute"] + list(map(operator.setitem, [mapping]*lenMapping, input, subst)) + else: + assert 0, "unknown format: %s" % self.Format + self.mapping = mapping + del self.Format # Don't need this anymore + + def preWrite(self, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = self.mapping = {} + items = list(mapping.items()) + getGlyphID = font.getGlyphID + gidItems = [(getGlyphID(a), 
getGlyphID(b)) for a,b in items] + sortableItems = sorted(zip(gidItems, items)) + + # figure out format + format = 2 + delta = None + for inID, outID in gidItems: + if delta is None: + delta = (outID - inID) % 65536 + + if (inID + delta) % 65536 != outID: + break + else: + format = 1 + + rawTable = {} + self.Format = format + cov = Coverage() + input = [ item [1][0] for item in sortableItems] + subst = [ item [1][1] for item in sortableItems] + cov.glyphs = input + rawTable["Coverage"] = cov + if format == 1: + assert delta is not None + rawTable["DeltaGlyphID"] = delta + else: + rawTable["Substitute"] = subst + return rawTable + + def toXML2(self, xmlWriter, font): + items = sorted(self.mapping.items()) + for inGlyph, outGlyph in items: + xmlWriter.simpletag("Substitution", + [("in", inGlyph), ("out", outGlyph)]) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = {} + self.mapping = mapping + mapping[attrs["in"]] = attrs["out"] + + +class ClassDef(FormatSwitchingBaseTable): + + def postRead(self, rawTable, font): + classDefs = {} + glyphOrder = font.getGlyphOrder() + + if self.Format == 1: + start = rawTable["StartGlyph"] + classList = rawTable["ClassValueArray"] + try: + startID = font.getGlyphID(start, requireReal=True) + except KeyError: + warnings.warn("ClassDef table has start glyph ID out of range: %s." % start) + startID = len(glyphOrder) + endID = startID + len(classList) + if endID > len(glyphOrder): + warnings.warn("ClassDef table has entries for out of range glyph IDs: %s,%s." % (start, len(classList))) + # NOTE: We clobber out-of-range things here. There are legit uses for those, + # but none that we have seen in the wild. 
+ endID = len(glyphOrder) + + for glyphID, cls in zip(range(startID, endID), classList): + classDefs[glyphOrder[glyphID]] = cls + + elif self.Format == 2: + records = rawTable["ClassRangeRecord"] + for rec in records: + start = rec.Start + end = rec.End + cls = rec.Class + try: + startID = font.getGlyphID(start, requireReal=True) + except KeyError: + warnings.warn("ClassDef table has start glyph ID out of range: %s." % start) + continue + try: + endID = font.getGlyphID(end, requireReal=True) + 1 + except KeyError: + # Apparently some tools use 65535 to "match all" the range + if end != 'glyph65535': + warnings.warn("ClassDef table has end glyph ID out of range: %s." % end) + # NOTE: We clobber out-of-range things here. There are legit uses for those, + # but none that we have seen in the wild. + endID = len(glyphOrder) + for glyphID in range(startID, endID): + classDefs[glyphOrder[glyphID]] = cls + else: + assert 0, "unknown format: %s" % self.Format + self.classDefs = classDefs + del self.Format # Don't need this anymore + + def preWrite(self, font): + classDefs = getattr(self, "classDefs", None) + if classDefs is None: + classDefs = self.classDefs = {} + items = list(classDefs.items()) + format = 2 + rawTable = {"ClassRangeRecord": []} + getGlyphID = font.getGlyphID + for i in range(len(items)): + glyphName, cls = items[i] + items[i] = getGlyphID(glyphName), glyphName, cls + items.sort() + if items: + last, lastName, lastCls = items[0] + ranges = [[lastCls, last, lastName]] + for glyphID, glyphName, cls in items[1:]: + if glyphID != last + 1 or cls != lastCls: + ranges[-1].extend([last, lastName]) + ranges.append([cls, glyphID, glyphName]) + last = glyphID + lastName = glyphName + lastCls = cls + ranges[-1].extend([last, lastName]) + + startGlyph = ranges[0][1] + endGlyph = ranges[-1][3] + glyphCount = endGlyph - startGlyph + 1 + if len(ranges) * 3 < glyphCount + 1: + # Format 2 is more compact + for i in range(len(ranges)): + cls, start, startName, end, endName 
= ranges[i] + rec = ClassRangeRecord() + rec.Start = startName + rec.End = endName + rec.Class = cls + ranges[i] = rec + format = 2 + rawTable = {"ClassRangeRecord": ranges} + else: + # Format 1 is more compact + startGlyphName = ranges[0][2] + classes = [0] * glyphCount + for cls, start, startName, end, endName in ranges: + for g in range(start - startGlyph, end - startGlyph + 1): + classes[g] = cls + format = 1 + rawTable = {"StartGlyph": startGlyphName, "ClassValueArray": classes} + self.Format = format + return rawTable + + def toXML2(self, xmlWriter, font): + items = sorted(self.classDefs.items()) + for glyphName, cls in items: + xmlWriter.simpletag("ClassDef", [("glyph", glyphName), ("class", cls)]) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + classDefs = getattr(self, "classDefs", None) + if classDefs is None: + classDefs = {} + self.classDefs = classDefs + classDefs[attrs["glyph"]] = int(attrs["class"]) + + +class AlternateSubst(FormatSwitchingBaseTable): + + def postRead(self, rawTable, font): + alternates = {} + if self.Format == 1: + input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) + alts = rawTable["AlternateSet"] + if len(input) != len(alts): + assert len(input) == len(alts) + for i in range(len(input)): + alternates[input[i]] = alts[i].Alternate + else: + assert 0, "unknown format: %s" % self.Format + self.alternates = alternates + del self.Format # Don't need this anymore + + def preWrite(self, font): + self.Format = 1 + alternates = getattr(self, "alternates", None) + if alternates is None: + alternates = self.alternates = {} + items = list(alternates.items()) + for i in range(len(items)): + glyphName, set = items[i] + items[i] = font.getGlyphID(glyphName), glyphName, set + items.sort() + cov = Coverage() + cov.glyphs = [ item[1] for item in items] + alternates = [] + setList = [ item[-1] for item in items] + for set in setList: + alts = AlternateSet() + alts.Alternate = set + alternates.append(alts) + # a 
special case to deal with the fact that several hundred Adobe Japan1-5 + # CJK fonts will overflow an offset if the coverage table isn't pushed to the end. + # Also useful in that when splitting a sub-table because of an offset overflow + # I don't need to calculate the change in the subtable offset due to the change in the coverage table size. + # Allows packing more rules in subtable. + self.sortCoverageLast = 1 + return {"Coverage": cov, "AlternateSet": alternates} + + def toXML2(self, xmlWriter, font): + items = sorted(self.alternates.items()) + for glyphName, alternates in items: + xmlWriter.begintag("AlternateSet", glyph=glyphName) + xmlWriter.newline() + for alt in alternates: + xmlWriter.simpletag("Alternate", glyph=alt) + xmlWriter.newline() + xmlWriter.endtag("AlternateSet") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + alternates = getattr(self, "alternates", None) + if alternates is None: + alternates = {} + self.alternates = alternates + glyphName = attrs["glyph"] + set = [] + alternates[glyphName] = set + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + set.append(attrs["glyph"]) + + +class LigatureSubst(FormatSwitchingBaseTable): + + def postRead(self, rawTable, font): + ligatures = {} + if self.Format == 1: + input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) + ligSets = rawTable["LigatureSet"] + assert len(input) == len(ligSets) + for i in range(len(input)): + ligatures[input[i]] = ligSets[i].Ligature + else: + assert 0, "unknown format: %s" % self.Format + self.ligatures = ligatures + del self.Format # Don't need this anymore + + def preWrite(self, font): + self.Format = 1 + ligatures = getattr(self, "ligatures", None) + if ligatures is None: + ligatures = self.ligatures = {} + items = list(ligatures.items()) + for i in range(len(items)): + glyphName, set = items[i] + items[i] = font.getGlyphID(glyphName), glyphName, set + items.sort() + cov = Coverage() 
+ cov.glyphs = [ item[1] for item in items] + + ligSets = [] + setList = [ item[-1] for item in items ] + for set in setList: + ligSet = LigatureSet() + ligs = ligSet.Ligature = [] + for lig in set: + ligs.append(lig) + ligSets.append(ligSet) + # Useful in that when splitting a sub-table because of an offset overflow + # I don't need to calculate the change in subtabl offset due to the coverage table size. + # Allows packing more rules in subtable. + self.sortCoverageLast = 1 + return {"Coverage": cov, "LigatureSet": ligSets} + + def toXML2(self, xmlWriter, font): + items = sorted(self.ligatures.items()) + for glyphName, ligSets in items: + xmlWriter.begintag("LigatureSet", glyph=glyphName) + xmlWriter.newline() + for lig in ligSets: + xmlWriter.simpletag("Ligature", glyph=lig.LigGlyph, + components=",".join(lig.Component)) + xmlWriter.newline() + xmlWriter.endtag("LigatureSet") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + ligatures = getattr(self, "ligatures", None) + if ligatures is None: + ligatures = {} + self.ligatures = ligatures + glyphName = attrs["glyph"] + ligs = [] + ligatures[glyphName] = ligs + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + lig = Ligature() + lig.LigGlyph = attrs["glyph"] + components = attrs["components"] + lig.Component = components.split(",") if components else [] + ligs.append(lig) + + +# +# For each subtable format there is a class. However, we don't really distinguish +# between "field name" and "format name": often these are the same. Yet there's +# a whole bunch of fields with different names. The following dict is a mapping +# from "format name" to "field name". _buildClasses() uses this to create a +# subclass for each alternate field name. 
+# +_equivalents = { + 'MarkArray': ("Mark1Array",), + 'LangSys': ('DefaultLangSys',), + 'Coverage': ('MarkCoverage', 'BaseCoverage', 'LigatureCoverage', 'Mark1Coverage', + 'Mark2Coverage', 'BacktrackCoverage', 'InputCoverage', + 'LookAheadCoverage', 'VertGlyphCoverage', 'HorizGlyphCoverage', + 'TopAccentCoverage', 'ExtendedShapeCoverage', 'MathKernCoverage'), + 'ClassDef': ('ClassDef1', 'ClassDef2', 'BacktrackClassDef', 'InputClassDef', + 'LookAheadClassDef', 'GlyphClassDef', 'MarkAttachClassDef'), + 'Anchor': ('EntryAnchor', 'ExitAnchor', 'BaseAnchor', 'LigatureAnchor', + 'Mark2Anchor', 'MarkAnchor'), + 'Device': ('XPlaDevice', 'YPlaDevice', 'XAdvDevice', 'YAdvDevice', + 'XDeviceTable', 'YDeviceTable', 'DeviceTable'), + 'Axis': ('HorizAxis', 'VertAxis',), + 'MinMax': ('DefaultMinMax',), + 'BaseCoord': ('MinCoord', 'MaxCoord',), + 'JstfLangSys': ('DefJstfLangSys',), + 'JstfGSUBModList': ('ShrinkageEnableGSUB', 'ShrinkageDisableGSUB', 'ExtensionEnableGSUB', + 'ExtensionDisableGSUB',), + 'JstfGPOSModList': ('ShrinkageEnableGPOS', 'ShrinkageDisableGPOS', 'ExtensionEnableGPOS', + 'ExtensionDisableGPOS',), + 'JstfMax': ('ShrinkageJstfMax', 'ExtensionJstfMax',), + 'MathKern': ('TopRightMathKern', 'TopLeftMathKern', 'BottomRightMathKern', + 'BottomLeftMathKern'), + 'MathGlyphConstruction': ('VertGlyphConstruction', 'HorizGlyphConstruction'), +} + +# +# OverFlow logic, to automatically create ExtensionLookups +# XXX This should probably move to otBase.py +# + +def fixLookupOverFlows(ttf, overflowRecord): + """ Either the offset from the LookupList to a lookup overflowed, or + an offset from a lookup to a subtable overflowed. + The table layout is: + GPSO/GUSB + Script List + Feature List + LookUpList + Lookup[0] and contents + SubTable offset list + SubTable[0] and contents + ... + SubTable[n] and contents + ... + Lookup[n] and contents + SubTable offset list + SubTable[0] and contents + ... 
+ SubTable[n] and contents + If the offset to a lookup overflowed (SubTableIndex is None) + we must promote the *previous* lookup to an Extension type. + If the offset from a lookup to subtable overflowed, then we must promote it + to an Extension Lookup type. + """ + ok = 0 + lookupIndex = overflowRecord.LookupListIndex + if (overflowRecord.SubTableIndex is None): + lookupIndex = lookupIndex - 1 + if lookupIndex < 0: + return ok + if overflowRecord.tableType == 'GSUB': + extType = 7 + elif overflowRecord.tableType == 'GPOS': + extType = 9 + + lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup + lookup = lookups[lookupIndex] + # If the previous lookup is an extType, look further back. Very unlikely, but possible. + while lookup.SubTable[0].__class__.LookupType == extType: + lookupIndex = lookupIndex -1 + if lookupIndex < 0: + return ok + lookup = lookups[lookupIndex] + + for si in range(len(lookup.SubTable)): + subTable = lookup.SubTable[si] + extSubTableClass = lookupTypes[overflowRecord.tableType][extType] + extSubTable = extSubTableClass() + extSubTable.Format = 1 + extSubTable.ExtSubTable = subTable + lookup.SubTable[si] = extSubTable + ok = 1 + return ok + +def splitAlternateSubst(oldSubTable, newSubTable, overflowRecord): + ok = 1 + newSubTable.Format = oldSubTable.Format + if hasattr(oldSubTable, 'sortCoverageLast'): + newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast + + oldAlts = sorted(oldSubTable.alternates.items()) + oldLen = len(oldAlts) + + if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']: + # Coverage table is written last. overflow is to or within the + # the coverage table. We will just cut the subtable in half. + newLen = oldLen//2 + + elif overflowRecord.itemName == 'AlternateSet': + # We just need to back up by two items + # from the overflowed AlternateSet index to make sure the offset + # to the Coverage table doesn't overflow. 
+ newLen = overflowRecord.itemIndex - 1 + + newSubTable.alternates = {} + for i in range(newLen, oldLen): + item = oldAlts[i] + key = item[0] + newSubTable.alternates[key] = item[1] + del oldSubTable.alternates[key] + + return ok + + +def splitLigatureSubst(oldSubTable, newSubTable, overflowRecord): + ok = 1 + newSubTable.Format = oldSubTable.Format + oldLigs = sorted(oldSubTable.ligatures.items()) + oldLen = len(oldLigs) + + if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']: + # Coverage table is written last. overflow is to or within the + # the coverage table. We will just cut the subtable in half. + newLen = oldLen//2 + + elif overflowRecord.itemName == 'LigatureSet': + # We just need to back up by two items + # from the overflowed AlternateSet index to make sure the offset + # to the Coverage table doesn't overflow. + newLen = overflowRecord.itemIndex - 1 + + newSubTable.ligatures = {} + for i in range(newLen, oldLen): + item = oldLigs[i] + key = item[0] + newSubTable.ligatures[key] = item[1] + del oldSubTable.ligatures[key] + + return ok + + +splitTable = { 'GSUB': { +# 1: splitSingleSubst, +# 2: splitMultipleSubst, + 3: splitAlternateSubst, + 4: splitLigatureSubst, +# 5: splitContextSubst, +# 6: splitChainContextSubst, +# 7: splitExtensionSubst, +# 8: splitReverseChainSingleSubst, + }, + 'GPOS': { +# 1: splitSinglePos, +# 2: splitPairPos, +# 3: splitCursivePos, +# 4: splitMarkBasePos, +# 5: splitMarkLigPos, +# 6: splitMarkMarkPos, +# 7: splitContextPos, +# 8: splitChainContextPos, +# 9: splitExtensionPos, + } + + } + +def fixSubTableOverFlows(ttf, overflowRecord): + """ + An offset has overflowed within a sub-table. We need to divide this subtable into smaller parts. 
+ """ + ok = 0 + table = ttf[overflowRecord.tableType].table + lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex] + subIndex = overflowRecord.SubTableIndex + subtable = lookup.SubTable[subIndex] + + if hasattr(subtable, 'ExtSubTable'): + # We split the subtable of the Extension table, and add a new Extension table + # to contain the new subtable. + + subTableType = subtable.ExtSubTable.__class__.LookupType + extSubTable = subtable + subtable = extSubTable.ExtSubTable + newExtSubTableClass = lookupTypes[overflowRecord.tableType][subtable.__class__.LookupType] + newExtSubTable = newExtSubTableClass() + newExtSubTable.Format = extSubTable.Format + lookup.SubTable.insert(subIndex + 1, newExtSubTable) + + newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType] + newSubTable = newSubTableClass() + newExtSubTable.ExtSubTable = newSubTable + else: + subTableType = subtable.__class__.LookupType + newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType] + newSubTable = newSubTableClass() + lookup.SubTable.insert(subIndex + 1, newSubTable) + + if hasattr(lookup, 'SubTableCount'): # may not be defined yet. + lookup.SubTableCount = lookup.SubTableCount + 1 + + try: + splitFunc = splitTable[overflowRecord.tableType][subTableType] + except KeyError: + return ok + + ok = splitFunc(subtable, newSubTable, overflowRecord) + return ok + +# End of OverFlow logic + + +def _buildClasses(): + import re + from .otData import otData + + formatPat = re.compile("([A-Za-z0-9]+)Format(\d+)$") + namespace = globals() + + # populate module with classes + for name, table in otData: + baseClass = BaseTable + m = formatPat.match(name) + if m: + # XxxFormatN subtable, we only add the "base" table + name = m.group(1) + baseClass = FormatSwitchingBaseTable + if name not in namespace: + # the class doesn't exist yet, so the base implementation is used. 
+ cls = type(name, (baseClass,), {}) + namespace[name] = cls + + for base, alts in _equivalents.items(): + base = namespace[base] + for alt in alts: + namespace[alt] = type(alt, (base,), {}) + + global lookupTypes + lookupTypes = { + 'GSUB': { + 1: SingleSubst, + 2: MultipleSubst, + 3: AlternateSubst, + 4: LigatureSubst, + 5: ContextSubst, + 6: ChainContextSubst, + 7: ExtensionSubst, + 8: ReverseChainSingleSubst, + }, + 'GPOS': { + 1: SinglePos, + 2: PairPos, + 3: CursivePos, + 4: MarkBasePos, + 5: MarkLigPos, + 6: MarkMarkPos, + 7: ContextPos, + 8: ChainContextPos, + 9: ExtensionPos, + }, + } + lookupTypes['JSTF'] = lookupTypes['GPOS'] # JSTF contains GPOS + for lookupEnum in lookupTypes.values(): + for enum, cls in lookupEnum.items(): + cls.LookupType = enum + + global featureParamTypes + featureParamTypes = { + 'size': FeatureParamsSize, + } + for i in range(1, 20+1): + featureParamTypes['ss%02d' % i] = FeatureParamsStylisticSet + for i in range(1, 99+1): + featureParamTypes['cv%02d' % i] = FeatureParamsCharacterVariants + + # add converters to classes + from .otConverters import buildConverters + for name, table in otData: + m = formatPat.match(name) + if m: + # XxxFormatN subtable, add converter to "base" table + name, format = m.groups() + format = int(format) + cls = namespace[name] + if not hasattr(cls, "converters"): + cls.converters = {} + cls.convertersByName = {} + converters, convertersByName = buildConverters(table[1:], namespace) + cls.converters[format] = converters + cls.convertersByName[format] = convertersByName + # XXX Add staticSize? + else: + cls = namespace[name] + cls.converters, cls.convertersByName = buildConverters(table, namespace) + # XXX Add staticSize? 
+ + +_buildClasses() + + +def _getGlyphsFromCoverageTable(coverage): + if coverage is None: + # empty coverage table + return [] + else: + return coverage.glyphs diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_p_o_s_t.py fonttools-3.0/Tools/fontTools/ttLib/tables/_p_o_s_t.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_p_o_s_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_p_o_s_t.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,277 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, readHex +from . import DefaultTable +import sys +import struct +import array + + +postFormat = """ + > + formatType: 16.16F + italicAngle: 16.16F # italic angle in degrees + underlinePosition: h + underlineThickness: h + isFixedPitch: L + minMemType42: L # minimum memory if TrueType font is downloaded + maxMemType42: L # maximum memory if TrueType font is downloaded + minMemType1: L # minimum memory if Type1 font is downloaded + maxMemType1: L # maximum memory if Type1 font is downloaded +""" + +postFormatSize = sstruct.calcsize(postFormat) + + +class table__p_o_s_t(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + sstruct.unpack(postFormat, data[:postFormatSize], self) + data = data[postFormatSize:] + if self.formatType == 1.0: + self.decode_format_1_0(data, ttFont) + elif self.formatType == 2.0: + self.decode_format_2_0(data, ttFont) + elif self.formatType == 3.0: + self.decode_format_3_0(data, ttFont) + elif self.formatType == 4.0: + self.decode_format_4_0(data, ttFont) + else: + # supported format + raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType) + + def compile(self, ttFont): + data = sstruct.pack(postFormat, self) + if self.formatType == 1.0: + pass # 
we're done + elif self.formatType == 2.0: + data = data + self.encode_format_2_0(ttFont) + elif self.formatType == 3.0: + pass # we're done + elif self.formatType == 4.0: + data = data + self.encode_format_4_0(ttFont) + else: + # supported format + raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType) + return data + + def getGlyphOrder(self): + """This function will get called by a ttLib.TTFont instance. + Do not call this function yourself, use TTFont().getGlyphOrder() + or its relatives instead! + """ + if not hasattr(self, "glyphOrder"): + raise ttLib.TTLibError("illegal use of getGlyphOrder()") + glyphOrder = self.glyphOrder + del self.glyphOrder + return glyphOrder + + def decode_format_1_0(self, data, ttFont): + self.glyphOrder = standardGlyphOrder[:ttFont["maxp"].numGlyphs] + + def decode_format_2_0(self, data, ttFont): + numGlyphs, = struct.unpack(">H", data[:2]) + numGlyphs = int(numGlyphs) + if numGlyphs > ttFont['maxp'].numGlyphs: + # Assume the numGlyphs field is bogus, so sync with maxp. + # I've seen this in one font, and if the assumption is + # wrong elsewhere, well, so be it: it's hard enough to + # work around _one_ non-conforming post format... 
+ numGlyphs = ttFont['maxp'].numGlyphs + data = data[2:] + indices = array.array("H") + indices.fromstring(data[:2*numGlyphs]) + if sys.byteorder != "big": + indices.byteswap() + data = data[2*numGlyphs:] + self.extraNames = extraNames = unpackPStrings(data) + self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs) + for glyphID in range(numGlyphs): + index = indices[glyphID] + if index > 32767: # reserved for future use; ignore + name = "" + elif index > 257: + try: + name = extraNames[index-258] + except IndexError: + name = "" + else: + # fetch names from standard list + name = standardGlyphOrder[index] + glyphOrder[glyphID] = name + self.build_psNameMapping(ttFont) + + def build_psNameMapping(self, ttFont): + mapping = {} + allNames = {} + for i in range(ttFont['maxp'].numGlyphs): + glyphName = psName = self.glyphOrder[i] + if glyphName == "": + glyphName = "glyph%.5d" % i + if glyphName in allNames: + # make up a new glyphName that's unique + n = allNames[glyphName] + while (glyphName + "#" + str(n)) in allNames: + n += 1 + allNames[glyphName] = n + 1 + glyphName = glyphName + "#" + str(n) + + self.glyphOrder[i] = glyphName + allNames[glyphName] = 1 + if glyphName != psName: + mapping[glyphName] = psName + + self.mapping = mapping + + def decode_format_3_0(self, data, ttFont): + # Setting self.glyphOrder to None will cause the TTFont object + # try and construct glyph names from a Unicode cmap table. + self.glyphOrder = None + + def decode_format_4_0(self, data, ttFont): + from fontTools import agl + numGlyphs = ttFont['maxp'].numGlyphs + indices = array.array("H") + indices.fromstring(data) + if sys.byteorder != "big": + indices.byteswap() + # In some older fonts, the size of the post table doesn't match + # the number of glyphs. Sometimes it's bigger, sometimes smaller. 
+ self.glyphOrder = glyphOrder = [''] * int(numGlyphs) + for i in range(min(len(indices),numGlyphs)): + if indices[i] == 0xFFFF: + self.glyphOrder[i] = '' + elif indices[i] in agl.UV2AGL: + self.glyphOrder[i] = agl.UV2AGL[indices[i]] + else: + self.glyphOrder[i] = "uni%04X" % indices[i] + self.build_psNameMapping(ttFont) + + def encode_format_2_0(self, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs + glyphOrder = ttFont.getGlyphOrder() + assert len(glyphOrder) == numGlyphs + indices = array.array("H") + extraDict = {} + extraNames = self.extraNames + for i in range(len(extraNames)): + extraDict[extraNames[i]] = i + for glyphID in range(numGlyphs): + glyphName = glyphOrder[glyphID] + if glyphName in self.mapping: + psName = self.mapping[glyphName] + else: + psName = glyphName + if psName in extraDict: + index = 258 + extraDict[psName] + elif psName in standardGlyphOrder: + index = standardGlyphOrder.index(psName) + else: + index = 258 + len(extraNames) + assert index < 32768, "Too many glyph names for 'post' table format 2" + extraDict[psName] = len(extraNames) + extraNames.append(psName) + indices.append(index) + if sys.byteorder != "big": + indices.byteswap() + return struct.pack(">H", numGlyphs) + indices.tostring() + packPStrings(extraNames) + + def encode_format_4_0(self, ttFont): + from fontTools import agl + numGlyphs = ttFont['maxp'].numGlyphs + glyphOrder = ttFont.getGlyphOrder() + assert len(glyphOrder) == numGlyphs + indices = array.array("H") + for glyphID in glyphOrder: + glyphID = glyphID.split('#')[0] + if glyphID in agl.AGL2UV: + indices.append(agl.AGL2UV[glyphID]) + elif len(glyphID) == 7 and glyphID[:3] == 'uni': + indices.append(int(glyphID[3:],16)) + else: + indices.append(0xFFFF) + if sys.byteorder != "big": + indices.byteswap() + return indices.tostring() + + def toXML(self, writer, ttFont): + formatstring, names, fixes = sstruct.getformat(postFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + 
writer.newline() + if hasattr(self, "mapping"): + writer.begintag("psNames") + writer.newline() + writer.comment("This file uses unique glyph names based on the information\n" + "found in the 'post' table. Since these names might not be unique,\n" + "we have to invent artificial names in case of clashes. In order to\n" + "be able to retain the original information, we need a name to\n" + "ps name mapping for those cases where they differ. That's what\n" + "you see below.\n") + writer.newline() + items = sorted(self.mapping.items()) + for name, psName in items: + writer.simpletag("psName", name=name, psName=psName) + writer.newline() + writer.endtag("psNames") + writer.newline() + if hasattr(self, "extraNames"): + writer.begintag("extraNames") + writer.newline() + writer.comment("following are the name that are not taken from the standard Mac glyph order") + writer.newline() + for name in self.extraNames: + writer.simpletag("psName", name=name) + writer.newline() + writer.endtag("extraNames") + writer.newline() + if hasattr(self, "data"): + writer.begintag("hexdata") + writer.newline() + writer.dumphex(self.data) + writer.endtag("hexdata") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name not in ("psNames", "extraNames", "hexdata"): + setattr(self, name, safeEval(attrs["value"])) + elif name == "psNames": + self.mapping = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "psName": + self.mapping[attrs["name"]] = attrs["psName"] + elif name == "extraNames": + self.extraNames = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "psName": + self.extraNames.append(attrs["name"]) + else: + self.data = readHex(content) + + +def unpackPStrings(data): + strings = [] + index = 0 + dataLen = len(data) + while index < dataLen: + length = byteord(data[index]) + 
strings.append(tostr(data[index+1:index+1+length], encoding="latin1")) + index = index + 1 + length + return strings + + +def packPStrings(strings): + data = b"" + for s in strings: + data = data + bytechr(len(s)) + tobytes(s, encoding="latin1") + return data diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_p_r_e_p.py fonttools-3.0/Tools/fontTools/ttLib/tables/_p_r_e_p.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_p_r_e_p.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_p_r_e_p.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib + +superclass = ttLib.getTableClass("fpgm") + +class table__p_r_e_p(superclass): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/sbixGlyph.py fonttools-3.0/Tools/fontTools/ttLib/tables/sbixGlyph.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/sbixGlyph.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/sbixGlyph.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,119 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import readHex, safeEval +import struct + + +sbixGlyphHeaderFormat = """ + > + originOffsetX: h # The x-value of the point in the glyph relative to its + # lower-left corner which corresponds to the origin of + # the glyph on the screen, that is the point on the + # baseline at the left edge of the glyph. + originOffsetY: h # The y-value of the point in the glyph relative to its + # lower-left corner which corresponds to the origin of + # the glyph on the screen, that is the point on the + # baseline at the left edge of the glyph. + graphicType: 4s # e.g. 
"png " +""" + +sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat) + + +class Glyph(object): + def __init__(self, glyphName=None, referenceGlyphName=None, originOffsetX=0, originOffsetY=0, graphicType=None, imageData=None, rawdata=None, gid=0): + self.gid = gid + self.glyphName = glyphName + self.referenceGlyphName = referenceGlyphName + self.originOffsetX = originOffsetX + self.originOffsetY = originOffsetY + self.rawdata = rawdata + self.graphicType = graphicType + self.imageData = imageData + + # fix self.graphicType if it is null terminated or too short + if self.graphicType is not None: + if self.graphicType[-1] == "\0": + self.graphicType = self.graphicType[:-1] + if len(self.graphicType) > 4: + from fontTools import ttLib + raise ttLib.TTLibError("Glyph.graphicType must not be longer than 4 characters.") + elif len(self.graphicType) < 4: + # pad with spaces + self.graphicType += " "[:(4 - len(self.graphicType))] + + def decompile(self, ttFont): + self.glyphName = ttFont.getGlyphName(self.gid) + if self.rawdata is None: + from fontTools import ttLib + raise ttLib.TTLibError("No table data to decompile") + if len(self.rawdata) > 0: + if len(self.rawdata) < sbixGlyphHeaderFormatSize: + from fontTools import ttLib + #print "Glyph %i header too short: Expected %x, got %x." 
% (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata)) + raise ttLib.TTLibError("Glyph header too short.") + + sstruct.unpack(sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self) + + if self.graphicType == "dupe": + # this glyph is a reference to another glyph's image data + gid, = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:]) + self.referenceGlyphName = ttFont.getGlyphName(gid) + else: + self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:] + self.referenceGlyphName = None + # clean up + del self.rawdata + del self.gid + + def compile(self, ttFont): + if self.glyphName is None: + from fontTools import ttLib + raise ttLib.TTLibError("Can't compile Glyph without glyph name") + # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index? + # (needed if you just want to compile the sbix table on its own) + self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName)) + if self.graphicType is None: + self.rawdata = "" + else: + self.rawdata = sstruct.pack(sbixGlyphHeaderFormat, self) + self.imageData + + def toXML(self, xmlWriter, ttFont): + if self.graphicType == None: + # TODO: ignore empty glyphs? + # a glyph data entry is required for each glyph, + # but empty ones can be calculated at compile time + xmlWriter.simpletag("glyph", name=self.glyphName) + xmlWriter.newline() + return + xmlWriter.begintag("glyph", + graphicType=self.graphicType, + name=self.glyphName, + originOffsetX=self.originOffsetX, + originOffsetY=self.originOffsetY, + ) + xmlWriter.newline() + if self.graphicType == "dupe": + # graphicType == "dupe" is a reference to another glyph id. 
+ xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName) + else: + xmlWriter.begintag("hexdata") + xmlWriter.newline() + xmlWriter.dumphex(self.imageData) + xmlWriter.endtag("hexdata") + xmlWriter.newline() + xmlWriter.endtag("glyph") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "ref": + # glyph is a "dupe", i.e. a reference to another glyph's image data. + # in this case imageData contains the glyph id of the reference glyph + # get glyph id from glyphname + self.imageData = struct.pack(">H", ttFont.getGlyphID(safeEval("'''" + attrs["glyphname"] + "'''"))) + elif name == "hexdata": + self.imageData = readHex(content) + else: + from fontTools import ttLib + raise ttLib.TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_s_b_i_x.py fonttools-3.0/Tools/fontTools/ttLib/tables/_s_b_i_x.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_s_b_i_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_s_b_i_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,117 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval, num2binary, binary2num +from . import DefaultTable +from .sbixGlyph import * +from .sbixStrike import * + + +sbixHeaderFormat = """ + > + version: H # Version number (set to 1) + flags: H # The only two bits used in the flags field are bits 0 + # and 1. For historical reasons, bit 0 must always be 1. 
+ # Bit 1 is a sbixDrawOutlines flag and is interpreted as + # follows: + # 0: Draw only 'sbix' bitmaps + # 1: Draw both 'sbix' bitmaps and outlines, in that + # order + numStrikes: L # Number of bitmap strikes to follow +""" +sbixHeaderFormatSize = sstruct.calcsize(sbixHeaderFormat) + + +sbixStrikeOffsetFormat = """ + > + strikeOffset: L # Offset from begining of table to data for the + # individual strike +""" +sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat) + + +class table__s_b_i_x(DefaultTable.DefaultTable): + def __init__(self, tag): + self.tableTag = tag + self.version = 1 + self.flags = 1 + self.numStrikes = 0 + self.strikes = {} + self.strikeOffsets = [] + + def decompile(self, data, ttFont): + # read table header + sstruct.unpack(sbixHeaderFormat, data[ : sbixHeaderFormatSize], self) + # collect offsets to individual strikes in self.strikeOffsets + for i in range(self.numStrikes): + current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize + offset_entry = sbixStrikeOffset() + sstruct.unpack(sbixStrikeOffsetFormat, \ + data[current_offset:current_offset+sbixStrikeOffsetFormatSize], \ + offset_entry) + self.strikeOffsets.append(offset_entry.strikeOffset) + + # decompile Strikes + for i in range(self.numStrikes-1, -1, -1): + current_strike = Strike(rawdata=data[self.strikeOffsets[i]:]) + data = data[:self.strikeOffsets[i]] + current_strike.decompile(ttFont) + #print " Strike length: %xh" % len(bitmapSetData) + #print "Number of Glyph entries:", len(current_strike.glyphs) + if current_strike.ppem in self.strikes: + from fontTools import ttLib + raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike") + self.strikes[current_strike.ppem] = current_strike + + # after the glyph data records have been extracted, we don't need the offsets anymore + del self.strikeOffsets + del self.numStrikes + + def compile(self, ttFont): + sbixData = "" + self.numStrikes = len(self.strikes) + sbixHeader = 
sstruct.pack(sbixHeaderFormat, self) + + # calculate offset to start of first strike + setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes + + for si in sorted(self.strikes.keys()): + current_strike = self.strikes[si] + current_strike.compile(ttFont) + # append offset to this strike to table header + current_strike.strikeOffset = setOffset + sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike) + setOffset += len(current_strike.data) + sbixData += current_strike.data + + return sbixHeader + sbixData + + def toXML(self, xmlWriter, ttFont): + xmlWriter.simpletag("version", value=self.version) + xmlWriter.newline() + xmlWriter.simpletag("flags", value=num2binary(self.flags, 16)) + xmlWriter.newline() + for i in sorted(self.strikes.keys()): + self.strikes[i].toXML(xmlWriter, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name =="version": + setattr(self, name, safeEval(attrs["value"])) + elif name == "flags": + setattr(self, name, binary2num(attrs["value"])) + elif name == "strike": + current_strike = Strike() + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + current_strike.fromXML(name, attrs, content, ttFont) + self.strikes[current_strike.ppem] = current_strike + else: + from fontTools import ttLib + raise ttLib.TTLibError("can't handle '%s' element" % name) + + +# Helper classes + +class sbixStrikeOffset(object): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/sbixStrike.py fonttools-3.0/Tools/fontTools/ttLib/tables/sbixStrike.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/sbixStrike.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/sbixStrike.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,150 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import readHex +from .sbixGlyph import * +import 
struct + +sbixStrikeHeaderFormat = """ + > + ppem: H # The PPEM for which this strike was designed (e.g., 9, + # 12, 24) + resolution: H # The screen resolution (in dpi) for which this strike + # was designed (e.g., 72) +""" + +sbixGlyphDataOffsetFormat = """ + > + glyphDataOffset: L # Offset from the beginning of the strike data record + # to data for the individual glyph +""" + +sbixStrikeHeaderFormatSize = sstruct.calcsize(sbixStrikeHeaderFormat) +sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat) + + +class Strike(object): + def __init__(self, rawdata=None, ppem=0, resolution=72): + self.data = rawdata + self.ppem = ppem + self.resolution = resolution + self.glyphs = {} + + def decompile(self, ttFont): + if self.data is None: + from fontTools import ttLib + raise ttLib.TTLibError + if len(self.data) < sbixStrikeHeaderFormatSize: + from fontTools import ttLib + raise(ttLib.TTLibError, "Strike header too short: Expected %x, got %x.") \ + % (sbixStrikeHeaderFormatSize, len(self.data)) + + # read Strike header from raw data + sstruct.unpack(sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self) + + # calculate number of glyphs + firstGlyphDataOffset, = struct.unpack(">L", \ + self.data[sbixStrikeHeaderFormatSize:sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize]) + self.numGlyphs = (firstGlyphDataOffset - sbixStrikeHeaderFormatSize) // sbixGlyphDataOffsetFormatSize - 1 + # ^ -1 because there's one more offset than glyphs + + # build offset list for single glyph data offsets + self.glyphDataOffsets = [] + for i in range(self.numGlyphs + 1): # + 1 because there's one more offset than glyphs + start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize + current_offset, = struct.unpack(">L", self.data[start:start + sbixGlyphDataOffsetFormatSize]) + self.glyphDataOffsets.append(current_offset) + + # iterate through offset list and slice raw data into glyph data records + for i in range(self.numGlyphs): + 
current_glyph = Glyph(rawdata=self.data[self.glyphDataOffsets[i]:self.glyphDataOffsets[i+1]], gid=i) + current_glyph.decompile(ttFont) + self.glyphs[current_glyph.glyphName] = current_glyph + del self.glyphDataOffsets + del self.numGlyphs + del self.data + + def compile(self, ttFont): + self.glyphDataOffsets = "" + self.bitmapData = "" + + glyphOrder = ttFont.getGlyphOrder() + + # first glyph starts right after the header + currentGlyphDataOffset = sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1) + for glyphName in glyphOrder: + if glyphName in self.glyphs: + # we have glyph data for this glyph + current_glyph = self.glyphs[glyphName] + else: + # must add empty glyph data record for this glyph + current_glyph = Glyph(glyphName=glyphName) + current_glyph.compile(ttFont) + current_glyph.glyphDataOffset = currentGlyphDataOffset + self.bitmapData += current_glyph.rawdata + currentGlyphDataOffset += len(current_glyph.rawdata) + self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, current_glyph) + + # add last "offset", really the end address of the last glyph data record + dummy = Glyph() + dummy.glyphDataOffset = currentGlyphDataOffset + self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy) + + # pack header + self.data = sstruct.pack(sbixStrikeHeaderFormat, self) + # add offsets and image data after header + self.data += self.glyphDataOffsets + self.bitmapData + + def toXML(self, xmlWriter, ttFont): + xmlWriter.begintag("strike") + xmlWriter.newline() + xmlWriter.simpletag("ppem", value=self.ppem) + xmlWriter.newline() + xmlWriter.simpletag("resolution", value=self.resolution) + xmlWriter.newline() + glyphOrder = ttFont.getGlyphOrder() + for i in range(len(glyphOrder)): + if glyphOrder[i] in self.glyphs: + self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont) + # TODO: what if there are more glyph data records than (glyf table) glyphs? 
+ xmlWriter.endtag("strike") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name in ["ppem", "resolution"]: + setattr(self, name, safeEval(attrs["value"])) + elif name == "glyph": + if "graphicType" in attrs: + myFormat = safeEval("'''" + attrs["graphicType"] + "'''") + else: + myFormat = None + if "glyphname" in attrs: + myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''") + elif "name" in attrs: + myGlyphName = safeEval("'''" + attrs["name"] + "'''") + else: + from fontTools import ttLib + raise ttLib.TTLibError("Glyph must have a glyph name.") + if "originOffsetX" in attrs: + myOffsetX = safeEval(attrs["originOffsetX"]) + else: + myOffsetX = 0 + if "originOffsetY" in attrs: + myOffsetY = safeEval(attrs["originOffsetY"]) + else: + myOffsetY = 0 + current_glyph = Glyph( + glyphName=myGlyphName, + graphicType=myFormat, + originOffsetX=myOffsetX, + originOffsetY=myOffsetY, + ) + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + current_glyph.fromXML(name, attrs, content, ttFont) + current_glyph.compile(ttFont) + self.glyphs[current_glyph.glyphName] = current_glyph + else: + from fontTools import ttLib + raise ttLib.TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/S_I_N_G_.py fonttools-3.0/Tools/fontTools/ttLib/tables/S_I_N_G_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/S_I_N_G_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/S_I_N_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,95 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable + +SINGFormat = """ + > # big endian + tableVersionMajor: H + tableVersionMinor: H + glyphletVersion: H + permissions: h + mainGID: H + unitsPerEm: H + vertAdvance: h + vertOrigin: h + uniqueName: 28s + METAMD5: 16s + nameLength: 1s +""" +# baseGlyphName is a byte string which follows the record above. + + +class table_S_I_N_G_(DefaultTable.DefaultTable): + + dependencies = [] + + def decompile(self, data, ttFont): + dummy, rest = sstruct.unpack2(SINGFormat, data, self) + self.uniqueName = self.decompileUniqueName(self.uniqueName) + self.nameLength = byteord(self.nameLength) + assert len(rest) == self.nameLength + self.baseGlyphName = tostr(rest) + + rawMETAMD5 = self.METAMD5 + self.METAMD5 = "[" + hex(byteord(self.METAMD5[0])) + for char in rawMETAMD5[1:]: + self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char)) + self.METAMD5 = self.METAMD5 + "]" + + def decompileUniqueName(self, data): + name = "" + for char in data: + val = byteord(char) + if val == 0: + break + if (val > 31) or (val < 128): + name += chr(val) + else: + octString = oct(val) + if len(octString) > 3: + octString = octString[1:] # chop off that leading zero. 
+ elif len(octString) < 3: + octString.zfill(3) + name += "\\" + octString + return name + + def compile(self, ttFont): + d = self.__dict__.copy() + d["nameLength"] = bytechr(len(self.baseGlyphName)) + d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28) + METAMD5List = eval(self.METAMD5) + d["METAMD5"] = b"" + for val in METAMD5List: + d["METAMD5"] += bytechr(val) + assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table" + data = sstruct.pack(SINGFormat, d) + data = data + tobytes(self.baseGlyphName) + return data + + def compilecompileUniqueName(self, name, length): + nameLen = len(name) + if length <= nameLen: + name = name[:length-1] + "\000" + else: + name += (nameLen - length) * "\000" + return name + + def toXML(self, writer, ttFont): + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(SINGFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + writer.simpletag("baseGlyphName", value=self.baseGlyphName) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name in ["uniqueName", "METAMD5", "baseGlyphName"]: + setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/S_V_G_.py fonttools-3.0/Tools/fontTools/ttLib/tables/S_V_G_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/S_V_G_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/S_V_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,379 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from . 
import DefaultTable +try: + import xml.etree.cElementTree as ET +except ImportError: + import xml.etree.ElementTree as ET +import struct +import re + +__doc__=""" +Compiles/decompiles version 0 and 1 SVG tables from/to XML. + +Version 1 is the first SVG definition, implemented in Mozilla before Aug 2013, now deprecated. +This module will decompile this correctly, but will compile a version 1 table +only if you add the secret element "" to the SVG element in the TTF file. + +Version 0 is the joint Adobe-Mozilla proposal, which supports color palettes. + +The XML format is: + + + <complete SVG doc> ]] + </svgDoc> +... + <svgDoc endGlyphID="n" startGlyphID="m"> + <![CDATA[ <complete SVG doc> ]] + </svgDoc> + + <colorPalettes> + <colorParamUINameID>n</colorParamUINameID> + ... + <colorParamUINameID>m</colorParamUINameID> + <colorPalette uiNameID="n"> + <colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" /> + ... + <colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" /> + </colorPalette> + ... + <colorPalette uiNameID="m"> + <colorRecord red="<int> green="<int>" blue="<int>" alpha="<int>" /> + ... + <colorRecord red=<int>" green="<int>" blue="<int>" alpha="<int>" /> + </colorPalette> + </colorPalettes> +</SVG> + +Color values must be less than 256. + +The number of color records in each </colorPalette> must be the same as +the number of <colorParamUINameID> elements. 
+ +""" + +XML = ET.XML +XMLElement = ET.Element +xmlToString = ET.tostring + +SVG_format_0 = """ + > # big endian + version: H + offsetToSVGDocIndex: L + offsetToColorPalettes: L +""" + +SVG_format_0Size = sstruct.calcsize(SVG_format_0) + +SVG_format_1 = """ + > # big endian + version: H + numIndicies: H +""" + +SVG_format_1Size = sstruct.calcsize(SVG_format_1) + +doc_index_entry_format_0 = """ + > # big endian + startGlyphID: H + endGlyphID: H + svgDocOffset: L + svgDocLength: L +""" + +doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0) + +colorRecord_format_0 = """ + red: B + green: B + blue: B + alpha: B +""" + + +class table_S_V_G_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + self.docList = None + self.colorPalettes = None + pos = 0 + self.version = struct.unpack(">H", data[pos:pos+2])[0] + + if self.version == 1: + self.decompile_format_1(data, ttFont) + else: + if self.version != 0: + print("Unknown SVG table version '%s'. Decompiling as version 0." % (self.version)) + self.decompile_format_0(data, ttFont) + + def decompile_format_0(self, data, ttFont): + dummy, data2 = sstruct.unpack2(SVG_format_0, data, self) + # read in SVG Documents Index + self.decompileEntryList(data) + + # read in colorPalettes table. 
+ self.colorPalettes = colorPalettes = ColorPalettes() + pos = self.offsetToColorPalettes + if pos > 0: + colorPalettes.numColorParams = numColorParams = struct.unpack(">H", data[pos:pos+2])[0] + if numColorParams > 0: + colorPalettes.colorParamUINameIDs = colorParamUINameIDs = [] + pos = pos + 2 + for i in range(numColorParams): + nameID = struct.unpack(">H", data[pos:pos+2])[0] + colorParamUINameIDs.append(nameID) + pos = pos + 2 + + colorPalettes.numColorPalettes = numColorPalettes = struct.unpack(">H", data[pos:pos+2])[0] + pos = pos + 2 + if numColorPalettes > 0: + colorPalettes.colorPaletteList = colorPaletteList = [] + for i in range(numColorPalettes): + colorPalette = ColorPalette() + colorPaletteList.append(colorPalette) + colorPalette.uiNameID = struct.unpack(">H", data[pos:pos+2])[0] + pos = pos + 2 + colorPalette.paletteColors = paletteColors = [] + for j in range(numColorParams): + colorRecord, colorPaletteData = sstruct.unpack2(colorRecord_format_0, data[pos:], ColorRecord()) + paletteColors.append(colorRecord) + pos += 4 + + def decompile_format_1(self, data, ttFont): + pos = 2 + self.numEntries = struct.unpack(">H", data[pos:pos+2])[0] + pos += 2 + self.decompileEntryList(data, pos) + + def decompileEntryList(self, data): + # data starts with the first entry of the entry list. 
+ pos = subTableStart = self.offsetToSVGDocIndex + self.numEntries = numEntries = struct.unpack(">H", data[pos:pos+2])[0] + pos += 2 + if self.numEntries > 0: + data2 = data[pos:] + self.docList = [] + self.entries = entries = [] + for i in range(self.numEntries): + docIndexEntry, data2 = sstruct.unpack2(doc_index_entry_format_0, data2, DocumentIndexEntry()) + entries.append(docIndexEntry) + + for entry in entries: + start = entry.svgDocOffset + subTableStart + end = start + entry.svgDocLength + doc = data[start:end] + if doc.startswith(b"\x1f\x8b"): + import gzip + bytesIO = BytesIO(doc) + with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper: + doc = gunzipper.read() + self.compressed = True + del bytesIO + doc = tostr(doc, "utf_8") + self.docList.append( [doc, entry.startGlyphID, entry.endGlyphID] ) + + def compile(self, ttFont): + if hasattr(self, "version1"): + data = self.compileFormat1(ttFont) + else: + data = self.compileFormat0(ttFont) + return data + + def compileFormat0(self, ttFont): + version = 0 + offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header. + # get SGVDoc info. 
+ docList = [] + entryList = [] + numEntries = len(self.docList) + datum = struct.pack(">H",numEntries) + entryList.append(datum) + curOffset = len(datum) + doc_index_entry_format_0Size*numEntries + for doc, startGlyphID, endGlyphID in self.docList: + docOffset = curOffset + docBytes = tobytes(doc, encoding="utf_8") + if getattr(self, "compressed", False) and not docBytes.startswith(b"\x1f\x8b"): + import gzip + bytesIO = BytesIO() + with gzip.GzipFile(None, "w", fileobj=bytesIO) as gzipper: + gzipper.write(docBytes) + gzipped = bytesIO.getvalue() + if len(gzipped) < len(docBytes): + docBytes = gzipped + del gzipped, bytesIO + docLength = len(docBytes) + curOffset += docLength + entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength) + entryList.append(entry) + docList.append(docBytes) + entryList.extend(docList) + svgDocData = bytesjoin(entryList) + + # get colorpalette info. + if self.colorPalettes is None: + offsetToColorPalettes = 0 + palettesData = "" + else: + offsetToColorPalettes = SVG_format_0Size + len(svgDocData) + dataList = [] + numColorParams = len(self.colorPalettes.colorParamUINameIDs) + datum = struct.pack(">H", numColorParams) + dataList.append(datum) + for uiNameId in self.colorPalettes.colorParamUINameIDs: + datum = struct.pack(">H", uiNameId) + dataList.append(datum) + numColorPalettes = len(self.colorPalettes.colorPaletteList) + datum = struct.pack(">H", numColorPalettes) + dataList.append(datum) + for colorPalette in self.colorPalettes.colorPaletteList: + datum = struct.pack(">H", colorPalette.uiNameID) + dataList.append(datum) + for colorRecord in colorPalette.paletteColors: + data = struct.pack(">BBBB", colorRecord.red, colorRecord.green, colorRecord.blue, colorRecord.alpha) + dataList.append(data) + palettesData = bytesjoin(dataList) + + header = struct.pack(">HLL", version, offsetToSVGDocIndex, offsetToColorPalettes) + data = [header, svgDocData, palettesData] + data = bytesjoin(data) + return data + + def 
compileFormat1(self, ttFont): + version = 1 + numEntries = len(self.docList) + header = struct.pack(">HH", version, numEntries) + dataList = [header] + docList = [] + curOffset = SVG_format_1Size + doc_index_entry_format_0Size*numEntries + for doc, startGlyphID, endGlyphID in self.docList: + docOffset = curOffset + docBytes = tobytes(doc, encoding="utf_8") + docLength = len(docBytes) + curOffset += docLength + entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength) + dataList.append(entry) + docList.append(docBytes) + dataList.extend(docList) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.newline() + for doc, startGID, endGID in self.docList: + writer.begintag("svgDoc", startGlyphID=startGID, endGlyphID=endGID) + writer.newline() + writer.writecdata(doc) + writer.newline() + writer.endtag("svgDoc") + writer.newline() + + if (self.colorPalettes is not None) and (self.colorPalettes.numColorParams is not None): + writer.begintag("colorPalettes") + writer.newline() + for uiNameID in self.colorPalettes.colorParamUINameIDs: + writer.begintag("colorParamUINameID") + writer.writeraw(str(uiNameID)) + writer.endtag("colorParamUINameID") + writer.newline() + for colorPalette in self.colorPalettes.colorPaletteList: + writer.begintag("colorPalette", [("uiNameID", str(colorPalette.uiNameID))]) + writer.newline() + for colorRecord in colorPalette.paletteColors: + colorAttributes = [ + ("red", hex(colorRecord.red)), + ("green", hex(colorRecord.green)), + ("blue", hex(colorRecord.blue)), + ("alpha", hex(colorRecord.alpha)), + ] + writer.begintag("colorRecord", colorAttributes) + writer.endtag("colorRecord") + writer.newline() + writer.endtag("colorPalette") + writer.newline() + + writer.endtag("colorPalettes") + writer.newline() + else: + writer.begintag("colorPalettes") + writer.endtag("colorPalettes") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "svgDoc": + if not hasattr(self, 
"docList"): + self.docList = [] + doc = strjoin(content) + doc = doc.strip() + startGID = int(attrs["startGlyphID"]) + endGID = int(attrs["endGlyphID"]) + self.docList.append( [doc, startGID, endGID] ) + elif name == "colorPalettes": + self.colorPalettes = ColorPalettes() + self.colorPalettes.fromXML(name, attrs, content, ttFont) + if self.colorPalettes.numColorParams == 0: + self.colorPalettes = None + else: + print("Unknown", name, content) + +class DocumentIndexEntry(object): + def __init__(self): + self.startGlyphID = None # USHORT + self.endGlyphID = None # USHORT + self.svgDocOffset = None # ULONG + self.svgDocLength = None # ULONG + + def __repr__(self): + return "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength) + +class ColorPalettes(object): + def __init__(self): + self.numColorParams = None # USHORT + self.colorParamUINameIDs = [] # list of name table name ID values that provide UI description of each color palette. + self.numColorPalettes = None # USHORT + self.colorPaletteList = [] # list of ColorPalette records + + def fromXML(self, name, attrs, content, ttFont): + for element in content: + if isinstance(element, type("")): + continue + name, attrib, content = element + if name == "colorParamUINameID": + uiNameID = int(content[0]) + self.colorParamUINameIDs.append(uiNameID) + elif name == "colorPalette": + colorPalette = ColorPalette() + self.colorPaletteList.append(colorPalette) + colorPalette.fromXML((name, attrib, content), ttFont) + + self.numColorParams = len(self.colorParamUINameIDs) + self.numColorPalettes = len(self.colorPaletteList) + for colorPalette in self.colorPaletteList: + if len(colorPalette.paletteColors) != self.numColorParams: + raise ValueError("Number of color records in a colorPalette ('%s') does not match the number of colorParamUINameIDs elements ('%s')." 
% (len(colorPalette.paletteColors), self.numColorParams)) + +class ColorPalette(object): + def __init__(self): + self.uiNameID = None # USHORT. name table ID that describes user interface strings associated with this color palette. + self.paletteColors = [] # list of ColorRecords + + def fromXML(self, name, attrs, content, ttFont): + self.uiNameID = int(attrs["uiNameID"]) + for element in content: + if isinstance(element, type("")): + continue + name, attrib, content = element + if name == "colorRecord": + colorRecord = ColorRecord() + self.paletteColors.append(colorRecord) + colorRecord.red = eval(attrib["red"]) + colorRecord.green = eval(attrib["green"]) + colorRecord.blue = eval(attrib["blue"]) + colorRecord.alpha = eval(attrib["alpha"]) + +class ColorRecord(object): + def __init__(self): + self.red = 255 # all are one byte values. + self.green = 255 + self.blue = 255 + self.alpha = 255 diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/table_API_readme.txt fonttools-3.0/Tools/fontTools/ttLib/tables/table_API_readme.txt --- fonttools-2.4/Tools/fontTools/ttLib/tables/table_API_readme.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/table_API_readme.txt 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,91 @@ +This folder is a subpackage of ttLib. Each module here is a +specialized TT/OT table converter: they can convert raw data +to Python objects and vice versa. Usually you don't need to +use the modules directly: they are imported and used +automatically when needed by ttLib. + +If you are writing you own table converter the following is +important. + +The modules here have pretty strange names: this is due to the +fact that we need to map TT table tags (which are case sensitive) +to filenames (which on Mac and Win aren't case sensitive) as well +as to Python identifiers. The latter means it can only contain +[A-Za-z0-9_] and cannot start with a number. 
+ +ttLib provides functions to expand a tag into the format used here: + +>>> from fontTools import ttLib +>>> ttLib.tagToIdentifier("FOO ") +'F_O_O_' +>>> ttLib.tagToIdentifier("cvt ") +'_c_v_t' +>>> ttLib.tagToIdentifier("OS/2") +'O_S_2f_2' +>>> ttLib.tagToIdentifier("glyf") +'_g_l_y_f' +>>> + +And vice versa: + +>>> ttLib.identifierToTag("F_O_O_") +'FOO ' +>>> ttLib.identifierToTag("_c_v_t") +'cvt ' +>>> ttLib.identifierToTag("O_S_2f_2") +'OS/2' +>>> ttLib.identifierToTag("_g_l_y_f") +'glyf' +>>> + +Eg. the 'glyf' table converter lives in a Python file called: + + _g_l_y_f.py + +The converter itself is a class, named "table_" + expandedtag. Eg: + + class table__g_l_y_f: + etc. + +Note that if you _do_ need to use such modules or classes manually, +there are two convenient API functions that let you find them by tag: + +>>> ttLib.getTableModule('glyf') +<module 'ttLib.tables._g_l_y_f'> +>>> ttLib.getTableClass('glyf') +<class ttLib.tables._g_l_y_f.table__g_l_y_f at 645f400> +>>> + +You must subclass from DefaultTable.DefaultTable. It provides some default +behavior, as well as a constructor method (__init__) that you don't need to +override. + +Your converter should minimally provide two methods: + +class table_F_O_O_(DefaultTable.DefaultTable): # converter for table 'FOO ' + + def decompile(self, data, ttFont): + # 'data' is the raw table data. Unpack it into a + # Python data structure. + # 'ttFont' is a ttLib.TTfile instance, enabling you to + # refer to other tables. Do ***not*** keep a reference to + # it: it will cause a circular reference (ttFont saves + # a reference to us), and that means we'll be leaking + # memory. If you need to use it in other methods, just + # pass it around as a method argument. + + def compile(self, ttFont): + # Return the raw data, as converted from the Python + # data structure. + # Again, 'ttFont' is there so you can access other tables. + # Same warning applies. 
+ +If you want to support TTX import/export as well, you need to provide two +additional methods: + + def toXML(self, writer, ttFont): + # XXX + + def fromXML(self, (name, attrs, content), ttFont): + # XXX + diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I__0.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__0.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I__0.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__0.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,49 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable +import struct + +tsi0Format = '>HHl' + +def fixlongs(glyphID, textLength, textOffset): + return int(glyphID), int(textLength), textOffset + + +class table_T_S_I__0(DefaultTable.DefaultTable): + + dependencies = ["TSI1"] + + def decompile(self, data, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs + indices = [] + size = struct.calcsize(tsi0Format) + for i in range(numGlyphs + 5): + glyphID, textLength, textOffset = fixlongs(*struct.unpack(tsi0Format, data[:size])) + indices.append((glyphID, textLength, textOffset)) + data = data[size:] + assert len(data) == 0 + assert indices[-5] == (0XFFFE, 0, -1409540300), "bad magic number" # 0xABFC1F34 + self.indices = indices[:-5] + self.extra_indices = indices[-4:] + + def compile(self, ttFont): + if not hasattr(self, "indices"): + # We have no corresponding table (TSI1 or TSI3); let's return + # no data, which effectively means "ignore us". 
+ return "" + data = b"" + for index, textLength, textOffset in self.indices: + data = data + struct.pack(tsi0Format, index, textLength, textOffset) + data = data + struct.pack(tsi0Format, 0XFFFE, 0, -1409540300) # 0xABFC1F34 + for index, textLength, textOffset in self.extra_indices: + data = data + struct.pack(tsi0Format, index, textLength, textOffset) + return data + + def set(self, indices, extra_indices): + # gets called by 'TSI1' or 'TSI3' + self.indices = indices + self.extra_indices = extra_indices + + def toXML(self, writer, ttFont): + writer.comment("This table will be calculated by the compiler") + writer.newline() diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I__1.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__1.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I__1.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__1.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,116 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable + +class table_T_S_I__1(DefaultTable.DefaultTable): + + extras = {0xfffa: "ppgm", 0xfffb: "cvt", 0xfffc: "reserved", 0xfffd: "fpgm"} + + indextable = "TSI0" + + def decompile(self, data, ttFont): + indextable = ttFont[self.indextable] + self.glyphPrograms = {} + for i in range(len(indextable.indices)): + glyphID, textLength, textOffset = indextable.indices[i] + if textLength == 0x8000: + # Ugh. Hi Beat! + textLength = indextable.indices[i+1][1] + if textLength > 0x8000: + pass # XXX Hmmm. 
+ text = data[textOffset:textOffset+textLength] + assert len(text) == textLength + if text: + self.glyphPrograms[ttFont.getGlyphName(glyphID)] = text + + self.extraPrograms = {} + for i in range(len(indextable.extra_indices)): + extraCode, textLength, textOffset = indextable.extra_indices[i] + if textLength == 0x8000: + if self.extras[extraCode] == "fpgm": # this is the last one + textLength = len(data) - textOffset + else: + textLength = indextable.extra_indices[i+1][1] + text = data[textOffset:textOffset+textLength] + assert len(text) == textLength + if text: + self.extraPrograms[self.extras[extraCode]] = text + + def compile(self, ttFont): + if not hasattr(self, "glyphPrograms"): + self.glyphPrograms = {} + self.extraPrograms = {} + data = b'' + indextable = ttFont[self.indextable] + glyphNames = ttFont.getGlyphOrder() + + indices = [] + for i in range(len(glyphNames)): + if len(data) % 2: + data = data + b"\015" # align on 2-byte boundaries, fill with return chars. Yum. + name = glyphNames[i] + if name in self.glyphPrograms: + text = tobytes(self.glyphPrograms[name]) + else: + text = b"" + textLength = len(text) + if textLength >= 0x8000: + textLength = 0x8000 # XXX ??? + indices.append((i, textLength, len(data))) + data = data + text + + extra_indices = [] + codes = sorted(self.extras.items()) + for i in range(len(codes)): + if len(data) % 2: + data = data + b"\015" # align on 2-byte boundaries, fill with return chars. + code, name = codes[i] + if name in self.extraPrograms: + text = tobytes(self.extraPrograms[name]) + else: + text = b"" + textLength = len(text) + if textLength >= 0x8000: + textLength = 0x8000 # XXX ??? 
+ extra_indices.append((code, textLength, len(data))) + data = data + text + indextable.set(indices, extra_indices) + return data + + def toXML(self, writer, ttFont): + names = sorted(self.glyphPrograms.keys()) + writer.newline() + for name in names: + text = self.glyphPrograms[name] + if not text: + continue + writer.begintag("glyphProgram", name=name) + writer.newline() + writer.write_noindent(text.replace(b"\r", b"\n")) + writer.newline() + writer.endtag("glyphProgram") + writer.newline() + writer.newline() + extra_names = sorted(self.extraPrograms.keys()) + for name in extra_names: + text = self.extraPrograms[name] + if not text: + continue + writer.begintag("extraProgram", name=name) + writer.newline() + writer.write_noindent(text.replace(b"\r", b"\n")) + writer.newline() + writer.endtag("extraProgram") + writer.newline() + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "glyphPrograms"): + self.glyphPrograms = {} + self.extraPrograms = {} + lines = strjoin(content).replace("\r", "\n").split("\n") + text = '\r'.join(lines[1:-1]) + if name == "glyphProgram": + self.glyphPrograms[attrs["name"]] = text + elif name == "extraProgram": + self.extraPrograms[attrs["name"]] = text diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I__2.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__2.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I__2.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__2.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,9 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib + +superclass = ttLib.getTableClass("TSI0") + +class table_T_S_I__2(superclass): + + dependencies = ["TSI3"] diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I__3.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__3.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I__3.py 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__3.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,11 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib + +superclass = ttLib.getTableClass("TSI1") + +class table_T_S_I__3(superclass): + + extras = {0xfffa: "reserved0", 0xfffb: "reserved1", 0xfffc: "reserved2", 0xfffd: "reserved3"} + + indextable = "TSI2" diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I__5.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__5.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I__5.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__5.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,42 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval +from . import DefaultTable +import sys +import array + + +class table_T_S_I__5(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + numGlyphs = ttFont['maxp'].numGlyphs + assert len(data) == 2 * numGlyphs + a = array.array("H") + a.fromstring(data) + if sys.byteorder != "big": + a.byteswap() + self.glyphGrouping = {} + for i in range(numGlyphs): + self.glyphGrouping[ttFont.getGlyphName(i)] = a[i] + + def compile(self, ttFont): + glyphNames = ttFont.getGlyphOrder() + a = array.array("H") + for i in range(len(glyphNames)): + a.append(self.glyphGrouping[glyphNames[i]]) + if sys.byteorder != "big": + a.byteswap() + return a.tostring() + + def toXML(self, writer, ttFont): + names = sorted(self.glyphGrouping.keys()) + for glyphName in names: + writer.simpletag("glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName]) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "glyphGrouping"): + self.glyphGrouping = {} + if name != "glyphgroup": + return + 
self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"]) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_B_.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_B_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_B_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_B_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable + +class table_T_S_I_B_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_D_.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_D_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_D_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_D_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable + +class table_T_S_I_D_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_J_.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_J_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_J_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_J_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import asciiTable + +class table_T_S_I_J_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_P_.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_P_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_P_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_P_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable + +class table_T_S_I_P_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_S_.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_S_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_S_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_S_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable + +class table_T_S_I_S_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_V_.py fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_V_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/T_S_I_V_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_V_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . 
import asciiTable + +class table_T_S_I_V_(asciiTable.asciiTable): + pass diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/ttProgram.py fonttools-3.0/Tools/fontTools/ttLib/tables/ttProgram.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/ttProgram.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/ttProgram.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,498 @@ +"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import num2binary, binary2num, readHex +import array +import re + +# first, the list of instructions that eat bytes or words from the instruction stream + +streamInstructions = [ +# +# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes +# + (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn + (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn + (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn + (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn +] + + +# next, the list of "normal" instructions + +instructions = [ +# +#, opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes +# + (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p - + (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n| + (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2) + (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 - + (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... 
, ploopvalue - + (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b + (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f - + (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n) + (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek + (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack - + (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n - + (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n + (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2 + (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e + (0x59, 'EIF', 0, 'EndIf', 0, 0), # - - + (0x1b, 'ELSE', 0, 'Else', 0, 0), # - - + (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - - + (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b + (0x57, 'EVEN', 0, 'Even', 1, 1), # e b + (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f - + (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - - + (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - - + (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue - + (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l - + (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l - + (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n) + (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c + (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result + (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py + (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py + (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b + (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b + (0x89, 
'IDEF', 0, 'InstructionDefinition', 1, 0), # f - + (0x58, 'IF', 0, 'If', 1, 0), # e - + (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v - + (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue - + (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p - + (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - - + (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset - + (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset - + (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset - + (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count - + (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b + (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b + (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2) + (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d + (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p - + (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p - + (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p - + (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2) + (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek + (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p - + (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem + (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize + (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p - + (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64 + (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n + (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b + (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e ) + (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2 + (0x56, 'ODD', 0, 'Odd', 1, 1), # e b + (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b + (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e - + (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value + (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - - + (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - - + (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c + (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2 + (0x43, 'RS', 0, 'ReadStore', 1, 1), # 
n v + (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - - + (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - - + (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - - + (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - - + (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n - + (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight - + (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n - + (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n - + (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p - + (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n - + (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n - + (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 - + (0x5f, 'SDS', 0, 'SetDeltaShiftInGState',1, 0), # n - + (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x - + (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - - + (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 - + (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - - + (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c - + (0x32, 'SHP', 1, 'ShiftPointByLastPoint',-1, 0), # p1, p2, ..., ploopvalue - + (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue - + (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e - + (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n - + (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance - + (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x - + (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - - + (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 - + (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n - + (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p - + (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p - + (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p - + (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n - + (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n - + (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2) + (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - - + (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2 + (0x13, 
'SZP0', 0, 'SetZonePointer0', 1, 0), # n - + (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n - + (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n - + (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n - + (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p - + (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l - + (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l - + (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l - +] + + +def bitRepr(value, bits): + s = "" + for i in range(bits): + s = "01"[value & 0x1] + s + value = value >> 1 + return s + + +_mnemonicPat = re.compile("[A-Z][A-Z0-9]*$") + +def _makeDict(instructionList): + opcodeDict = {} + mnemonicDict = {} + for op, mnemonic, argBits, name, pops, pushes in instructionList: + assert _mnemonicPat.match(mnemonic) + mnemonicDict[mnemonic] = op, argBits, name + if argBits: + argoffset = op + for i in range(1 << argBits): + opcodeDict[op+i] = mnemonic, argBits, argoffset, name + else: + opcodeDict[op] = mnemonic, 0, 0, name + return opcodeDict, mnemonicDict + +streamOpcodeDict, streamMnemonicDict = _makeDict(streamInstructions) +opcodeDict, mnemonicDict = _makeDict(instructions) + +class tt_instructions_error(Exception): + def __init__(self, error): + self.error = error + def __str__(self): + return "TT instructions error: %s" % repr(self.error) + + +_comment = r"/\*.*?\*/" +_instruction = r"([A-Z][A-Z0-9]*)\s*\[(.*?)\]" +_number = r"-?[0-9]+" +_token = "(%s)|(%s)|(%s)" % (_instruction, _number, _comment) + +_tokenRE = re.compile(_token) +_whiteRE = re.compile(r"\s*") + +_pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/") + + +def _skipWhite(data, pos): + m = _whiteRE.match(data, pos) + newPos = m.regs[0][1] + assert newPos >= pos + return newPos + + +class Program(object): + + def __init__(self): + pass + + def fromBytecode(self, bytecode): + self.bytecode = array.array("B", bytecode) + if hasattr(self, "assembly"): + del self.assembly + + def fromAssembly(self, assembly): + self.assembly = assembly + if 
hasattr(self, "bytecode"): + del self.bytecode + + def getBytecode(self): + if not hasattr(self, "bytecode"): + self._assemble() + return self.bytecode.tostring() + + def getAssembly(self, preserve=False): + if not hasattr(self, "assembly"): + self._disassemble(preserve=preserve) + return self.assembly + + def toXML(self, writer, ttFont): + if not hasattr (ttFont, "disassembleInstructions") or ttFont.disassembleInstructions: + assembly = self.getAssembly() + writer.begintag("assembly") + writer.newline() + i = 0 + nInstr = len(assembly) + while i < nInstr: + instr = assembly[i] + writer.write(instr) + writer.newline() + m = _pushCountPat.match(instr) + i = i + 1 + if m: + nValues = int(m.group(1)) + line = [] + j = 0 + for j in range(nValues): + if j and not (j % 25): + writer.write(' '.join(line)) + writer.newline() + line = [] + line.append(assembly[i+j]) + writer.write(' '.join(line)) + writer.newline() + i = i + j + 1 + writer.endtag("assembly") + else: + writer.begintag("bytecode") + writer.newline() + writer.dumphex(self.getBytecode()) + writer.endtag("bytecode") + + def fromXML(self, name, attrs, content, ttFont): + if name == "assembly": + self.fromAssembly(strjoin(content)) + self._assemble() + del self.assembly + else: + assert name == "bytecode" + self.fromBytecode(readHex(content)) + + def _assemble(self): + assembly = self.assembly + if isinstance(assembly, type([])): + assembly = ' '.join(assembly) + bytecode = [] + push = bytecode.append + lenAssembly = len(assembly) + pos = _skipWhite(assembly, 0) + while pos < lenAssembly: + m = _tokenRE.match(assembly, pos) + if m is None: + raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos-5:pos+15]) + dummy, mnemonic, arg, number, comment = m.groups() + pos = m.regs[0][1] + if comment: + pos = _skipWhite(assembly, pos) + continue + + arg = arg.strip() + if mnemonic.startswith("INSTR"): + # Unknown instruction + op = int(mnemonic[5:]) + push(op) + elif mnemonic not in ("PUSH", "NPUSHB", 
"NPUSHW", "PUSHB", "PUSHW"): + op, argBits, name = mnemonicDict[mnemonic] + if len(arg) != argBits: + raise tt_instructions_error("Incorrect number of argument bits (%s[%s])" % (mnemonic, arg)) + if arg: + arg = binary2num(arg) + push(op + arg) + else: + push(op) + else: + args = [] + pos = _skipWhite(assembly, pos) + while pos < lenAssembly: + m = _tokenRE.match(assembly, pos) + if m is None: + raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos:pos+15]) + dummy, _mnemonic, arg, number, comment = m.groups() + if number is None and comment is None: + break + pos = m.regs[0][1] + pos = _skipWhite(assembly, pos) + if comment is not None: + continue + args.append(int(number)) + nArgs = len(args) + if mnemonic == "PUSH": + # Automatically choose the most compact representation + nWords = 0 + while nArgs: + while nWords < nArgs and nWords < 255 and not (0 <= args[nWords] <= 255): + nWords += 1 + nBytes = 0 + while nWords+nBytes < nArgs and nBytes < 255 and 0 <= args[nWords+nBytes] <= 255: + nBytes += 1 + if nBytes < 2 and nWords + nBytes < 255 and nWords + nBytes != nArgs: + # Will write bytes as words + nWords += nBytes + continue + + # Write words + if nWords: + if nWords <= 8: + op, argBits, name = streamMnemonicDict["PUSHW"] + op = op + nWords - 1 + push(op) + else: + op, argBits, name = streamMnemonicDict["NPUSHW"] + push(op) + push(nWords) + for value in args[:nWords]: + assert -32768 <= value < 32768, "PUSH value out of range %d" % value + push((value >> 8) & 0xff) + push(value & 0xff) + + # Write bytes + if nBytes: + pass + if nBytes <= 8: + op, argBits, name = streamMnemonicDict["PUSHB"] + op = op + nBytes - 1 + push(op) + else: + op, argBits, name = streamMnemonicDict["NPUSHB"] + push(op) + push(nBytes) + for value in args[nWords:nWords+nBytes]: + push(value) + + nTotal = nWords + nBytes + args = args[nTotal:] + nArgs -= nTotal + nWords = 0 + else: + # Write exactly what we've been asked to + words = mnemonic[-1] == "W" + op, argBits, 
name = streamMnemonicDict[mnemonic] + if mnemonic[0] != "N": + assert nArgs <= 8, nArgs + op = op + nArgs - 1 + push(op) + else: + assert nArgs < 256 + push(op) + push(nArgs) + if words: + for value in args: + assert -32768 <= value < 32768, "PUSHW value out of range %d" % value + push((value >> 8) & 0xff) + push(value & 0xff) + else: + for value in args: + assert 0 <= value < 256, "PUSHB value out of range %d" % value + push(value) + + pos = _skipWhite(assembly, pos) + + if bytecode: + assert max(bytecode) < 256 and min(bytecode) >= 0 + self.bytecode = array.array("B", bytecode) + + def _disassemble(self, preserve=False): + assembly = [] + i = 0 + bytecode = self.bytecode + numBytecode = len(bytecode) + while i < numBytecode: + op = bytecode[i] + try: + mnemonic, argBits, argoffset, name = opcodeDict[op] + except KeyError: + if op in streamOpcodeDict: + values = [] + + # Merge consecutive PUSH operations + while bytecode[i] in streamOpcodeDict: + op = bytecode[i] + mnemonic, argBits, argoffset, name = streamOpcodeDict[op] + words = mnemonic[-1] == "W" + if argBits: + nValues = op - argoffset + 1 + else: + i = i + 1 + nValues = bytecode[i] + i = i + 1 + assert nValues > 0 + if not words: + for j in range(nValues): + value = bytecode[i] + values.append(repr(value)) + i = i + 1 + else: + for j in range(nValues): + # cast to signed int16 + value = (bytecode[i] << 8) | bytecode[i+1] + if value >= 0x8000: + value = value - 0x10000 + values.append(repr(value)) + i = i + 2 + if preserve: + break + + if not preserve: + mnemonic = "PUSH" + nValues = len(values) + if nValues == 1: + assembly.append("%s[ ] /* 1 value pushed */" % mnemonic) + else: + assembly.append("%s[ ] /* %s values pushed */" % (mnemonic, nValues)) + assembly.extend(values) + else: + assembly.append("INSTR%d[ ]" % op) + i = i + 1 + else: + if argBits: + assembly.append(mnemonic + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name)) + else: + assembly.append(mnemonic + "[ ] /* %s */" % name) + i = 
i + 1 + self.assembly = assembly + + def __bool__(self): + """ + >>> p = Program() + >>> bool(p) + False + >>> bc = array.array("B", [0]) + >>> p.fromBytecode(bc) + >>> bool(p) + True + >>> p.bytecode.pop() + 0 + >>> bool(p) + False + + >>> p = Program() + >>> asm = ['SVTCA[0]'] + >>> p.fromAssembly(asm) + >>> bool(p) + True + >>> p.assembly.pop() + 'SVTCA[0]' + >>> bool(p) + False + """ + return ((hasattr(self, 'assembly') and len(self.assembly) > 0) or + (hasattr(self, 'bytecode') and len(self.bytecode) > 0)) + + __nonzero__ = __bool__ + + +def _test(): + """ + >>> _test() + True + """ + + bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
\212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 
F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-""" + + p = Program() + p.fromBytecode(bc) + asm = p.getAssembly(preserve=True) + p.fromAssembly(asm) + print(bc == p.getBytecode()) + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/V_D_M_X_.py fonttools-3.0/Tools/fontTools/ttLib/tables/V_D_M_X_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/V_D_M_X_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/V_D_M_X_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,234 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import DefaultTable +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +import struct + +VDMX_HeaderFmt = """ + > # big endian + version: H # Version number (0 or 1) + numRecs: H # Number of VDMX groups present + numRatios: H # Number of aspect ratio groupings +""" +# the VMDX header is followed by an array of RatRange[numRatios] (i.e. 
aspect +# ratio ranges); +VDMX_RatRangeFmt = """ + > # big endian + bCharSet: B # Character set + xRatio: B # Value to use for x-Ratio + yStartRatio: B # Starting y-Ratio value + yEndRatio: B # Ending y-Ratio value +""" +# followed by an array of offset[numRatios] from start of VDMX table to the +# VDMX Group for this ratio range (offsets will be re-calculated on compile); +# followed by an array of Group[numRecs] records; +VDMX_GroupFmt = """ + > # big endian + recs: H # Number of height records in this group + startsz: B # Starting yPelHeight + endsz: B # Ending yPelHeight +""" +# followed by an array of vTable[recs] records. +VDMX_vTableFmt = """ + > # big endian + yPelHeight: H # yPelHeight to which values apply + yMax: h # Maximum value (in pels) for this yPelHeight + yMin: h # Minimum value (in pels) for this yPelHeight +""" + + +class table_V_D_M_X_(DefaultTable.DefaultTable): + + def decompile(self, data, ttFont): + pos = 0 # track current position from to start of VDMX table + dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self) + pos += sstruct.calcsize(VDMX_HeaderFmt) + self.ratRanges = [] + for i in range(self.numRatios): + ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data) + pos += sstruct.calcsize(VDMX_RatRangeFmt) + # the mapping between a ratio and a group is defined further below + ratio['groupIndex'] = None + self.ratRanges.append(ratio) + lenOffset = struct.calcsize('>H') + _offsets = [] # temporarily store offsets to groups + for i in range(self.numRatios): + offset = struct.unpack('>H', data[0:lenOffset])[0] + data = data[lenOffset:] + pos += lenOffset + _offsets.append(offset) + self.groups = [] + for groupIndex in range(self.numRecs): + # the offset to this group from beginning of the VDMX table + currOffset = pos + group, data = sstruct.unpack2(VDMX_GroupFmt, data) + # the group lenght and bounding sizes are re-calculated on compile + recs = group.pop('recs') + startsz = group.pop('startsz') + endsz = group.pop('endsz') + pos += 
sstruct.calcsize(VDMX_GroupFmt) + for j in range(recs): + vTable, data = sstruct.unpack2(VDMX_vTableFmt, data) + vTableLength = sstruct.calcsize(VDMX_vTableFmt) + pos += vTableLength + # group is a dict of (yMax, yMin) tuples keyed by yPelHeight + group[vTable['yPelHeight']] = (vTable['yMax'], vTable['yMin']) + # make sure startsz and endsz match the calculated values + minSize = min(group.keys()) + maxSize = max(group.keys()) + assert startsz == minSize, \ + "startsz (%s) must equal min yPelHeight (%s): group %d" % \ + (group.startsz, minSize, groupIndex) + assert endsz == maxSize, \ + "endsz (%s) must equal max yPelHeight (%s): group %d" % \ + (group.endsz, maxSize, groupIndex) + self.groups.append(group) + # match the defined offsets with the current group's offset + for offsetIndex, offsetValue in enumerate(_offsets): + # when numRecs < numRatios there can more than one ratio range + # sharing the same VDMX group + if currOffset == offsetValue: + # map the group with the ratio range thas has the same + # index as the offset to that group (it took me a while..) + self.ratRanges[offsetIndex]['groupIndex'] = groupIndex + # check that all ratio ranges have a group + for i in range(self.numRatios): + ratio = self.ratRanges[i] + if ratio['groupIndex'] is None: + from fontTools import ttLib + raise ttLib.TTLibError( + "no group defined for ratRange %d" % i) + + def _getOffsets(self): + """ + Calculate offsets to VDMX_Group records. + For each ratRange return a list of offset values from the beginning of + the VDMX table to a VDMX_Group. 
+ """ + lenHeader = sstruct.calcsize(VDMX_HeaderFmt) + lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt) + lenOffset = struct.calcsize('>H') + lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt) + lenVTable = sstruct.calcsize(VDMX_vTableFmt) + # offset to the first group + pos = lenHeader + self.numRatios*lenRatRange + self.numRatios*lenOffset + groupOffsets = [] + for group in self.groups: + groupOffsets.append(pos) + lenGroup = lenGroupHeader + len(group) * lenVTable + pos += lenGroup # offset to next group + offsets = [] + for ratio in self.ratRanges: + groupIndex = ratio['groupIndex'] + offsets.append(groupOffsets[groupIndex]) + return offsets + + def compile(self, ttFont): + if not(self.version == 0 or self.version == 1): + from fontTools import ttLib + raise ttLib.TTLibError( + "unknown format for VDMX table: version %s" % self.version) + data = sstruct.pack(VDMX_HeaderFmt, self) + for ratio in self.ratRanges: + data += sstruct.pack(VDMX_RatRangeFmt, ratio) + # recalculate offsets to VDMX groups + for offset in self._getOffsets(): + data += struct.pack('>H', offset) + for group in self.groups: + recs = len(group) + startsz = min(group.keys()) + endsz = max(group.keys()) + gHeader = {'recs': recs, 'startsz': startsz, 'endsz': endsz} + data += sstruct.pack(VDMX_GroupFmt, gHeader) + for yPelHeight, (yMax, yMin) in sorted(group.items()): + vTable = {'yPelHeight': yPelHeight, 'yMax': yMax, 'yMin': yMin} + data += sstruct.pack(VDMX_vTableFmt, vTable) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + writer.begintag("ratRanges") + writer.newline() + for ratio in self.ratRanges: + groupIndex = ratio['groupIndex'] + writer.simpletag( + "ratRange", + bCharSet=ratio['bCharSet'], + xRatio=ratio['xRatio'], + yStartRatio=ratio['yStartRatio'], + yEndRatio=ratio['yEndRatio'], + groupIndex=groupIndex + ) + writer.newline() + writer.endtag("ratRanges") + writer.newline() + writer.begintag("groups") + 
writer.newline() + for groupIndex in range(self.numRecs): + group = self.groups[groupIndex] + recs = len(group) + startsz = min(group.keys()) + endsz = max(group.keys()) + writer.begintag("group", index=groupIndex) + writer.newline() + writer.comment("recs=%d, startsz=%d, endsz=%d" % + (recs, startsz, endsz)) + writer.newline() + for yPelHeight in group.keys(): + yMax, yMin = group[yPelHeight] + writer.simpletag( + "record", yPelHeight=yPelHeight, yMax=yMax, yMin=yMin) + writer.newline() + writer.endtag("group") + writer.newline() + writer.endtag("groups") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.version = safeEval(attrs["value"]) + elif name == "ratRanges": + if not hasattr(self, "ratRanges"): + self.ratRanges = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "ratRange": + if not hasattr(self, "numRatios"): + self.numRatios = 1 + else: + self.numRatios += 1 + ratio = { + "bCharSet": safeEval(attrs["bCharSet"]), + "xRatio": safeEval(attrs["xRatio"]), + "yStartRatio": safeEval(attrs["yStartRatio"]), + "yEndRatio": safeEval(attrs["yEndRatio"]), + "groupIndex": safeEval(attrs["groupIndex"]) + } + self.ratRanges.append(ratio) + elif name == "groups": + if not hasattr(self, "groups"): + self.groups = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "group": + if not hasattr(self, "numRecs"): + self.numRecs = 1 + else: + self.numRecs += 1 + group = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name == "record": + yPelHeight = safeEval(attrs["yPelHeight"]) + yMax = safeEval(attrs["yMax"]) + yMin = safeEval(attrs["yMin"]) + group[yPelHeight] = (yMax, yMin) + self.groups.append(group) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_v_h_e_a.py 
fonttools-3.0/Tools/fontTools/ttLib/tables/_v_h_e_a.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_v_h_e_a.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_v_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,90 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable + +vheaFormat = """ + > # big endian + tableVersion: 16.16F + ascent: h + descent: h + lineGap: h + advanceHeightMax: H + minTopSideBearing: h + minBottomSideBearing: h + yMaxExtent: h + caretSlopeRise: h + caretSlopeRun: h + reserved0: h + reserved1: h + reserved2: h + reserved3: h + reserved4: h + metricDataFormat: h + numberOfVMetrics: H +""" + +class table__v_h_e_a(DefaultTable.DefaultTable): + + # Note: Keep in sync with table__h_h_e_a + + dependencies = ['vmtx', 'glyf'] + + def decompile(self, data, ttFont): + sstruct.unpack(vheaFormat, data, self) + + def compile(self, ttFont): + if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + self.recalc(ttFont) + return sstruct.pack(vheaFormat, self) + + def recalc(self, ttFont): + vtmxTable = ttFont['vmtx'] + if 'glyf' in ttFont: + glyfTable = ttFont['glyf'] + INFINITY = 100000 + advanceHeightMax = 0 + minTopSideBearing = +INFINITY # arbitrary big number + minBottomSideBearing = +INFINITY # arbitrary big number + yMaxExtent = -INFINITY # arbitrary big negative number + + for name in ttFont.getGlyphOrder(): + height, tsb = vtmxTable[name] + advanceHeightMax = max(advanceHeightMax, height) + g = glyfTable[name] + if g.numberOfContours == 0: + continue + if g.numberOfContours < 0 and not hasattr(g, "yMax"): + # Composite glyph without extents set. + # Calculate those. 
+ g.recalcBounds(glyfTable) + minTopSideBearing = min(minTopSideBearing, tsb) + bsb = height - tsb - (g.yMax - g.yMin) + minBottomSideBearing = min(minBottomSideBearing, bsb) + extent = tsb + (g.yMax - g.yMin) + yMaxExtent = max(yMaxExtent, extent) + + if yMaxExtent == -INFINITY: + # No glyph has outlines. + minTopSideBearing = 0 + minBottomSideBearing = 0 + yMaxExtent = 0 + + self.advanceHeightMax = advanceHeightMax + self.minTopSideBearing = minTopSideBearing + self.minBottomSideBearing = minBottomSideBearing + self.yMaxExtent = yMaxExtent + else: + # XXX CFF recalc... + pass + + def toXML(self, writer, ttFont): + formatstring, names, fixes = sstruct.getformat(vheaFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/_v_m_t_x.py fonttools-3.0/Tools/fontTools/ttLib/tables/_v_m_t_x.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/_v_m_t_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/_v_m_t_x.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,12 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib + +superclass = ttLib.getTableClass("hmtx") + +class table__v_m_t_x(superclass): + + headerTag = 'vhea' + advanceName = 'height' + sideBearingName = 'tsb' + numberOfMetricsName = 'numberOfVMetrics' diff -Nru fonttools-2.4/Tools/fontTools/ttLib/tables/V_O_R_G_.py fonttools-3.0/Tools/fontTools/ttLib/tables/V_O_R_G_.py --- fonttools-2.4/Tools/fontTools/ttLib/tables/V_O_R_G_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/tables/V_O_R_G_.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,140 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from 
fontTools.misc.textTools import safeEval +from . import DefaultTable +import operator +import struct + + +class table_V_O_R_G_(DefaultTable.DefaultTable): + + """ This table is structured so that you can treat it like a dictionary keyed by glyph name. + ttFont['VORG'][<glyphName>] will return the vertical origin for any glyph + ttFont['VORG'][<glyphName>] = <value> will set the vertical origin for any glyph. + """ + + def decompile(self, data, ttFont): + self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID + self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics = struct.unpack(">HHhH", data[:8]) + assert (self.majorVersion <= 1), "Major version of VORG table is higher than I know how to handle" + data = data[8:] + vids = [] + gids = [] + pos = 0 + for i in range(self.numVertOriginYMetrics): + gid, vOrigin = struct.unpack(">Hh", data[pos:pos+4]) + pos += 4 + gids.append(gid) + vids.append(vOrigin) + + self.VOriginRecords = vOrig = {} + glyphOrder = ttFont.getGlyphOrder() + try: + names = map(operator.getitem, [glyphOrder]*self.numVertOriginYMetrics, gids) + except IndexError: + getGlyphName = self.getGlyphName + names = map(getGlyphName, gids ) + + list(map(operator.setitem, [vOrig]*self.numVertOriginYMetrics, names, vids)) + + def compile(self, ttFont): + vorgs = list(self.VOriginRecords.values()) + names = list(self.VOriginRecords.keys()) + nameMap = ttFont.getReverseGlyphMap() + lenRecords = len(vorgs) + try: + gids = map(operator.getitem, [nameMap]*lenRecords, names) + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + gids = map(operator.getitem, [nameMap]*lenRecords, names) + vOriginTable = list(zip(gids, vorgs)) + self.numVertOriginYMetrics = lenRecords + vOriginTable.sort() # must be in ascending GID order + dataList = [ struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable] + header = struct.pack(">HHhH", self.majorVersion, self.minorVersion, 
self.defaultVertOriginY, self.numVertOriginYMetrics) + dataList.insert(0, header) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("majorVersion", value=self.majorVersion) + writer.newline() + writer.simpletag("minorVersion", value=self.minorVersion) + writer.newline() + writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY) + writer.newline() + writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics) + writer.newline() + vOriginTable = [] + glyphNames = self.VOriginRecords.keys() + for glyphName in glyphNames: + try: + gid = ttFont.getGlyphID(glyphName) + except: + assert 0, "VORG table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) + vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]]) + vOriginTable.sort() + for entry in vOriginTable: + vOriginRec = VOriginRecord(entry[1], entry[2]) + vOriginRec.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "VOriginRecords"): + self.VOriginRecords = {} + self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID + if name == "VOriginRecord": + vOriginRec = VOriginRecord() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + vOriginRec.fromXML(name, attrs, content, ttFont) + self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin + elif "value" in attrs: + setattr(self, name, safeEval(attrs["value"])) + + def __getitem__(self, glyphSelector): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = self.getGlyphName(glyphSelector) + + if glyphSelector not in self.VOriginRecords: + return self.defaultVertOriginY + + return self.VOriginRecords[glyphSelector] + + def __setitem__(self, glyphSelector, value): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = 
self.getGlyphName(glyphSelector) + + if value != self.defaultVertOriginY: + self.VOriginRecords[glyphSelector] = value + elif glyphSelector in self.VOriginRecords: + del self.VOriginRecords[glyphSelector] + + def __delitem__(self, glyphSelector): + del self.VOriginRecords[glyphSelector] + +class VOriginRecord(object): + + def __init__(self, name=None, vOrigin=None): + self.glyphName = name + self.vOrigin = vOrigin + + def toXML(self, writer, ttFont): + writer.begintag("VOriginRecord") + writer.newline() + writer.simpletag("glyphName", value=self.glyphName) + writer.newline() + writer.simpletag("vOrigin", value=self.vOrigin) + writer.newline() + writer.endtag("VOriginRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name == "glyphName": + setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/testdata/TestOTF-Regular.otx fonttools-3.0/Tools/fontTools/ttLib/testdata/TestOTF-Regular.otx --- fonttools-2.4/Tools/fontTools/ttLib/testdata/TestOTF-Regular.otx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/testdata/TestOTF-Regular.otx 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,519 @@ +<?xml version="1.0" encoding="UTF-8"?> +<ttFont sfntVersion="OTTO" ttLibVersion="2.5"> + + <GlyphOrder> + <!-- The 'id' attribute is only for humans; it is ignored when parsed. 
--> + <GlyphID id="0" name=".notdef"/> + <GlyphID id="1" name=".null"/> + <GlyphID id="2" name="CR"/> + <GlyphID id="3" name="space"/> + <GlyphID id="4" name="period"/> + <GlyphID id="5" name="ellipsis"/> + </GlyphOrder> + + <head> + <!-- Most of this table will be recalculated by the compiler --> + <tableVersion value="1.0"/> + <fontRevision value="1.0"/> + <checkSumAdjustment value="0x34034793"/> + <magicNumber value="0x5f0f3cf5"/> + <flags value="00000000 00000011"/> + <unitsPerEm value="1000"/> + <created value="Thu Jun 4 14:29:11 2015"/> + <modified value="Sat Aug 1 10:07:17 2015"/> + <xMin value="50"/> + <yMin value="0"/> + <xMax value="668"/> + <yMax value="750"/> + <macStyle value="00000000 00000000"/> + <lowestRecPPEM value="9"/> + <fontDirectionHint value="2"/> + <indexToLocFormat value="0"/> + <glyphDataFormat value="0"/> + </head> + + <hhea> + <tableVersion value="1.0"/> + <ascent value="900"/> + <descent value="-300"/> + <lineGap value="0"/> + <advanceWidthMax value="723"/> + <minLeftSideBearing value="50"/> + <minRightSideBearing value="50"/> + <xMaxExtent value="668"/> + <caretSlopeRise value="1"/> + <caretSlopeRun value="0"/> + <caretOffset value="0"/> + <reserved0 value="0"/> + <reserved1 value="0"/> + <reserved2 value="0"/> + <reserved3 value="0"/> + <metricDataFormat value="0"/> + <numberOfHMetrics value="6"/> + </hhea> + + <maxp> + <tableVersion value="0x5000"/> + <numGlyphs value="6"/> + </maxp> + + <OS_2> + <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex' + will be recalculated by the compiler --> + <version value="4"/> + <xAvgCharWidth value="392"/> + <usWeightClass value="400"/> + <usWidthClass value="5"/> + <fsType value="00000000 00000000"/> + <ySubscriptXSize value="700"/> + <ySubscriptYSize value="650"/> + <ySubscriptXOffset value="0"/> + <ySubscriptYOffset value="140"/> + <ySuperscriptXSize value="700"/> + <ySuperscriptYSize value="650"/> + <ySuperscriptXOffset value="0"/> + <ySuperscriptYOffset value="477"/> + <yStrikeoutSize 
value="50"/> + <yStrikeoutPosition value="250"/> + <sFamilyClass value="2050"/> + <panose> + <bFamilyType value="2"/> + <bSerifStyle value="11"/> + <bWeight value="6"/> + <bProportion value="4"/> + <bContrast value="4"/> + <bStrokeVariation value="2"/> + <bArmStyle value="7"/> + <bLetterForm value="8"/> + <bMidline value="1"/> + <bXHeight value="4"/> + </panose> + <ulUnicodeRange1 value="10000000 00000000 00000000 00000001"/> + <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/> + <achVendID value="NONE"/> + <fsSelection value="00000000 11000000"/> + <usFirstCharIndex value="0"/> + <usLastCharIndex value="8230"/> + <sTypoAscender value="750"/> + <sTypoDescender value="-250"/> + <sTypoLineGap value="200"/> + <usWinAscent value="900"/> + <usWinDescent value="300"/> + <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/> + <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/> + <sxHeight value="500"/> + <sCapHeight value="700"/> + <usDefaultChar value="0"/> + <usBreakChar value="32"/> + <usMaxContext value="0"/> + </OS_2> + + <name> + <namerecord nameID="0" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Copyright (c) 2015 by FontTools. No rights reserved. 
+ </namerecord> + <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test OTF + </namerecord> + <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Regular + </namerecord> + <namerecord nameID="3" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools: Test OTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test OTF + </namerecord> + <namerecord nameID="5" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True"> + TestOTF-Regular + </namerecord> + <namerecord nameID="7" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test OTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + <namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="0" platformID="3" platEncID="1" langID="0x409"> + Copyright (c) 2015 by FontTools. No rights reserved. 
+ </namerecord> + <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409"> + Test OTF + </namerecord> + <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409"> + Regular + </namerecord> + <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409"> + FontTools: Test OTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409"> + Test OTF + </namerecord> + <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409"> + TestOTF-Regular + </namerecord> + <namerecord nameID="7" platformID="3" platEncID="1" langID="0x409"> + Test OTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + </name> + + <cmap> + <tableVersion version="0"/> + <cmap_format_4 platformID="0" platEncID="3" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? 
--> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + <cmap_format_6 platformID="1" platEncID="0" language="0"> + <map code="0x0" name=".null"/> + <map code="0x1" name=".notdef"/> + <map code="0x2" name=".notdef"/> + <map code="0x3" name=".notdef"/> + <map code="0x4" name=".notdef"/> + <map code="0x5" name=".notdef"/> + <map code="0x6" name=".notdef"/> + <map code="0x7" name=".notdef"/> + <map code="0x8" name=".notdef"/> + <map code="0x9" name=".notdef"/> + <map code="0xa" name=".notdef"/> + <map code="0xb" name=".notdef"/> + <map code="0xc" name=".notdef"/> + <map code="0xd" name="CR"/> + <map code="0xe" name=".notdef"/> + <map code="0xf" name=".notdef"/> + <map code="0x10" name=".notdef"/> + <map code="0x11" name=".notdef"/> + <map code="0x12" name=".notdef"/> + <map code="0x13" name=".notdef"/> + <map code="0x14" name=".notdef"/> + <map code="0x15" name=".notdef"/> + <map code="0x16" name=".notdef"/> + <map code="0x17" name=".notdef"/> + <map code="0x18" name=".notdef"/> + <map code="0x19" name=".notdef"/> + <map code="0x1a" name=".notdef"/> + <map code="0x1b" name=".notdef"/> + <map code="0x1c" name=".notdef"/> + <map code="0x1d" name=".notdef"/> + <map code="0x1e" name=".notdef"/> + <map code="0x1f" name=".notdef"/> + <map code="0x20" name="space"/> + <map code="0x21" name=".notdef"/> + <map code="0x22" name=".notdef"/> + <map code="0x23" name=".notdef"/> + <map code="0x24" name=".notdef"/> + <map code="0x25" name=".notdef"/> + <map code="0x26" name=".notdef"/> + <map code="0x27" name=".notdef"/> + <map code="0x28" name=".notdef"/> + <map code="0x29" name=".notdef"/> + <map code="0x2a" name=".notdef"/> + <map code="0x2b" name=".notdef"/> + <map code="0x2c" name=".notdef"/> + <map code="0x2d" name=".notdef"/> + <map code="0x2e" name="period"/> + <map code="0x2f" name=".notdef"/> + <map code="0x30" name=".notdef"/> + <map 
code="0x31" name=".notdef"/> + <map code="0x32" name=".notdef"/> + <map code="0x33" name=".notdef"/> + <map code="0x34" name=".notdef"/> + <map code="0x35" name=".notdef"/> + <map code="0x36" name=".notdef"/> + <map code="0x37" name=".notdef"/> + <map code="0x38" name=".notdef"/> + <map code="0x39" name=".notdef"/> + <map code="0x3a" name=".notdef"/> + <map code="0x3b" name=".notdef"/> + <map code="0x3c" name=".notdef"/> + <map code="0x3d" name=".notdef"/> + <map code="0x3e" name=".notdef"/> + <map code="0x3f" name=".notdef"/> + <map code="0x40" name=".notdef"/> + <map code="0x41" name=".notdef"/> + <map code="0x42" name=".notdef"/> + <map code="0x43" name=".notdef"/> + <map code="0x44" name=".notdef"/> + <map code="0x45" name=".notdef"/> + <map code="0x46" name=".notdef"/> + <map code="0x47" name=".notdef"/> + <map code="0x48" name=".notdef"/> + <map code="0x49" name=".notdef"/> + <map code="0x4a" name=".notdef"/> + <map code="0x4b" name=".notdef"/> + <map code="0x4c" name=".notdef"/> + <map code="0x4d" name=".notdef"/> + <map code="0x4e" name=".notdef"/> + <map code="0x4f" name=".notdef"/> + <map code="0x50" name=".notdef"/> + <map code="0x51" name=".notdef"/> + <map code="0x52" name=".notdef"/> + <map code="0x53" name=".notdef"/> + <map code="0x54" name=".notdef"/> + <map code="0x55" name=".notdef"/> + <map code="0x56" name=".notdef"/> + <map code="0x57" name=".notdef"/> + <map code="0x58" name=".notdef"/> + <map code="0x59" name=".notdef"/> + <map code="0x5a" name=".notdef"/> + <map code="0x5b" name=".notdef"/> + <map code="0x5c" name=".notdef"/> + <map code="0x5d" name=".notdef"/> + <map code="0x5e" name=".notdef"/> + <map code="0x5f" name=".notdef"/> + <map code="0x60" name=".notdef"/> + <map code="0x61" name=".notdef"/> + <map code="0x62" name=".notdef"/> + <map code="0x63" name=".notdef"/> + <map code="0x64" name=".notdef"/> + <map code="0x65" name=".notdef"/> + <map code="0x66" name=".notdef"/> + <map code="0x67" name=".notdef"/> + <map code="0x68" 
name=".notdef"/> + <map code="0x69" name=".notdef"/> + <map code="0x6a" name=".notdef"/> + <map code="0x6b" name=".notdef"/> + <map code="0x6c" name=".notdef"/> + <map code="0x6d" name=".notdef"/> + <map code="0x6e" name=".notdef"/> + <map code="0x6f" name=".notdef"/> + <map code="0x70" name=".notdef"/> + <map code="0x71" name=".notdef"/> + <map code="0x72" name=".notdef"/> + <map code="0x73" name=".notdef"/> + <map code="0x74" name=".notdef"/> + <map code="0x75" name=".notdef"/> + <map code="0x76" name=".notdef"/> + <map code="0x77" name=".notdef"/> + <map code="0x78" name=".notdef"/> + <map code="0x79" name=".notdef"/> + <map code="0x7a" name=".notdef"/> + <map code="0x7b" name=".notdef"/> + <map code="0x7c" name=".notdef"/> + <map code="0x7d" name=".notdef"/> + <map code="0x7e" name=".notdef"/> + <map code="0x7f" name=".notdef"/> + <map code="0x80" name=".notdef"/> + <map code="0x81" name=".notdef"/> + <map code="0x82" name=".notdef"/> + <map code="0x83" name=".notdef"/> + <map code="0x84" name=".notdef"/> + <map code="0x85" name=".notdef"/> + <map code="0x86" name=".notdef"/> + <map code="0x87" name=".notdef"/> + <map code="0x88" name=".notdef"/> + <map code="0x89" name=".notdef"/> + <map code="0x8a" name=".notdef"/> + <map code="0x8b" name=".notdef"/> + <map code="0x8c" name=".notdef"/> + <map code="0x8d" name=".notdef"/> + <map code="0x8e" name=".notdef"/> + <map code="0x8f" name=".notdef"/> + <map code="0x90" name=".notdef"/> + <map code="0x91" name=".notdef"/> + <map code="0x92" name=".notdef"/> + <map code="0x93" name=".notdef"/> + <map code="0x94" name=".notdef"/> + <map code="0x95" name=".notdef"/> + <map code="0x96" name=".notdef"/> + <map code="0x97" name=".notdef"/> + <map code="0x98" name=".notdef"/> + <map code="0x99" name=".notdef"/> + <map code="0x9a" name=".notdef"/> + <map code="0x9b" name=".notdef"/> + <map code="0x9c" name=".notdef"/> + <map code="0x9d" name=".notdef"/> + <map code="0x9e" name=".notdef"/> + <map code="0x9f" name=".notdef"/> + 
<map code="0xa0" name=".notdef"/> + <map code="0xa1" name=".notdef"/> + <map code="0xa2" name=".notdef"/> + <map code="0xa3" name=".notdef"/> + <map code="0xa4" name=".notdef"/> + <map code="0xa5" name=".notdef"/> + <map code="0xa6" name=".notdef"/> + <map code="0xa7" name=".notdef"/> + <map code="0xa8" name=".notdef"/> + <map code="0xa9" name=".notdef"/> + <map code="0xaa" name=".notdef"/> + <map code="0xab" name=".notdef"/> + <map code="0xac" name=".notdef"/> + <map code="0xad" name=".notdef"/> + <map code="0xae" name=".notdef"/> + <map code="0xaf" name=".notdef"/> + <map code="0xb0" name=".notdef"/> + <map code="0xb1" name=".notdef"/> + <map code="0xb2" name=".notdef"/> + <map code="0xb3" name=".notdef"/> + <map code="0xb4" name=".notdef"/> + <map code="0xb5" name=".notdef"/> + <map code="0xb6" name=".notdef"/> + <map code="0xb7" name=".notdef"/> + <map code="0xb8" name=".notdef"/> + <map code="0xb9" name=".notdef"/> + <map code="0xba" name=".notdef"/> + <map code="0xbb" name=".notdef"/> + <map code="0xbc" name=".notdef"/> + <map code="0xbd" name=".notdef"/> + <map code="0xbe" name=".notdef"/> + <map code="0xbf" name=".notdef"/> + <map code="0xc0" name=".notdef"/> + <map code="0xc1" name=".notdef"/> + <map code="0xc2" name=".notdef"/> + <map code="0xc3" name=".notdef"/> + <map code="0xc4" name=".notdef"/> + <map code="0xc5" name=".notdef"/> + <map code="0xc6" name=".notdef"/> + <map code="0xc7" name=".notdef"/> + <map code="0xc8" name=".notdef"/> + <map code="0xc9" name="ellipsis"/> + </cmap_format_6> + <cmap_format_4 platformID="3" platEncID="1" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? 
--> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + </cmap> + + <post> + <formatType value="3.0"/> + <italicAngle value="0.0"/> + <underlinePosition value="-75"/> + <underlineThickness value="50"/> + <isFixedPitch value="0"/> + <minMemType42 value="0"/> + <maxMemType42 value="0"/> + <minMemType1 value="0"/> + <maxMemType1 value="0"/> + </post> + + <CFF> + <CFFFont name="TestOTF-Regular"> + <version value="001.001"/> + <Notice value="Copyright \(c\) 2015 by FontTools. No rights reserved."/> + <FullName value="Test OTF"/> + <FamilyName value="Test OTF"/> + <Weight value="Regular"/> + <isFixedPitch value="0"/> + <ItalicAngle value="0"/> + <UnderlineThickness value="50"/> + <PaintType value="0"/> + <CharstringType value="2"/> + <FontMatrix value="0.001 0 0 0.001 0 0"/> + <FontBBox value="50 0 668 750"/> + <StrokeWidth value="0"/> + <!-- charset is dumped separately as the 'GlyphOrder' element --> + <Encoding name="StandardEncoding"/> + <Private> + <BlueScale value="0.039625"/> + <BlueShift value="7"/> + <BlueFuzz value="1"/> + <ForceBold value="0"/> + <LanguageGroup value="0"/> + <ExpansionFactor value="0.06"/> + <initialRandomSeed value="0"/> + <defaultWidthX value="0"/> + <nominalWidthX value="0"/> + <Subrs> + <!-- The 'index' attribute is only for humans; it is ignored when parsed. 
--> + <CharString index="0"> + 131 122 -131 hlineto + return + </CharString> + </Subrs> + </Private> + <CharStrings> + <CharString name=".notdef"> + 500 450 hmoveto + 750 -400 -750 vlineto + 50 50 rmoveto + 650 300 -650 vlineto + endchar + </CharString> + <CharString name=".null"> + 0 endchar + </CharString> + <CharString name="CR"> + 250 endchar + </CharString> + <CharString name="ellipsis"> + 723 55 hmoveto + -107 callsubr + 241 -122 rmoveto + -107 callsubr + 241 -122 rmoveto + -107 callsubr + endchar + </CharString> + <CharString name="period"> + 241 55 hmoveto + -107 callsubr + endchar + </CharString> + <CharString name="space"> + 250 endchar + </CharString> + </CharStrings> + </CFFFont> + + <GlobalSubrs> + <!-- The 'index' attribute is only for humans; it is ignored when parsed. --> + </GlobalSubrs> + </CFF> + + <hmtx> + <mtx name=".notdef" width="500" lsb="50"/> + <mtx name=".null" width="0" lsb="0"/> + <mtx name="CR" width="250" lsb="0"/> + <mtx name="ellipsis" width="723" lsb="55"/> + <mtx name="period" width="241" lsb="55"/> + <mtx name="space" width="250" lsb="0"/> + </hmtx> + + <DSIG> + <!-- note that the Digital Signature will be invalid after recompilation! --> + <tableHeader flag="0x0" numSigs="0" version="1"/> + </DSIG> + +</ttFont> diff -Nru fonttools-2.4/Tools/fontTools/ttLib/testdata/TestTTF-Regular.ttx fonttools-3.0/Tools/fontTools/ttLib/testdata/TestTTF-Regular.ttx --- fonttools-2.4/Tools/fontTools/ttLib/testdata/TestTTF-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/testdata/TestTTF-Regular.ttx 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,553 @@ +<?xml version="1.0" encoding="UTF-8"?> +<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="2.5"> + + <GlyphOrder> + <!-- The 'id' attribute is only for humans; it is ignored when parsed. 
--> + <GlyphID id="0" name=".notdef"/> + <GlyphID id="1" name=".null"/> + <GlyphID id="2" name="CR"/> + <GlyphID id="3" name="space"/> + <GlyphID id="4" name="period"/> + <GlyphID id="5" name="ellipsis"/> + </GlyphOrder> + + <head> + <!-- Most of this table will be recalculated by the compiler --> + <tableVersion value="1.0"/> + <fontRevision value="1.0"/> + <checkSumAdjustment value="0x2ee689e2"/> + <magicNumber value="0x5f0f3cf5"/> + <flags value="00000000 00000011"/> + <unitsPerEm value="1000"/> + <created value="Thu Jun 4 14:29:11 2015"/> + <modified value="Mon Aug 3 13:04:43 2015"/> + <xMin value="50"/> + <yMin value="0"/> + <xMax value="668"/> + <yMax value="750"/> + <macStyle value="00000000 00000000"/> + <lowestRecPPEM value="9"/> + <fontDirectionHint value="2"/> + <indexToLocFormat value="0"/> + <glyphDataFormat value="0"/> + </head> + + <hhea> + <tableVersion value="1.0"/> + <ascent value="900"/> + <descent value="-300"/> + <lineGap value="0"/> + <advanceWidthMax value="723"/> + <minLeftSideBearing value="50"/> + <minRightSideBearing value="50"/> + <xMaxExtent value="668"/> + <caretSlopeRise value="1"/> + <caretSlopeRun value="0"/> + <caretOffset value="0"/> + <reserved0 value="0"/> + <reserved1 value="0"/> + <reserved2 value="0"/> + <reserved3 value="0"/> + <metricDataFormat value="0"/> + <numberOfHMetrics value="6"/> + </hhea> + + <maxp> + <!-- Most of this table will be recalculated by the compiler --> + <tableVersion value="0x10000"/> + <numGlyphs value="6"/> + <maxPoints value="8"/> + <maxContours value="2"/> + <maxCompositePoints value="12"/> + <maxCompositeContours value="3"/> + <maxZones value="1"/> + <maxTwilightPoints value="0"/> + <maxStorage value="0"/> + <maxFunctionDefs value="0"/> + <maxInstructionDefs value="0"/> + <maxStackElements value="0"/> + <maxSizeOfInstructions value="0"/> + <maxComponentElements value="3"/> + <maxComponentDepth value="1"/> + </maxp> + + <OS_2> + <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex' + will be 
recalculated by the compiler --> + <version value="4"/> + <xAvgCharWidth value="392"/> + <usWeightClass value="400"/> + <usWidthClass value="5"/> + <fsType value="00000000 00000000"/> + <ySubscriptXSize value="700"/> + <ySubscriptYSize value="650"/> + <ySubscriptXOffset value="0"/> + <ySubscriptYOffset value="140"/> + <ySuperscriptXSize value="700"/> + <ySuperscriptYSize value="650"/> + <ySuperscriptXOffset value="0"/> + <ySuperscriptYOffset value="477"/> + <yStrikeoutSize value="50"/> + <yStrikeoutPosition value="250"/> + <sFamilyClass value="2050"/> + <panose> + <bFamilyType value="2"/> + <bSerifStyle value="11"/> + <bWeight value="6"/> + <bProportion value="4"/> + <bContrast value="4"/> + <bStrokeVariation value="2"/> + <bArmStyle value="7"/> + <bLetterForm value="8"/> + <bMidline value="1"/> + <bXHeight value="4"/> + </panose> + <ulUnicodeRange1 value="10000000 00000000 00000000 00000001"/> + <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/> + <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/> + <achVendID value="NONE"/> + <fsSelection value="00000000 11000000"/> + <usFirstCharIndex value="0"/> + <usLastCharIndex value="8230"/> + <sTypoAscender value="750"/> + <sTypoDescender value="-250"/> + <sTypoLineGap value="200"/> + <usWinAscent value="900"/> + <usWinDescent value="300"/> + <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/> + <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/> + <sxHeight value="500"/> + <sCapHeight value="700"/> + <usDefaultChar value="0"/> + <usBreakChar value="32"/> + <usMaxContext value="0"/> + </OS_2> + + <hmtx> + <mtx name=".notdef" width="500" lsb="50"/> + <mtx name=".null" width="0" lsb="0"/> + <mtx name="CR" width="250" lsb="0"/> + <mtx name="ellipsis" width="723" lsb="55"/> + <mtx name="period" width="241" lsb="55"/> + <mtx name="space" width="250" lsb="0"/> + </hmtx> + + <cmap> + <tableVersion version="0"/> + 
<cmap_format_4 platformID="0" platEncID="3" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? --> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + <cmap_format_6 platformID="1" platEncID="0" language="0"> + <map code="0x0" name=".null"/> + <map code="0x1" name=".notdef"/> + <map code="0x2" name=".notdef"/> + <map code="0x3" name=".notdef"/> + <map code="0x4" name=".notdef"/> + <map code="0x5" name=".notdef"/> + <map code="0x6" name=".notdef"/> + <map code="0x7" name=".notdef"/> + <map code="0x8" name=".notdef"/> + <map code="0x9" name=".notdef"/> + <map code="0xa" name=".notdef"/> + <map code="0xb" name=".notdef"/> + <map code="0xc" name=".notdef"/> + <map code="0xd" name="CR"/> + <map code="0xe" name=".notdef"/> + <map code="0xf" name=".notdef"/> + <map code="0x10" name=".notdef"/> + <map code="0x11" name=".notdef"/> + <map code="0x12" name=".notdef"/> + <map code="0x13" name=".notdef"/> + <map code="0x14" name=".notdef"/> + <map code="0x15" name=".notdef"/> + <map code="0x16" name=".notdef"/> + <map code="0x17" name=".notdef"/> + <map code="0x18" name=".notdef"/> + <map code="0x19" name=".notdef"/> + <map code="0x1a" name=".notdef"/> + <map code="0x1b" name=".notdef"/> + <map code="0x1c" name=".notdef"/> + <map code="0x1d" name=".notdef"/> + <map code="0x1e" name=".notdef"/> + <map code="0x1f" name=".notdef"/> + <map code="0x20" name="space"/> + <map code="0x21" name=".notdef"/> + <map code="0x22" name=".notdef"/> + <map code="0x23" name=".notdef"/> + <map code="0x24" name=".notdef"/> + <map code="0x25" name=".notdef"/> + <map code="0x26" name=".notdef"/> + <map code="0x27" name=".notdef"/> + <map code="0x28" name=".notdef"/> + <map code="0x29" name=".notdef"/> + <map code="0x2a" name=".notdef"/> + <map code="0x2b" name=".notdef"/> + <map code="0x2c" name=".notdef"/> + <map 
code="0x2d" name=".notdef"/> + <map code="0x2e" name="period"/> + <map code="0x2f" name=".notdef"/> + <map code="0x30" name=".notdef"/> + <map code="0x31" name=".notdef"/> + <map code="0x32" name=".notdef"/> + <map code="0x33" name=".notdef"/> + <map code="0x34" name=".notdef"/> + <map code="0x35" name=".notdef"/> + <map code="0x36" name=".notdef"/> + <map code="0x37" name=".notdef"/> + <map code="0x38" name=".notdef"/> + <map code="0x39" name=".notdef"/> + <map code="0x3a" name=".notdef"/> + <map code="0x3b" name=".notdef"/> + <map code="0x3c" name=".notdef"/> + <map code="0x3d" name=".notdef"/> + <map code="0x3e" name=".notdef"/> + <map code="0x3f" name=".notdef"/> + <map code="0x40" name=".notdef"/> + <map code="0x41" name=".notdef"/> + <map code="0x42" name=".notdef"/> + <map code="0x43" name=".notdef"/> + <map code="0x44" name=".notdef"/> + <map code="0x45" name=".notdef"/> + <map code="0x46" name=".notdef"/> + <map code="0x47" name=".notdef"/> + <map code="0x48" name=".notdef"/> + <map code="0x49" name=".notdef"/> + <map code="0x4a" name=".notdef"/> + <map code="0x4b" name=".notdef"/> + <map code="0x4c" name=".notdef"/> + <map code="0x4d" name=".notdef"/> + <map code="0x4e" name=".notdef"/> + <map code="0x4f" name=".notdef"/> + <map code="0x50" name=".notdef"/> + <map code="0x51" name=".notdef"/> + <map code="0x52" name=".notdef"/> + <map code="0x53" name=".notdef"/> + <map code="0x54" name=".notdef"/> + <map code="0x55" name=".notdef"/> + <map code="0x56" name=".notdef"/> + <map code="0x57" name=".notdef"/> + <map code="0x58" name=".notdef"/> + <map code="0x59" name=".notdef"/> + <map code="0x5a" name=".notdef"/> + <map code="0x5b" name=".notdef"/> + <map code="0x5c" name=".notdef"/> + <map code="0x5d" name=".notdef"/> + <map code="0x5e" name=".notdef"/> + <map code="0x5f" name=".notdef"/> + <map code="0x60" name=".notdef"/> + <map code="0x61" name=".notdef"/> + <map code="0x62" name=".notdef"/> + <map code="0x63" name=".notdef"/> + <map code="0x64" 
name=".notdef"/> + <map code="0x65" name=".notdef"/> + <map code="0x66" name=".notdef"/> + <map code="0x67" name=".notdef"/> + <map code="0x68" name=".notdef"/> + <map code="0x69" name=".notdef"/> + <map code="0x6a" name=".notdef"/> + <map code="0x6b" name=".notdef"/> + <map code="0x6c" name=".notdef"/> + <map code="0x6d" name=".notdef"/> + <map code="0x6e" name=".notdef"/> + <map code="0x6f" name=".notdef"/> + <map code="0x70" name=".notdef"/> + <map code="0x71" name=".notdef"/> + <map code="0x72" name=".notdef"/> + <map code="0x73" name=".notdef"/> + <map code="0x74" name=".notdef"/> + <map code="0x75" name=".notdef"/> + <map code="0x76" name=".notdef"/> + <map code="0x77" name=".notdef"/> + <map code="0x78" name=".notdef"/> + <map code="0x79" name=".notdef"/> + <map code="0x7a" name=".notdef"/> + <map code="0x7b" name=".notdef"/> + <map code="0x7c" name=".notdef"/> + <map code="0x7d" name=".notdef"/> + <map code="0x7e" name=".notdef"/> + <map code="0x7f" name=".notdef"/> + <map code="0x80" name=".notdef"/> + <map code="0x81" name=".notdef"/> + <map code="0x82" name=".notdef"/> + <map code="0x83" name=".notdef"/> + <map code="0x84" name=".notdef"/> + <map code="0x85" name=".notdef"/> + <map code="0x86" name=".notdef"/> + <map code="0x87" name=".notdef"/> + <map code="0x88" name=".notdef"/> + <map code="0x89" name=".notdef"/> + <map code="0x8a" name=".notdef"/> + <map code="0x8b" name=".notdef"/> + <map code="0x8c" name=".notdef"/> + <map code="0x8d" name=".notdef"/> + <map code="0x8e" name=".notdef"/> + <map code="0x8f" name=".notdef"/> + <map code="0x90" name=".notdef"/> + <map code="0x91" name=".notdef"/> + <map code="0x92" name=".notdef"/> + <map code="0x93" name=".notdef"/> + <map code="0x94" name=".notdef"/> + <map code="0x95" name=".notdef"/> + <map code="0x96" name=".notdef"/> + <map code="0x97" name=".notdef"/> + <map code="0x98" name=".notdef"/> + <map code="0x99" name=".notdef"/> + <map code="0x9a" name=".notdef"/> + <map code="0x9b" name=".notdef"/> + 
<map code="0x9c" name=".notdef"/> + <map code="0x9d" name=".notdef"/> + <map code="0x9e" name=".notdef"/> + <map code="0x9f" name=".notdef"/> + <map code="0xa0" name=".notdef"/> + <map code="0xa1" name=".notdef"/> + <map code="0xa2" name=".notdef"/> + <map code="0xa3" name=".notdef"/> + <map code="0xa4" name=".notdef"/> + <map code="0xa5" name=".notdef"/> + <map code="0xa6" name=".notdef"/> + <map code="0xa7" name=".notdef"/> + <map code="0xa8" name=".notdef"/> + <map code="0xa9" name=".notdef"/> + <map code="0xaa" name=".notdef"/> + <map code="0xab" name=".notdef"/> + <map code="0xac" name=".notdef"/> + <map code="0xad" name=".notdef"/> + <map code="0xae" name=".notdef"/> + <map code="0xaf" name=".notdef"/> + <map code="0xb0" name=".notdef"/> + <map code="0xb1" name=".notdef"/> + <map code="0xb2" name=".notdef"/> + <map code="0xb3" name=".notdef"/> + <map code="0xb4" name=".notdef"/> + <map code="0xb5" name=".notdef"/> + <map code="0xb6" name=".notdef"/> + <map code="0xb7" name=".notdef"/> + <map code="0xb8" name=".notdef"/> + <map code="0xb9" name=".notdef"/> + <map code="0xba" name=".notdef"/> + <map code="0xbb" name=".notdef"/> + <map code="0xbc" name=".notdef"/> + <map code="0xbd" name=".notdef"/> + <map code="0xbe" name=".notdef"/> + <map code="0xbf" name=".notdef"/> + <map code="0xc0" name=".notdef"/> + <map code="0xc1" name=".notdef"/> + <map code="0xc2" name=".notdef"/> + <map code="0xc3" name=".notdef"/> + <map code="0xc4" name=".notdef"/> + <map code="0xc5" name=".notdef"/> + <map code="0xc6" name=".notdef"/> + <map code="0xc7" name=".notdef"/> + <map code="0xc8" name=".notdef"/> + <map code="0xc9" name="ellipsis"/> + </cmap_format_6> + <cmap_format_4 platformID="3" platEncID="1" language="0"> + <map code="0x0" name=".null"/><!-- ???? --> + <map code="0xd" name="CR"/><!-- ???? 
--> + <map code="0x20" name="space"/><!-- SPACE --> + <map code="0x2e" name="period"/><!-- FULL STOP --> + <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> + </cmap_format_4> + </cmap> + + <fpgm> + <assembly> + SVTCA[0] /* SetFPVectorToAxis */ + </assembly> + </fpgm> + + <prep> + <assembly> + SVTCA[0] /* SetFPVectorToAxis */ + </assembly> + </prep> + + <cvt> + <cv index="0" value="0"/> + </cvt> + + <loca> + <!-- The 'loca' table will be calculated by the compiler --> + </loca> + + <glyf> + + <!-- The xMin, yMin, xMax and yMax values + will be recalculated by the compiler. --> + + <TTGlyph name=".notdef" xMin="50" yMin="0" xMax="450" yMax="750"> + <contour> + <pt x="50" y="0" on="1"/> + <pt x="50" y="750" on="1"/> + <pt x="450" y="750" on="1"/> + <pt x="450" y="0" on="1"/> + </contour> + <contour> + <pt x="400" y="50" on="1"/> + <pt x="400" y="700" on="1"/> + <pt x="100" y="700" on="1"/> + <pt x="100" y="50" on="1"/> + </contour> + <instructions><assembly> + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + </assembly></instructions> + </TTGlyph> + + <TTGlyph name=".null"/><!-- contains no outline data --> + + <TTGlyph name="CR"/><!-- contains no outline data --> + + <TTGlyph name="ellipsis" xMin="55" yMin="0" xMax="668" yMax="122"> + <component glyphName="period" x="0" y="0" flags="0x4"/> + <component glyphName="period" x="241" y="0" flags="0x4"/> + <component glyphName="period" x="482" y="0" flags="0x4"/> + <instructions><assembly> + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + </assembly></instructions> + </TTGlyph> + + <TTGlyph name="period" xMin="55" yMin="0" xMax="186" yMax="122"> + <contour> + <pt x="55" y="122" on="1"/> + <pt x="186" y="122" on="1"/> + <pt x="186" y="0" on="1"/> + <pt x="55" y="0" on="1"/> + </contour> + <instructions><assembly> + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + </assembly></instructions> + </TTGlyph> + + <TTGlyph name="space"/><!-- contains no 
outline data --> + + </glyf> + + <name> + <namerecord nameID="0" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Copyright (c) 2015 by FontTools. No rights reserved. + </namerecord> + <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Regular + </namerecord> + <namerecord nameID="3" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools: Test TTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="5" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True"> + TestTTF-Regular + </namerecord> + <namerecord nameID="7" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="1" platEncID="0" langID="0x0" unicode="True"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + <namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True"> + Test TTF + </namerecord> + <namerecord nameID="0" platformID="3" platEncID="1" langID="0x409"> + Copyright (c) 2015 by FontTools. No rights reserved. 
+ </namerecord> + <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409"> + Test TTF + </namerecord> + <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409"> + Regular + </namerecord> + <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409"> + FontTools: Test TTF: 2015 + </namerecord> + <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409"> + Test TTF + </namerecord> + <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409"> + Version 1.000 + </namerecord> + <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409"> + TestTTF-Regular + </namerecord> + <namerecord nameID="7" platformID="3" platEncID="1" langID="0x409"> + Test TTF is not a trademark of FontTools. + </namerecord> + <namerecord nameID="8" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409"> + FontTools + </namerecord> + <namerecord nameID="11" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="12" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools + </namerecord> + <namerecord nameID="14" platformID="3" platEncID="1" langID="0x409"> + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + </namerecord> + </name> + + <post> + <formatType value="2.0"/> + <italicAngle value="0.0"/> + <underlinePosition value="-75"/> + <underlineThickness value="50"/> + <isFixedPitch value="0"/> + <minMemType42 value="0"/> + <maxMemType42 value="0"/> + <minMemType1 value="0"/> + <maxMemType1 value="0"/> + <psNames> + <!-- This file uses unique glyph names based on the information + found in the 'post' table. Since these names might not be unique, + we have to invent artificial names in case of clashes. In order to + be able to retain the original information, we need a name to + ps name mapping for those cases where they differ. That's what + you see below. 
+ --> + </psNames> + <extraNames> + <!-- following are the name that are not taken from the standard Mac glyph order --> + <psName name=".null"/> + <psName name="CR"/> + </extraNames> + </post> + + <gasp> + <gaspRange rangeMaxPPEM="8" rangeGaspBehavior="10"/> + <gaspRange rangeMaxPPEM="65535" rangeGaspBehavior="15"/> + </gasp> + + <DSIG> + <!-- note that the Digital Signature will be invalid after recompilation! --> + <tableHeader flag="0x0" numSigs="0" version="1"/> + </DSIG> + +</ttFont> diff -Nru fonttools-2.4/Tools/fontTools/ttLib/testdata/test_woff2_metadata.xml fonttools-3.0/Tools/fontTools/ttLib/testdata/test_woff2_metadata.xml --- fonttools-2.4/Tools/fontTools/ttLib/testdata/test_woff2_metadata.xml 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/testdata/test_woff2_metadata.xml 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,103 @@ +<?xml version="1.0" encoding="UTF-8"?> +<metadata version="1.0"> + <uniqueid id="org.w3.webfonts.wofftest" /> + <vendor name="Test Vendor" url="http://w3c.org/Fonts" /> + <credits> + <credit name="Credit 1" role="Role 1" url="http://w3c.org/Fonts" /> + <credit name="Credit 2" role="Role 2" url="http://w3c.org/Fonts" /> + </credits> + <description url="http://w3c.org/Fonts"> + <text> + Description without language. + </text> + <text lang="en"> + Description with "en" language. + </text> + <text lang="fr"> + Description with "fr" language. + </text> + </description> + <license url="http://w3c.org/Fonts" id="License ID"> + <text> + License without language. + </text> + <text lang="en"> + License with "en" language. + </text> + <text lang="fr"> + License with "fr" language. + </text> + </license> + <copyright> + <text> + Copyright without language. + </text> + <text lang="en"> + Copyright with "en" language. + </text> + <text lang="fr"> + Copyright with "fr" language. + </text> + </copyright> + <trademark> + <text> + Trademark without language. + </text> + <text lang="en"> + Trademark with "en" language. 
+ </text> + <text lang="fr"> + Trademark with "fr" language. + </text> + </trademark> + <licensee name="Licensee Name" /> + <extension id="Extension 1"> + <name>Extension 1 - Name Without Language</name> + <name lang="en">Extension 1 - Name With "en" Language</name> + <name lang="fr">Extension 1 - Name With "fr" Language</name> + <item id="Extension 1 - Item 1 ID"> + <name>Extension 1 - Item 1 - Name Without Language</name> + <name lang="en">Extension 1 - Item 1 - Name With "en" Language</name> + <name lang="fr">Extension 1 - Item 1 - Name With "fr" Language</name> + <value>Extension 1 - Item 1 - Value Without Language</value> + <value lang="en">Extension 1 - Item 1 - Value With "en" Language</value> + <value lang="fr">Extension 1 - Item 1 - Value With "fr" Language</value> + </item> + <item id="Extension 1 - Item 2 ID"> + <name>Extension 1 - Item 2 - Name Without Language</name> + <name lang="en">Extension 1 - Item 2 - Name With "en" Language</name> + <name lang="fr">Extension 1 - Item 2 - Name With "fr" Language</name> + <value>Extension 1 - Item 2 - Value Without Language</value> + <value lang="en">Extension 1 - Item 2 - Value With "en" Language</value> + <value lang="fr">Extension 1 - Item 2 - Value With "fr" Language</value> + </item> + </extension> + <extension id="Extension 2"> + <name>Extension 2 - Name Without Language</name> + <name lang="en">Extension 2 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Name With "fr" Language</name> + <item id="Extension 2 - Item 1 ID"> + <name>Extension 2 - Item 1 - Name Without Language</name> + <name lang="en">Extension 2 - Item 1 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Item 1 - Name With "fr" Language</name> + <value>Extension 2 - Item 1 - Value Without Language</value> + <value lang="en">Extension 2 - Item 1 - Value With "en" Language</value> + <value lang="fr">Extension 2 - Item 1 - Value With "fr" Language</value> + </item> + <item id="Extension 2 - Item 2 ID"> + 
<name>Extension 2 - Item 2 - Name Without Language</name> + <name lang="en">Extension 2 - Item 2 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Item 2 - Name With "fr" Language</name> + <value>Extension 2 - Item 2 - Value Without Language</value> + <value lang="en">Extension 2 - Item 2 - Value With "en" Language</value> + <value lang="fr">Extension 2 - Item 2 - Value With "fr" Language</value> + </item> + <item id="Extension 2 - Item 3 ID"> + <name>Extension 2 - Item 3 - Name Without Language</name> + <name lang="en">Extension 2 - Item 3 - Name With "en" Language</name> + <name lang="fr">Extension 2 - Item 3 - Name With "fr" Language</name> + <value>Extension 2 - Item 3 - Value Without Language</value> + <value lang="en">Extension 2 - Item 3 - Value With "en" Language</value> + </item> + </extension> +</metadata> diff -Nru fonttools-2.4/Tools/fontTools/ttLib/woff2.py fonttools-3.0/Tools/fontTools/ttLib/woff2.py --- fonttools-2.4/Tools/fontTools/ttLib/woff2.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/woff2.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,1084 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +import array +import struct +from collections import OrderedDict +from fontTools.misc import sstruct +from fontTools.misc.arrayTools import calcIntBounds +from fontTools.misc.textTools import pad +from fontTools.ttLib import (TTFont, TTLibError, getTableModule, getTableClass, + getSearchRange) +from fontTools.ttLib.sfnt import (SFNTReader, SFNTWriter, DirectoryEntry, + WOFFFlavorData, sfntDirectoryFormat, sfntDirectorySize, SFNTDirectoryEntry, + sfntDirectoryEntrySize, calcChecksum) +from fontTools.ttLib.tables import ttProgram + +haveBrotli = False +try: + import brotli + haveBrotli = True +except ImportError: + pass + + +class WOFF2Reader(SFNTReader): + + flavor = "woff2" + + def __init__(self, file, checkChecksums=1, fontNumber=-1): + if 
not haveBrotli: + print('The WOFF2 decoder requires the Brotli Python extension, available at:\n' + 'https://github.com/google/brotli', file=sys.stderr) + raise ImportError("No module named brotli") + + self.file = file + + signature = Tag(self.file.read(4)) + if signature != b"wOF2": + raise TTLibError("Not a WOFF2 font (bad signature)") + + self.file.seek(0) + self.DirectoryEntry = WOFF2DirectoryEntry + data = self.file.read(woff2DirectorySize) + if len(data) != woff2DirectorySize: + raise TTLibError('Not a WOFF2 font (not enough data)') + sstruct.unpack(woff2DirectoryFormat, data, self) + + self.tables = OrderedDict() + offset = 0 + for i in range(self.numTables): + entry = self.DirectoryEntry() + entry.fromFile(self.file) + tag = Tag(entry.tag) + self.tables[tag] = entry + entry.offset = offset + offset += entry.length + + totalUncompressedSize = offset + compressedData = self.file.read(self.totalCompressedSize) + decompressedData = brotli.decompress(compressedData) + if len(decompressedData) != totalUncompressedSize: + raise TTLibError( + 'unexpected size for decompressed font data: expected %d, found %d' + % (totalUncompressedSize, len(decompressedData))) + self.transformBuffer = BytesIO(decompressedData) + + self.file.seek(0, 2) + if self.length != self.file.tell(): + raise TTLibError("reported 'length' doesn't match the actual file size") + + self.flavorData = WOFF2FlavorData(self) + + # make empty TTFont to store data while reconstructing tables + self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) + + def __getitem__(self, tag): + """Fetch the raw table data. 
Reconstruct transformed tables.""" + entry = self.tables[Tag(tag)] + if not hasattr(entry, 'data'): + if tag in woff2TransformedTableTags: + entry.data = self.reconstructTable(tag) + else: + entry.data = entry.loadData(self.transformBuffer) + return entry.data + + def reconstructTable(self, tag): + """Reconstruct table named 'tag' from transformed data.""" + if tag not in woff2TransformedTableTags: + raise TTLibError("transform for table '%s' is unknown" % tag) + entry = self.tables[Tag(tag)] + rawData = entry.loadData(self.transformBuffer) + if tag == 'glyf': + # no need to pad glyph data when reconstructing + padding = self.padding if hasattr(self, 'padding') else None + data = self._reconstructGlyf(rawData, padding) + elif tag == 'loca': + data = self._reconstructLoca() + else: + raise NotImplementedError + return data + + def _reconstructGlyf(self, data, padding=None): + """ Return recostructed glyf table data, and set the corresponding loca's + locations. Optionally pad glyph offsets to the specified number of bytes. + """ + self.ttFont['loca'] = WOFF2LocaTable() + glyfTable = self.ttFont['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(data, self.ttFont) + glyfTable.padding = padding + data = glyfTable.compile(self.ttFont) + return data + + def _reconstructLoca(self): + """ Return reconstructed loca table data. 
""" + if 'loca' not in self.ttFont: + # make sure glyf is reconstructed first + self.tables['glyf'].data = self.reconstructTable('glyf') + locaTable = self.ttFont['loca'] + data = locaTable.compile(self.ttFont) + if len(data) != self.tables['loca'].origLength: + raise TTLibError( + "reconstructed 'loca' table doesn't match original size: " + "expected %d, found %d" + % (self.tables['loca'].origLength, len(data))) + return data + + +class WOFF2Writer(SFNTWriter): + + flavor = "woff2" + + def __init__(self, file, numTables, sfntVersion="\000\001\000\000", + flavor=None, flavorData=None): + if not haveBrotli: + print('The WOFF2 encoder requires the Brotli Python extension, available at:\n' + 'https://github.com/google/brotli', file=sys.stderr) + raise ImportError("No module named brotli") + + self.file = file + self.numTables = numTables + self.sfntVersion = Tag(sfntVersion) + self.flavorData = flavorData or WOFF2FlavorData() + + self.directoryFormat = woff2DirectoryFormat + self.directorySize = woff2DirectorySize + self.DirectoryEntry = WOFF2DirectoryEntry + + self.signature = Tag("wOF2") + + self.nextTableOffset = 0 + self.transformBuffer = BytesIO() + + self.tables = OrderedDict() + + # make empty TTFont to store data while normalising and transforming tables + self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) + + def __setitem__(self, tag, data): + """Associate new entry named 'tag' with raw table data.""" + if tag in self.tables: + raise TTLibError("cannot rewrite '%s' table" % tag) + if tag == 'DSIG': + # always drop DSIG table, since the encoding process can invalidate it + self.numTables -= 1 + return + + entry = self.DirectoryEntry() + entry.tag = Tag(tag) + entry.flags = getKnownTagIndex(entry.tag) + # WOFF2 table data are written to disk only on close(), after all tags + # have been specified + entry.data = data + + self.tables[tag] = entry + + def close(self): + """ All tags must have been specified. Now write the table data and directory. 
+ """ + if len(self.tables) != self.numTables: + raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables))) + + if self.sfntVersion in ("\x00\x01\x00\x00", "true"): + isTrueType = True + elif self.sfntVersion == "OTTO": + isTrueType = False + else: + raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") + + # The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned. + # However, the reference WOFF2 implementation still fails to reconstruct + # 'unpadded' glyf tables, therefore we need to 'normalise' them. + # See: + # https://github.com/khaledhosny/ots/issues/60 + # https://github.com/google/woff2/issues/15 + if isTrueType: + self._normaliseGlyfAndLoca(padding=4) + self._setHeadTransformFlag() + + # To pass the legacy OpenType Sanitiser currently included in browsers, + # we must sort the table directory and data alphabetically by tag. + # See: + # https://github.com/google/woff2/pull/3 + # https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html + # TODO(user): remove to match spec once browsers are on newer OTS + self.tables = OrderedDict(sorted(self.tables.items())) + + self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets() + + fontData = self._transformTables() + compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT) + + self.totalCompressedSize = len(compressedFont) + self.length = self._calcTotalSize() + self.majorVersion, self.minorVersion = self._getVersion() + self.reserved = 0 + + directory = self._packTableDirectory() + self.file.seek(0) + self.file.write(pad(directory + compressedFont, size=4)) + self._writeFlavorData() + + def _normaliseGlyfAndLoca(self, padding=4): + """ Recompile glyf and loca tables, aligning glyph offsets to multiples of + 'padding' size. Update the head table's 'indexToLocFormat' accordingly while + compiling loca. 
+ """ + if self.sfntVersion == "OTTO": + return + for tag in ('maxp', 'head', 'loca', 'glyf'): + self._decompileTable(tag) + self.ttFont['glyf'].padding = padding + for tag in ('glyf', 'loca'): + self._compileTable(tag) + + def _setHeadTransformFlag(self): + """ Set bit 11 of 'head' table flags to indicate that the font has undergone + a lossless modifying transform. Re-compile head table data.""" + self._decompileTable('head') + self.ttFont['head'].flags |= (1 << 11) + self._compileTable('head') + + def _decompileTable(self, tag): + """ Fetch table data, decompile it, and store it inside self.ttFont. """ + tag = Tag(tag) + if tag not in self.tables: + raise TTLibError("missing required table: %s" % tag) + if self.ttFont.isLoaded(tag): + return + data = self.tables[tag].data + if tag == 'loca': + tableClass = WOFF2LocaTable + elif tag == 'glyf': + tableClass = WOFF2GlyfTable + else: + tableClass = getTableClass(tag) + table = tableClass(tag) + self.ttFont.tables[tag] = table + table.decompile(data, self.ttFont) + + def _compileTable(self, tag): + """ Compile table and store it in its 'data' attribute. """ + self.tables[tag].data = self.ttFont[tag].compile(self.ttFont) + + def _calcSFNTChecksumsLengthsAndOffsets(self): + """ Compute the 'original' SFNT checksums, lengths and offsets for checksum + adjustment calculation. Return the total size of the uncompressed font. 
+ """ + offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables) + for tag, entry in self.tables.items(): + data = entry.data + entry.origOffset = offset + entry.origLength = len(data) + if tag == 'head': + entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) + else: + entry.checkSum = calcChecksum(data) + offset += (entry.origLength + 3) & ~3 + return offset + + def _transformTables(self): + """Return transformed font data.""" + for tag, entry in self.tables.items(): + if tag in woff2TransformedTableTags: + data = self.transformTable(tag) + else: + data = entry.data + entry.offset = self.nextTableOffset + entry.saveData(self.transformBuffer, data) + self.nextTableOffset += entry.length + self.writeMasterChecksum() + fontData = self.transformBuffer.getvalue() + return fontData + + def transformTable(self, tag): + """Return transformed table data.""" + if tag not in woff2TransformedTableTags: + raise TTLibError("Transform for table '%s' is unknown" % tag) + if tag == "loca": + data = b"" + elif tag == "glyf": + for tag in ('maxp', 'head', 'loca', 'glyf'): + self._decompileTable(tag) + glyfTable = self.ttFont['glyf'] + data = glyfTable.transform(self.ttFont) + else: + raise NotImplementedError + return data + + def _calcMasterChecksum(self): + """Calculate checkSumAdjustment.""" + tags = list(self.tables.keys()) + checksums = [] + for i in range(len(tags)): + checksums.append(self.tables[tags[i]].checkSum) + + # Create a SFNT directory for checksum calculation purposes + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16) + directory = sstruct.pack(sfntDirectoryFormat, self) + tables = sorted(self.tables.items()) + for tag, entry in tables: + sfntEntry = SFNTDirectoryEntry() + sfntEntry.tag = entry.tag + sfntEntry.checkSum = entry.checkSum + sfntEntry.offset = entry.origOffset + sfntEntry.length = entry.origLength + directory = directory + sfntEntry.toString() + + directory_end = sfntDirectorySize + 
len(self.tables) * sfntDirectoryEntrySize + assert directory_end == len(directory) + + checksums.append(calcChecksum(directory)) + checksum = sum(checksums) & 0xffffffff + # BiboAfba! + checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff + return checksumadjustment + + def writeMasterChecksum(self): + """Write checkSumAdjustment to the transformBuffer.""" + checksumadjustment = self._calcMasterChecksum() + self.transformBuffer.seek(self.tables['head'].offset + 8) + self.transformBuffer.write(struct.pack(">L", checksumadjustment)) + + def _calcTotalSize(self): + """Calculate total size of WOFF2 font, including any meta- and/or private data.""" + offset = self.directorySize + for entry in self.tables.values(): + offset += len(entry.toString()) + offset += self.totalCompressedSize + offset = (offset + 3) & ~3 + offset = self._calcFlavorDataOffsetsAndSize(offset) + return offset + + def _calcFlavorDataOffsetsAndSize(self, start): + """Calculate offsets and lengths for any meta- and/or private data.""" + offset = start + data = self.flavorData + if data.metaData: + self.metaOrigLength = len(data.metaData) + self.metaOffset = offset + self.compressedMetaData = brotli.compress( + data.metaData, mode=brotli.MODE_TEXT) + self.metaLength = len(self.compressedMetaData) + offset += self.metaLength + else: + self.metaOffset = self.metaLength = self.metaOrigLength = 0 + self.compressedMetaData = b"" + if data.privData: + # make sure private data is padded to 4-byte boundary + offset = (offset + 3) & ~3 + self.privOffset = offset + self.privLength = len(data.privData) + offset += self.privLength + else: + self.privOffset = self.privLength = 0 + return offset + + def _getVersion(self): + """Return the WOFF2 font's (majorVersion, minorVersion) tuple.""" + data = self.flavorData + if data.majorVersion is not None and data.minorVersion is not None: + return data.majorVersion, data.minorVersion + else: + # if None, return 'fontRevision' from 'head' table + if 'head' in 
self.tables: + return struct.unpack(">HH", self.tables['head'].data[4:8]) + else: + return 0, 0 + + def _packTableDirectory(self): + """Return WOFF2 table directory data.""" + directory = sstruct.pack(self.directoryFormat, self) + for entry in self.tables.values(): + directory = directory + entry.toString() + return directory + + def _writeFlavorData(self): + """Write metadata and/or private data using appropiate padding.""" + compressedMetaData = self.compressedMetaData + privData = self.flavorData.privData + if compressedMetaData and privData: + compressedMetaData = pad(compressedMetaData, size=4) + if compressedMetaData: + self.file.seek(self.metaOffset) + assert self.file.tell() == self.metaOffset + self.file.write(compressedMetaData) + if privData: + self.file.seek(self.privOffset) + assert self.file.tell() == self.privOffset + self.file.write(privData) + + def reordersTables(self): + return True + + +# -- woff2 directory helpers and cruft + +woff2DirectoryFormat = """ + > # big endian + signature: 4s # "wOF2" + sfntVersion: 4s + length: L # total woff2 file size + numTables: H # number of tables + reserved: H # set to 0 + totalSfntSize: L # uncompressed size + totalCompressedSize: L # compressed size + majorVersion: H # major version of WOFF file + minorVersion: H # minor version of WOFF file + metaOffset: L # offset to metadata block + metaLength: L # length of compressed metadata + metaOrigLength: L # length of uncompressed metadata + privOffset: L # offset to private data block + privLength: L # length of private data block +""" + +woff2DirectorySize = sstruct.calcsize(woff2DirectoryFormat) + +woff2KnownTags = ( + "cmap", "head", "hhea", "hmtx", "maxp", "name", "OS/2", "post", "cvt ", + "fpgm", "glyf", "loca", "prep", "CFF ", "VORG", "EBDT", "EBLC", "gasp", + "hdmx", "kern", "LTSH", "PCLT", "VDMX", "vhea", "vmtx", "BASE", "GDEF", + "GPOS", "GSUB", "EBSC", "JSTF", "MATH", "CBDT", "CBLC", "COLR", "CPAL", + "SVG ", "sbix", "acnt", "avar", "bdat", "bloc", 
"bsln", "cvar", "fdsc", + "feat", "fmtx", "fvar", "gvar", "hsty", "just", "lcar", "mort", "morx", + "opbd", "prop", "trak", "Zapf", "Silf", "Glat", "Gloc", "Feat", "Sill") + +woff2FlagsFormat = """ + > # big endian + flags: B # table type and flags +""" + +woff2FlagsSize = sstruct.calcsize(woff2FlagsFormat) + +woff2UnknownTagFormat = """ + > # big endian + tag: 4s # 4-byte tag (optional) +""" + +woff2UnknownTagSize = sstruct.calcsize(woff2UnknownTagFormat) + +woff2UnknownTagIndex = 0x3F + +woff2Base128MaxSize = 5 +woff2DirectoryEntryMaxSize = woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize + +woff2TransformedTableTags = ('glyf', 'loca') + +woff2GlyfTableFormat = """ + > # big endian + version: L # = 0x00000000 + numGlyphs: H # Number of glyphs + indexFormat: H # Offset format for loca table + nContourStreamSize: L # Size of nContour stream + nPointsStreamSize: L # Size of nPoints stream + flagStreamSize: L # Size of flag stream + glyphStreamSize: L # Size of glyph stream + compositeStreamSize: L # Size of composite stream + bboxStreamSize: L # Comnined size of bboxBitmap and bboxStream + instructionStreamSize: L # Size of instruction stream +""" + +woff2GlyfTableFormatSize = sstruct.calcsize(woff2GlyfTableFormat) + +bboxFormat = """ + > # big endian + xMin: h + yMin: h + xMax: h + yMax: h +""" + + +def getKnownTagIndex(tag): + """Return index of 'tag' in woff2KnownTags list. 
Return 63 if not found.""" + for i in range(len(woff2KnownTags)): + if tag == woff2KnownTags[i]: + return i + return woff2UnknownTagIndex + + +class WOFF2DirectoryEntry(DirectoryEntry): + + def fromFile(self, file): + pos = file.tell() + data = file.read(woff2DirectoryEntryMaxSize) + left = self.fromString(data) + consumed = len(data) - len(left) + file.seek(pos + consumed) + + def fromString(self, data): + if len(data) < 1: + raise TTLibError("can't read table 'flags': not enough data") + dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self) + if self.flags & 0x3F == 0x3F: + # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value + if len(data) < woff2UnknownTagSize: + raise TTLibError("can't read table 'tag': not enough data") + dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self) + else: + # otherwise, tag is derived from a fixed 'Known Tags' table + self.tag = woff2KnownTags[self.flags & 0x3F] + self.tag = Tag(self.tag) + if self.flags & 0xC0 != 0: + raise TTLibError('bits 6-7 are reserved and must be 0') + self.origLength, data = unpackBase128(data) + self.length = self.origLength + if self.tag in woff2TransformedTableTags: + self.length, data = unpackBase128(data) + if self.tag == 'loca' and self.length != 0: + raise TTLibError( + "the transformLength of the 'loca' table must be 0") + # return left over data + return data + + def toString(self): + data = bytechr(self.flags) + if (self.flags & 0x3F) == 0x3F: + data += struct.pack('>4s', self.tag.tobytes()) + data += packBase128(self.origLength) + if self.tag in woff2TransformedTableTags: + data += packBase128(self.length) + return data + + +class WOFF2LocaTable(getTableClass('loca')): + """Same as parent class. The only difference is that it attempts to preserve + the 'indexFormat' as encoded in the WOFF2 glyf table. 
+ """ + + def __init__(self, tag=None): + self.tableTag = Tag(tag or 'loca') + + def compile(self, ttFont): + try: + max_location = max(self.locations) + except AttributeError: + self.set([]) + max_location = 0 + if 'glyf' in ttFont and hasattr(ttFont['glyf'], 'indexFormat'): + # copile loca using the indexFormat specified in the WOFF2 glyf table + indexFormat = ttFont['glyf'].indexFormat + if indexFormat == 0: + if max_location >= 0x20000: + raise TTLibError("indexFormat is 0 but local offsets > 0x20000") + if not all(l % 2 == 0 for l in self.locations): + raise TTLibError("indexFormat is 0 but local offsets not multiples of 2") + locations = array.array("H") + for i in range(len(self.locations)): + locations.append(self.locations[i] // 2) + else: + locations = array.array("I", self.locations) + if sys.byteorder != "big": + locations.byteswap() + data = locations.tostring() + else: + # use the most compact indexFormat given the current glyph offsets + data = super(WOFF2LocaTable, self).compile(ttFont) + return data + + +class WOFF2GlyfTable(getTableClass('glyf')): + """Decoder/Encoder for WOFF2 'glyf' table transform.""" + + subStreams = ( + 'nContourStream', 'nPointsStream', 'flagStream', 'glyphStream', + 'compositeStream', 'bboxStream', 'instructionStream') + + def __init__(self, tag=None): + self.tableTag = Tag(tag or 'glyf') + + def reconstruct(self, data, ttFont): + """ Decompile transformed 'glyf' data. 
""" + inputDataSize = len(data) + + if inputDataSize < woff2GlyfTableFormatSize: + raise TTLibError("not enough 'glyf' data") + dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self) + offset = woff2GlyfTableFormatSize + + for stream in self.subStreams: + size = getattr(self, stream + 'Size') + setattr(self, stream, data[:size]) + data = data[size:] + offset += size + + if offset != inputDataSize: + raise TTLibError( + "incorrect size of transformed 'glyf' table: expected %d, received %d bytes" + % (offset, inputDataSize)) + + bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 + bboxBitmap = self.bboxStream[:bboxBitmapSize] + self.bboxBitmap = array.array('B', bboxBitmap) + self.bboxStream = self.bboxStream[bboxBitmapSize:] + + self.nContourStream = array.array("h", self.nContourStream) + if sys.byteorder != "big": + self.nContourStream.byteswap() + assert len(self.nContourStream) == self.numGlyphs + + if 'head' in ttFont: + ttFont['head'].indexToLocFormat = self.indexFormat + try: + self.glyphOrder = ttFont.getGlyphOrder() + except: + self.glyphOrder = None + if self.glyphOrder is None: + self.glyphOrder = [".notdef"] + self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)]) + else: + if len(self.glyphOrder) != self.numGlyphs: + raise TTLibError( + "incorrect glyphOrder: expected %d glyphs, found %d" % + (len(self.glyphOrder), self.numGlyphs)) + + glyphs = self.glyphs = {} + for glyphID, glyphName in enumerate(self.glyphOrder): + glyph = self._decodeGlyph(glyphID) + glyphs[glyphName] = glyph + + def transform(self, ttFont): + """ Return transformed 'glyf' data """ + self.numGlyphs = len(self.glyphs) + if not hasattr(self, "glyphOrder"): + try: + self.glyphOrder = ttFont.getGlyphOrder() + except: + self.glyphOrder = None + if self.glyphOrder is None: + self.glyphOrder = [".notdef"] + self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)]) + if len(self.glyphOrder) != self.numGlyphs: + raise TTLibError( + "incorrect 
glyphOrder: expected %d glyphs, found %d" % + (len(self.glyphOrder), self.numGlyphs)) + + if 'maxp' in ttFont: + ttFont['maxp'].numGlyphs = self.numGlyphs + self.indexFormat = ttFont['head'].indexToLocFormat + + for stream in self.subStreams: + setattr(self, stream, b"") + bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 + self.bboxBitmap = array.array('B', [0]*bboxBitmapSize) + + for glyphID in range(self.numGlyphs): + self._encodeGlyph(glyphID) + + self.bboxStream = self.bboxBitmap.tostring() + self.bboxStream + for stream in self.subStreams: + setattr(self, stream + 'Size', len(getattr(self, stream))) + self.version = 0 + data = sstruct.pack(woff2GlyfTableFormat, self) + data += bytesjoin([getattr(self, s) for s in self.subStreams]) + return data + + def _decodeGlyph(self, glyphID): + glyph = getTableModule('glyf').Glyph() + glyph.numberOfContours = self.nContourStream[glyphID] + if glyph.numberOfContours == 0: + return glyph + elif glyph.isComposite(): + self._decodeComponents(glyph) + else: + self._decodeCoordinates(glyph) + self._decodeBBox(glyphID, glyph) + return glyph + + def _decodeComponents(self, glyph): + data = self.compositeStream + glyph.components = [] + more = 1 + haveInstructions = 0 + while more: + component = getTableModule('glyf').GlyphComponent() + more, haveInstr, data = component.decompile(data, self) + haveInstructions = haveInstructions | haveInstr + glyph.components.append(component) + self.compositeStream = data + if haveInstructions: + self._decodeInstructions(glyph) + + def _decodeCoordinates(self, glyph): + data = self.nPointsStream + endPtsOfContours = [] + endPoint = -1 + for i in range(glyph.numberOfContours): + ptsOfContour, data = unpack255UShort(data) + endPoint += ptsOfContour + endPtsOfContours.append(endPoint) + glyph.endPtsOfContours = endPtsOfContours + self.nPointsStream = data + self._decodeTriplets(glyph) + self._decodeInstructions(glyph) + + def _decodeInstructions(self, glyph): + glyphStream = self.glyphStream + 
instructionStream = self.instructionStream + instructionLength, glyphStream = unpack255UShort(glyphStream) + glyph.program = ttProgram.Program() + glyph.program.fromBytecode(instructionStream[:instructionLength]) + self.glyphStream = glyphStream + self.instructionStream = instructionStream[instructionLength:] + + def _decodeBBox(self, glyphID, glyph): + haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7))) + if glyph.isComposite() and not haveBBox: + raise TTLibError('no bbox values for composite glyph %d' % glyphID) + if haveBBox: + dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph) + else: + glyph.recalcBounds(self) + + def _decodeTriplets(self, glyph): + + def withSign(flag, baseval): + assert 0 <= baseval and baseval < 65536, 'integer overflow' + return baseval if flag & 1 else -baseval + + nPoints = glyph.endPtsOfContours[-1] + 1 + flagSize = nPoints + if flagSize > len(self.flagStream): + raise TTLibError("not enough 'flagStream' data") + flagsData = self.flagStream[:flagSize] + self.flagStream = self.flagStream[flagSize:] + flags = array.array('B', flagsData) + + triplets = array.array('B', self.glyphStream) + nTriplets = len(triplets) + assert nPoints <= nTriplets + + x = 0 + y = 0 + glyph.coordinates = getTableModule('glyf').GlyphCoordinates.zeros(nPoints) + glyph.flags = array.array("B") + tripletIndex = 0 + for i in range(nPoints): + flag = flags[i] + onCurve = not bool(flag >> 7) + flag &= 0x7f + if flag < 84: + nBytes = 1 + elif flag < 120: + nBytes = 2 + elif flag < 124: + nBytes = 3 + else: + nBytes = 4 + assert ((tripletIndex + nBytes) <= nTriplets) + if flag < 10: + dx = 0 + dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex]) + elif flag < 20: + dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex]) + dy = 0 + elif flag < 84: + b0 = flag - 20 + b1 = triplets[tripletIndex] + dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4)) + dy = withSign(flag >> 1, 1 + ((b0 & 0x0c) << 2) + 
(b1 & 0x0f)) + elif flag < 120: + b0 = flag - 84 + dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex]) + dy = withSign(flag >> 1, + 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1]) + elif flag < 124: + b2 = triplets[tripletIndex + 1] + dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4)) + dy = withSign(flag >> 1, + ((b2 & 0x0f) << 8) + triplets[tripletIndex + 2]) + else: + dx = withSign(flag, + (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1]) + dy = withSign(flag >> 1, + (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3]) + tripletIndex += nBytes + x += dx + y += dy + glyph.coordinates[i] = (x, y) + glyph.flags.append(int(onCurve)) + bytesConsumed = tripletIndex + self.glyphStream = self.glyphStream[bytesConsumed:] + + def _encodeGlyph(self, glyphID): + glyphName = self.getGlyphName(glyphID) + glyph = self[glyphName] + self.nContourStream += struct.pack(">h", glyph.numberOfContours) + if glyph.numberOfContours == 0: + return + elif glyph.isComposite(): + self._encodeComponents(glyph) + else: + self._encodeCoordinates(glyph) + self._encodeBBox(glyphID, glyph) + + def _encodeComponents(self, glyph): + lastcomponent = len(glyph.components) - 1 + more = 1 + haveInstructions = 0 + for i in range(len(glyph.components)): + if i == lastcomponent: + haveInstructions = hasattr(glyph, "program") + more = 0 + component = glyph.components[i] + self.compositeStream += component.compile(more, haveInstructions, self) + if haveInstructions: + self._encodeInstructions(glyph) + + def _encodeCoordinates(self, glyph): + lastEndPoint = -1 + for endPoint in glyph.endPtsOfContours: + ptsOfContour = endPoint - lastEndPoint + self.nPointsStream += pack255UShort(ptsOfContour) + lastEndPoint = endPoint + self._encodeTriplets(glyph) + self._encodeInstructions(glyph) + + def _encodeInstructions(self, glyph): + instructions = glyph.program.getBytecode() + self.glyphStream += pack255UShort(len(instructions)) + self.instructionStream += 
instructions + + def _encodeBBox(self, glyphID, glyph): + assert glyph.numberOfContours != 0, "empty glyph has no bbox" + if not glyph.isComposite(): + # for simple glyphs, compare the encoded bounding box info with the calculated + # values, and if they match omit the bounding box info + currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax + calculatedBBox = calcIntBounds(glyph.coordinates) + if currentBBox == calculatedBBox: + return + self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7) + self.bboxStream += sstruct.pack(bboxFormat, glyph) + + def _encodeTriplets(self, glyph): + assert len(glyph.coordinates) == len(glyph.flags) + coordinates = glyph.coordinates.copy() + coordinates.absoluteToRelative() + + flags = array.array('B') + triplets = array.array('B') + for i in range(len(coordinates)): + onCurve = glyph.flags[i] + x, y = coordinates[i] + absX = abs(x) + absY = abs(y) + onCurveBit = 0 if onCurve else 128 + xSignBit = 0 if (x < 0) else 1 + ySignBit = 0 if (y < 0) else 1 + xySignBits = xSignBit + 2 * ySignBit + + if x == 0 and absY < 1280: + flags.append(onCurveBit + ((absY & 0xf00) >> 7) + ySignBit) + triplets.append(absY & 0xff) + elif y == 0 and absX < 1280: + flags.append(onCurveBit + 10 + ((absX & 0xf00) >> 7) + xSignBit) + triplets.append(absX & 0xff) + elif absX < 65 and absY < 65: + flags.append(onCurveBit + 20 + ((absX - 1) & 0x30) + (((absY - 1) & 0x30) >> 2) + xySignBits) + triplets.append((((absX - 1) & 0xf) << 4) | ((absY - 1) & 0xf)) + elif absX < 769 and absY < 769: + flags.append(onCurveBit + 84 + 12 * (((absX - 1) & 0x300) >> 8) + (((absY - 1) & 0x300) >> 6) + xySignBits) + triplets.append((absX - 1) & 0xff) + triplets.append((absY - 1) & 0xff) + elif absX < 4096 and absY < 4096: + flags.append(onCurveBit + 120 + xySignBits) + triplets.append(absX >> 4) + triplets.append(((absX & 0xf) << 4) | (absY >> 8)) + triplets.append(absY & 0xff) + else: + flags.append(onCurveBit + 124 + xySignBits) + triplets.append(absX >> 8) + 
triplets.append(absX & 0xff) + triplets.append(absY >> 8) + triplets.append(absY & 0xff) + + self.flagStream += flags.tostring() + self.glyphStream += triplets.tostring() + + +class WOFF2FlavorData(WOFFFlavorData): + + Flavor = 'woff2' + + def __init__(self, reader=None): + if not haveBrotli: + raise ImportError("No module named brotli") + self.majorVersion = None + self.minorVersion = None + self.metaData = None + self.privData = None + if reader: + self.majorVersion = reader.majorVersion + self.minorVersion = reader.minorVersion + if reader.metaLength: + reader.file.seek(reader.metaOffset) + rawData = reader.file.read(reader.metaLength) + assert len(rawData) == reader.metaLength + data = brotli.decompress(rawData) + assert len(data) == reader.metaOrigLength + self.metaData = data + if reader.privLength: + reader.file.seek(reader.privOffset) + data = reader.file.read(reader.privLength) + assert len(data) == reader.privLength + self.privData = data + + +def unpackBase128(data): + r""" Read one to five bytes from UIntBase128-encoded input string, and return + a tuple containing the decoded integer plus any leftover data. + + >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00") + True + >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295 + True + >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + File "<stdin>", line 1, in ? + TTLibError: UIntBase128 value must not start with leading zeros + >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + File "<stdin>", line 1, in ? + TTLibError: UIntBase128-encoded sequence is longer than 5 bytes + >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + File "<stdin>", line 1, in ? 
+ TTLibError: UIntBase128 value exceeds 2**32-1 + """ + if len(data) == 0: + raise TTLibError('not enough data to unpack UIntBase128') + result = 0 + if byteord(data[0]) == 0x80: + # font must be rejected if UIntBase128 value starts with 0x80 + raise TTLibError('UIntBase128 value must not start with leading zeros') + for i in range(woff2Base128MaxSize): + if len(data) == 0: + raise TTLibError('not enough data to unpack UIntBase128') + code = byteord(data[0]) + data = data[1:] + # if any of the top seven bits are set then we're about to overflow + if result & 0xFE000000: + raise TTLibError('UIntBase128 value exceeds 2**32-1') + # set current value = old value times 128 bitwise-or (byte bitwise-and 127) + result = (result << 7) | (code & 0x7f) + # repeat until the most significant bit of byte is false + if (code & 0x80) == 0: + # return result plus left over data + return result, data + # make sure not to exceed the size bound + raise TTLibError('UIntBase128-encoded sequence is longer than 5 bytes') + + +def base128Size(n): + """ Return the length in bytes of a UIntBase128-encoded sequence with value n. + + >>> base128Size(0) + 1 + >>> base128Size(24567) + 3 + >>> base128Size(2**32-1) + 5 + """ + assert n >= 0 + size = 1 + while n >= 128: + size += 1 + n >>= 7 + return size + + +def packBase128(n): + r""" Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of + bytes using UIntBase128 variable-length encoding. Produce the shortest possible + encoding. 
+ + >>> packBase128(63) == b"\x3f" + True + >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f' + True + """ + if n < 0 or n >= 2**32: + raise TTLibError( + "UIntBase128 format requires 0 <= integer <= 2**32-1") + data = b'' + size = base128Size(n) + for i in range(size): + b = (n >> (7 * (size - i - 1))) & 0x7f + if i < size - 1: + b |= 0x80 + data += struct.pack('B', b) + return data + + +def unpack255UShort(data): + """ Read one to three bytes from 255UInt16-encoded input string, and return a + tuple containing the decoded integer plus any leftover data. + + >>> unpack255UShort(bytechr(252))[0] + 252 + + Note that some numbers (e.g. 506) can have multiple encodings: + >>> unpack255UShort(struct.pack("BB", 254, 0))[0] + 506 + >>> unpack255UShort(struct.pack("BB", 255, 253))[0] + 506 + >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0] + 506 + """ + code = byteord(data[:1]) + data = data[1:] + if code == 253: + # read two more bytes as an unsigned short + if len(data) < 2: + raise TTLibError('not enough data to unpack 255UInt16') + result, = struct.unpack(">H", data[:2]) + data = data[2:] + elif code == 254: + # read another byte, plus 253 * 2 + if len(data) == 0: + raise TTLibError('not enough data to unpack 255UInt16') + result = byteord(data[:1]) + result += 506 + data = data[1:] + elif code == 255: + # read another byte, plus 253 + if len(data) == 0: + raise TTLibError('not enough data to unpack 255UInt16') + result = byteord(data[:1]) + result += 253 + data = data[1:] + else: + # leave as is if lower than 253 + result = code + # return result plus left over data + return result, data + + +def pack255UShort(value): + r""" Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring + using 255UInt16 variable-length encoding. 
+ + >>> pack255UShort(252) == b'\xfc' + True + >>> pack255UShort(506) == b'\xfe\x00' + True + >>> pack255UShort(762) == b'\xfd\x02\xfa' + True + """ + if value < 0 or value > 0xFFFF: + raise TTLibError( + "255UInt16 format requires 0 <= integer <= 65535") + if value < 253: + return struct.pack(">B", value) + elif value < 506: + return struct.pack(">BB", 255, value - 253) + elif value < 762: + return struct.pack(">BB", 254, value - 506) + else: + return struct.pack(">BH", 253, value) + + +if __name__ == "__main__": + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-2.4/Tools/fontTools/ttLib/woff2_test.py fonttools-3.0/Tools/fontTools/ttLib/woff2_test.py --- fonttools-2.4/Tools/fontTools/ttLib/woff2_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttLib/woff2_test.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,747 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools import ttLib +from .woff2 import (WOFF2Reader, woff2DirectorySize, woff2DirectoryFormat, + woff2FlagsSize, woff2UnknownTagSize, woff2Base128MaxSize, WOFF2DirectoryEntry, + getKnownTagIndex, packBase128, base128Size, woff2UnknownTagIndex, + WOFF2FlavorData, woff2TransformedTableTags, WOFF2GlyfTable, WOFF2LocaTable, + WOFF2Writer) +import unittest +import sstruct +import os +import random +import copy +from collections import OrderedDict + +haveBrotli = False +try: + import brotli + haveBrotli = True +except ImportError: + pass + + +# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires +# deprecation warnings if a program uses the old name. 
+if not hasattr(unittest.TestCase, 'assertRaisesRegex'): + unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp + + +current_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +data_dir = os.path.join(current_dir, 'testdata') +TTX = os.path.join(data_dir, 'TestTTF-Regular.ttx') +OTX = os.path.join(data_dir, 'TestOTF-Regular.otx') +METADATA = os.path.join(data_dir, 'test_woff2_metadata.xml') + +TT_WOFF2 = BytesIO() +CFF_WOFF2 = BytesIO() + + +def setUpModule(): + if not haveBrotli: + raise unittest.SkipTest("No module named brotli") + assert os.path.exists(TTX) + assert os.path.exists(OTX) + # import TT-flavoured test font and save it as WOFF2 + ttf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + ttf.importXML(TTX, quiet=True) + ttf.flavor = "woff2" + ttf.save(TT_WOFF2, reorderTables=None) + # import CFF-flavoured test font and save it as WOFF2 + otf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + otf.importXML(OTX, quiet=True) + otf.flavor = "woff2" + otf.save(CFF_WOFF2, reorderTables=None) + + +class WOFF2ReaderTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.file = BytesIO(CFF_WOFF2.getvalue()) + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + cls.font.importXML(OTX, quiet=True) + + def setUp(self): + self.file.seek(0) + + def test_bad_signature(self): + with self.assertRaisesRegex(ttLib.TTLibError, 'bad signature'): + WOFF2Reader(BytesIO(b"wOFF")) + + def test_not_enough_data_header(self): + incomplete_header = self.file.read(woff2DirectorySize - 1) + with self.assertRaisesRegex(ttLib.TTLibError, 'not enough data'): + WOFF2Reader(BytesIO(incomplete_header)) + + def test_incorrect_compressed_size(self): + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + header['totalCompressedSize'] = 0 + data = sstruct.pack(woff2DirectoryFormat, header) + with self.assertRaises(brotli.error): + WOFF2Reader(BytesIO(data + 
self.file.read())) + + def test_incorrect_uncompressed_size(self): + decompress_backup = brotli.decompress + brotli.decompress = lambda data: b"" # return empty byte string + with self.assertRaisesRegex(ttLib.TTLibError, 'unexpected size for decompressed'): + WOFF2Reader(self.file) + brotli.decompress = decompress_backup + + def test_incorrect_file_size(self): + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + header['length'] -= 1 + data = sstruct.pack(woff2DirectoryFormat, header) + with self.assertRaisesRegex( + ttLib.TTLibError, "doesn't match the actual file size"): + WOFF2Reader(BytesIO(data + self.file.read())) + + def test_num_tables(self): + tags = [t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')] + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + self.assertEqual(header['numTables'], len(tags)) + + def test_table_tags(self): + tags = set([t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')]) + reader = WOFF2Reader(self.file) + self.assertEqual(set(reader.keys()), tags) + + def test_get_normal_tables(self): + woff2Reader = WOFF2Reader(self.file) + specialTags = woff2TransformedTableTags + ('head', 'GlyphOrder', 'DSIG') + for tag in [t for t in self.font.keys() if t not in specialTags]: + origData = self.font.getTableData(tag) + decompressedData = woff2Reader[tag] + self.assertEqual(origData, decompressedData) + + def test_reconstruct_unknown(self): + reader = WOFF2Reader(self.file) + with self.assertRaisesRegex(ttLib.TTLibError, 'transform for table .* unknown'): + reader.reconstructTable('ZZZZ') + + +class WOFF2ReaderTTFTest(WOFF2ReaderTest): + """ Tests specific to TT-flavored fonts. 
""" + + @classmethod + def setUpClass(cls): + cls.file = BytesIO(TT_WOFF2.getvalue()) + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + cls.font.importXML(TTX, quiet=True) + + def setUp(self): + self.file.seek(0) + + def test_reconstruct_glyf(self): + woff2Reader = WOFF2Reader(self.file) + reconstructedData = woff2Reader['glyf'] + self.assertEqual(self.font.getTableData('glyf'), reconstructedData) + + def test_reconstruct_loca(self): + woff2Reader = WOFF2Reader(self.file) + reconstructedData = woff2Reader['loca'] + self.assertEqual(self.font.getTableData('loca'), reconstructedData) + self.assertTrue(hasattr(woff2Reader.tables['glyf'], 'data')) + + def test_reconstruct_loca_not_match_orig_size(self): + reader = WOFF2Reader(self.file) + reader.tables['loca'].origLength -= 1 + with self.assertRaisesRegex( + ttLib.TTLibError, "'loca' table doesn't match original size"): + reader.reconstructTable('loca') + + +def normalise_table(font, tag, padding=4): + """ Return normalised table data. Keep 'font' instance unmodified. 
""" + assert tag in ('glyf', 'loca', 'head') + assert tag in font + if tag == 'head': + origHeadFlags = font['head'].flags + font['head'].flags |= (1 << 11) + tableData = font['head'].compile(font) + if font.sfntVersion in ("\x00\x01\x00\x00", "true"): + assert {'glyf', 'loca', 'head'}.issubset(font.keys()) + origIndexFormat = font['head'].indexToLocFormat + if hasattr(font['loca'], 'locations'): + origLocations = font['loca'].locations[:] + else: + origLocations = [] + glyfTable = ttLib.getTableClass('glyf')() + glyfTable.decompile(font.getTableData('glyf'), font) + glyfTable.padding = padding + if tag == 'glyf': + tableData = glyfTable.compile(font) + elif tag == 'loca': + glyfTable.compile(font) + tableData = font['loca'].compile(font) + if tag == 'head': + glyfTable.compile(font) + font['loca'].compile(font) + tableData = font['head'].compile(font) + font['head'].indexToLocFormat = origIndexFormat + font['loca'].set(origLocations) + if tag == 'head': + font['head'].flags = origHeadFlags + return tableData + + +def normalise_font(font, padding=4): + """ Return normalised font data. Keep 'font' instance unmodified. 
""" + # drop DSIG but keep a copy + DSIG_copy = copy.deepcopy(font['DSIG']) + del font['DSIG'] + # ovverride TTFont attributes + origFlavor = font.flavor + origRecalcBBoxes = font.recalcBBoxes + origRecalcTimestamp = font.recalcTimestamp + origLazy = font.lazy + font.flavor = None + font.recalcBBoxes = False + font.recalcTimestamp = False + font.lazy = True + # save font to temporary stream + infile = BytesIO() + font.save(infile) + infile.seek(0) + # reorder tables alphabetically + outfile = BytesIO() + reader = ttLib.sfnt.SFNTReader(infile) + writer = ttLib.sfnt.SFNTWriter( + outfile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) + for tag in sorted(reader.keys()): + if tag in woff2TransformedTableTags + ('head',): + writer[tag] = normalise_table(font, tag, padding) + else: + writer[tag] = reader[tag] + writer.close() + # restore font attributes + font['DSIG'] = DSIG_copy + font.flavor = origFlavor + font.recalcBBoxes = origRecalcBBoxes + font.recalcTimestamp = origRecalcTimestamp + font.lazy = origLazy + return outfile.getvalue() + + +class WOFF2DirectoryEntryTest(unittest.TestCase): + + def setUp(self): + self.entry = WOFF2DirectoryEntry() + + def test_not_enough_data_table_flags(self): + with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'flags'"): + self.entry.fromString(b"") + + def test_not_enough_data_table_tag(self): + incompleteData = bytearray([0x3F, 0, 0, 0]) + with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'tag'"): + self.entry.fromString(bytes(incompleteData)) + + def test_table_reserved_flags(self): + with self.assertRaisesRegex(ttLib.TTLibError, "bits 6-7 are reserved"): + self.entry.fromString(bytechr(0xC0)) + + def test_loca_zero_transformLength(self): + data = bytechr(getKnownTagIndex('loca')) # flags + data += packBase128(random.randint(1, 100)) # origLength + data += packBase128(1) # non-zero transformLength + with self.assertRaisesRegex( + ttLib.TTLibError, "transformLength of the 
'loca' table must be 0"): + self.entry.fromString(data) + + def test_fromFile(self): + unknownTag = Tag('ZZZZ') + data = bytechr(getKnownTagIndex(unknownTag)) + data += unknownTag.tobytes() + data += packBase128(random.randint(1, 100)) + expectedPos = len(data) + f = BytesIO(data + b'\0'*100) + self.entry.fromFile(f) + self.assertEqual(f.tell(), expectedPos) + + def test_transformed_toString(self): + self.entry.tag = Tag('glyf') + self.entry.flags = getKnownTagIndex(self.entry.tag) + self.entry.origLength = random.randint(101, 200) + self.entry.length = random.randint(1, 100) + expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength) + + base128Size(self.entry.length)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + def test_known_toString(self): + self.entry.tag = Tag('head') + self.entry.flags = getKnownTagIndex(self.entry.tag) + self.entry.origLength = 54 + expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + def test_unknown_toString(self): + self.entry.tag = Tag('ZZZZ') + self.entry.flags = woff2UnknownTagIndex + self.entry.origLength = random.randint(1, 100) + expectedSize = (woff2FlagsSize + woff2UnknownTagSize + + base128Size(self.entry.origLength)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + +class DummyReader(WOFF2Reader): + + def __init__(self, file, checkChecksums=1, fontNumber=-1): + self.file = file + for attr in ('majorVersion', 'minorVersion', 'metaOffset', 'metaLength', + 'metaOrigLength', 'privLength', 'privOffset'): + setattr(self, attr, 0) + + +class WOFF2FlavorDataTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + assert os.path.exists(METADATA) + with open(METADATA, 'rb') as f: + cls.xml_metadata = f.read() + cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) + # make random byte strings; font data must be 4-byte aligned + 
cls.fontdata = bytes(bytearray(random.sample(range(0, 256), 80))) + cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) + + def setUp(self): + self.file = BytesIO(self.fontdata) + self.file.seek(0, 2) + + def test_get_metaData_no_privData(self): + self.file.write(self.compressed_metadata) + reader = DummyReader(self.file) + reader.metaOffset = len(self.fontdata) + reader.metaLength = len(self.compressed_metadata) + reader.metaOrigLength = len(self.xml_metadata) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.xml_metadata, flavorData.metaData) + + def test_get_privData_no_metaData(self): + self.file.write(self.privData) + reader = DummyReader(self.file) + reader.privOffset = len(self.fontdata) + reader.privLength = len(self.privData) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.privData, flavorData.privData) + + def test_get_metaData_and_privData(self): + self.file.write(self.compressed_metadata + self.privData) + reader = DummyReader(self.file) + reader.metaOffset = len(self.fontdata) + reader.metaLength = len(self.compressed_metadata) + reader.metaOrigLength = len(self.xml_metadata) + reader.privOffset = reader.metaOffset + reader.metaLength + reader.privLength = len(self.privData) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.xml_metadata, flavorData.metaData) + self.assertEqual(self.privData, flavorData.privData) + + def test_get_major_minorVersion(self): + reader = DummyReader(self.file) + reader.majorVersion = reader.minorVersion = 1 + flavorData = WOFF2FlavorData(reader) + self.assertEqual(flavorData.majorVersion, 1) + self.assertEqual(flavorData.minorVersion, 1) + + +class WOFF2WriterTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") + cls.font.importXML(OTX, quiet=True) + cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] + cls.numTables = len(cls.tags) + cls.file = 
BytesIO(CFF_WOFF2.getvalue()) + cls.file.seek(0, 2) + cls.length = (cls.file.tell() + 3) & ~3 + cls.setUpFlavorData() + + @classmethod + def setUpFlavorData(cls): + assert os.path.exists(METADATA) + with open(METADATA, 'rb') as f: + cls.xml_metadata = f.read() + cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) + cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) + + def setUp(self): + self.file.seek(0) + self.writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion) + + def test_DSIG_dropped(self): + self.writer['DSIG'] = b"\0" + self.assertEqual(len(self.writer.tables), 0) + self.assertEqual(self.writer.numTables, self.numTables-1) + + def test_no_rewrite_table(self): + self.writer['ZZZZ'] = b"\0" + with self.assertRaisesRegex(ttLib.TTLibError, "cannot rewrite"): + self.writer['ZZZZ'] = b"\0" + + def test_num_tables(self): + self.writer['ABCD'] = b"\0" + with self.assertRaisesRegex(ttLib.TTLibError, "wrong number of tables"): + self.writer.close() + + def test_required_tables(self): + font = ttLib.TTFont(flavor="woff2") + with self.assertRaisesRegex(ttLib.TTLibError, "missing required table"): + font.save(BytesIO()) + + def test_head_transform_flag(self): + headData = self.font.getTableData('head') + origFlags = byteord(headData[16]) + woff2font = ttLib.TTFont(self.file) + newHeadData = woff2font.getTableData('head') + modifiedFlags = byteord(newHeadData[16]) + self.assertNotEqual(origFlags, modifiedFlags) + restoredFlags = modifiedFlags & ~0x08 # turn off bit 11 + self.assertEqual(origFlags, restoredFlags) + + def test_tables_sorted_alphabetically(self): + expected = sorted([t for t in self.tags if t != 'DSIG']) + woff2font = ttLib.TTFont(self.file) + self.assertEqual(expected, list(woff2font.reader.keys())) + + def test_checksums(self): + normFile = BytesIO(normalise_font(self.font, padding=4)) + normFile.seek(0) + normFont = ttLib.TTFont(normFile, checkChecksums=2) + w2font = ttLib.TTFont(self.file) 
+ # force reconstructing glyf table using 4-byte padding + w2font.reader.padding = 4 + for tag in [t for t in self.tags if t != 'DSIG']: + w2data = w2font.reader[tag] + normData = normFont.reader[tag] + if tag == "head": + w2data = w2data[:8] + b'\0\0\0\0' + w2data[12:] + normData = normData[:8] + b'\0\0\0\0' + normData[12:] + w2CheckSum = ttLib.sfnt.calcChecksum(w2data) + normCheckSum = ttLib.sfnt.calcChecksum(normData) + self.assertEqual(w2CheckSum, normCheckSum) + normCheckSumAdjustment = normFont['head'].checkSumAdjustment + self.assertEqual(normCheckSumAdjustment, w2font['head'].checkSumAdjustment) + + def test_calcSFNTChecksumsLengthsAndOffsets(self): + normFont = ttLib.TTFont(BytesIO(normalise_font(self.font, padding=4))) + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer._normaliseGlyfAndLoca(padding=4) + self.writer._setHeadTransformFlag() + self.writer.tables = OrderedDict(sorted(self.writer.tables.items())) + self.writer._calcSFNTChecksumsLengthsAndOffsets() + for tag, entry in normFont.reader.tables.items(): + self.assertEqual(entry.offset, self.writer.tables[tag].origOffset) + self.assertEqual(entry.length, self.writer.tables[tag].origLength) + self.assertEqual(entry.checkSum, self.writer.tables[tag].checkSum) + + def test_bad_sfntVersion(self): + for i in range(self.numTables): + self.writer[bytechr(65 + i)*4] = b"\0" + self.writer.sfntVersion = 'ZZZZ' + with self.assertRaisesRegex(ttLib.TTLibError, "bad sfntVersion"): + self.writer.close() + + def test_calcTotalSize_no_flavorData(self): + expected = self.length + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_metaData(self): + expected = self.length + len(self.compressed_metadata) + flavorData = self.writer.flavorData = WOFF2FlavorData() + 
flavorData.metaData = self.xml_metadata + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_privData(self): + expected = self.length + len(self.privData) + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.privData = self.privData + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_metaData_and_privData(self): + metaDataLength = (len(self.compressed_metadata) + 3) & ~3 + expected = self.length + metaDataLength + len(self.privData) + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.metaData = self.xml_metadata + flavorData.privData = self.privData + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_getVersion(self): + # no version + self.assertEqual((0, 0), self.writer._getVersion()) + # version from head.fontRevision + fontRevision = self.font['head'].fontRevision + versionTuple = tuple(int(i) for i in str(fontRevision).split(".")) + entry = self.writer.tables['head'] = ttLib.getTableClass('head')() + entry.data = self.font.getTableData('head') + self.assertEqual(versionTuple, self.writer._getVersion()) + # version from writer.flavorData + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.majorVersion, flavorData.minorVersion = (10, 11) + self.assertEqual((10, 11), self.writer._getVersion()) + + +class WOFF2WriterTTFTest(WOFF2WriterTest): + + @classmethod + def setUpClass(cls): + cls.font = 
ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") + cls.font.importXML(TTX, quiet=True) + cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] + cls.numTables = len(cls.tags) + cls.file = BytesIO(TT_WOFF2.getvalue()) + cls.file.seek(0, 2) + cls.length = (cls.file.tell() + 3) & ~3 + cls.setUpFlavorData() + + def test_normaliseGlyfAndLoca(self): + normTables = {} + for tag in ('head', 'loca', 'glyf'): + normTables[tag] = normalise_table(self.font, tag, padding=4) + for tag in self.tags: + tableData = self.font.getTableData(tag) + self.writer[tag] = tableData + if tag in normTables: + self.assertNotEqual(tableData, normTables[tag]) + self.writer._normaliseGlyfAndLoca(padding=4) + self.writer._setHeadTransformFlag() + for tag in normTables: + self.assertEqual(self.writer.tables[tag].data, normTables[tag]) + + +class WOFF2LocaTableTest(unittest.TestCase): + + def setUp(self): + self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font['head'] = ttLib.getTableClass('head') + font['loca'] = WOFF2LocaTable() + font['glyf'] = WOFF2GlyfTable() + + def test_compile_short_loca(self): + locaTable = self.font['loca'] + locaTable.set(list(range(0, 0x20000, 2))) + self.font['glyf'].indexFormat = 0 + locaData = locaTable.compile(self.font) + self.assertEqual(len(locaData), 0x20000) + + def test_compile_short_loca_overflow(self): + locaTable = self.font['loca'] + locaTable.set(list(range(0x20000 + 1))) + self.font['glyf'].indexFormat = 0 + with self.assertRaisesRegex( + ttLib.TTLibError, "indexFormat is 0 but local offsets > 0x20000"): + locaTable.compile(self.font) + + def test_compile_short_loca_not_multiples_of_2(self): + locaTable = self.font['loca'] + locaTable.set([1, 3, 5, 7]) + self.font['glyf'].indexFormat = 0 + with self.assertRaisesRegex(ttLib.TTLibError, "offsets not multiples of 2"): + locaTable.compile(self.font) + + def test_compile_long_loca(self): + locaTable = self.font['loca'] + 
locaTable.set(list(range(0x20001))) + self.font['glyf'].indexFormat = 1 + locaData = locaTable.compile(self.font) + self.assertEqual(len(locaData), 0x20001 * 4) + + def test_compile_set_indexToLocFormat_0(self): + locaTable = self.font['loca'] + # offsets are all multiples of 2 and max length is < 0x10000 + locaTable.set(list(range(0, 0x20000, 2))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(0, newIndexFormat) + + def test_compile_set_indexToLocFormat_1(self): + locaTable = self.font['loca'] + # offsets are not multiples of 2 + locaTable.set(list(range(10))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(1, newIndexFormat) + # max length is >= 0x10000 + locaTable.set(list(range(0, 0x20000 + 1, 2))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(1, newIndexFormat) + + +class WOFF2GlyfTableTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font.importXML(TTX, quiet=True) + cls.tables = {} + cls.transformedTags = ('maxp', 'head', 'loca', 'glyf') + for tag in reversed(cls.transformedTags): # compile in inverse order + cls.tables[tag] = font.getTableData(tag) + infile = BytesIO(TT_WOFF2.getvalue()) + reader = WOFF2Reader(infile) + cls.transformedGlyfData = reader.tables['glyf'].loadData( + reader.transformBuffer) + + def setUp(self): + self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font['head'] = ttLib.getTableClass('head')() + font['maxp'] = ttLib.getTableClass('maxp')() + font['loca'] = WOFF2LocaTable() + font['glyf'] = WOFF2GlyfTable() + for tag in self.transformedTags: + font[tag].decompile(self.tables[tag], font) + + def test_reconstruct_glyf_padded_4(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 4 + data = 
glyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) + self.assertEqual(normGlyfData, data) + + def test_reconstruct_glyf_padded_2(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 2 + data = glyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) + self.assertEqual(normGlyfData, data) + + def test_reconstruct_glyf_unpadded(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + data = glyfTable.compile(self.font) + self.assertEqual(self.tables['glyf'], data) + + def test_reconstruct_glyf_incorrect_glyphOrder(self): + glyfTable = WOFF2GlyfTable() + badGlyphOrder = self.font.getGlyphOrder()[:-1] + self.font.setGlyphOrder(badGlyphOrder) + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.reconstruct(self.transformedGlyfData, self.font) + + def test_reconstruct_glyf_missing_glyphOrder(self): + glyfTable = WOFF2GlyfTable() + del self.font.glyphOrder + numGlyphs = self.font['maxp'].numGlyphs + del self.font['maxp'] + glyfTable.reconstruct(self.transformedGlyfData, self.font) + expected = [".notdef"] + expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)]) + self.assertEqual(expected, glyfTable.glyphOrder) + + def test_reconstruct_loca_padded_4(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 4 + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) + self.assertEqual(normLocaData, data) + + def test_reconstruct_loca_padded_2(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + 
glyfTable.padding = 2 + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) + self.assertEqual(normLocaData, data) + + def test_reconstruct_loca_unpadded(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + self.assertEqual(self.tables['loca'], data) + + def test_reconstruct_glyf_header_not_enough_data(self): + with self.assertRaisesRegex(ttLib.TTLibError, "not enough 'glyf' data"): + WOFF2GlyfTable().reconstruct(b"", self.font) + + def test_reconstruct_glyf_table_incorrect_size(self): + msg = "incorrect size of transformed 'glyf'" + with self.assertRaisesRegex(ttLib.TTLibError, msg): + WOFF2GlyfTable().reconstruct(self.transformedGlyfData + b"\x00", self.font) + with self.assertRaisesRegex(ttLib.TTLibError, msg): + WOFF2GlyfTable().reconstruct(self.transformedGlyfData[:-1], self.font) + + def test_transform_glyf(self): + glyfTable = self.font['glyf'] + data = glyfTable.transform(self.font) + self.assertEqual(self.transformedGlyfData, data) + + def test_transform_glyf_incorrect_glyphOrder(self): + glyfTable = self.font['glyf'] + badGlyphOrder = self.font.getGlyphOrder()[:-1] + del glyfTable.glyphOrder + self.font.setGlyphOrder(badGlyphOrder) + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.transform(self.font) + glyfTable.glyphOrder = badGlyphOrder + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.transform(self.font) + + def test_transform_glyf_missing_glyphOrder(self): + glyfTable = self.font['glyf'] + del glyfTable.glyphOrder + del self.font.glyphOrder + numGlyphs = self.font['maxp'].numGlyphs + del self.font['maxp'] + glyfTable.transform(self.font) + expected = [".notdef"] + expected.extend(["glyph%.5d" % i for i in 
range(1, numGlyphs)]) + self.assertEqual(expected, glyfTable.glyphOrder) + + def test_roundtrip_glyf_reconstruct_and_transform(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + data = glyfTable.transform(self.font) + self.assertEqual(self.transformedGlyfData, data) + + def test_roundtrip_glyf_transform_and_reconstruct(self): + glyfTable = self.font['glyf'] + transformedData = glyfTable.transform(self.font) + newGlyfTable = WOFF2GlyfTable() + newGlyfTable.reconstruct(transformedData, self.font) + newGlyfTable.padding = 4 + reconstructedData = newGlyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', newGlyfTable.padding) + self.assertEqual(normGlyfData, reconstructedData) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-2.4/Tools/fontTools/ttx.py fonttools-3.0/Tools/fontTools/ttx.py --- fonttools-2.4/Tools/fontTools/ttx.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/ttx.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,370 @@ +"""\ +usage: ttx [options] inputfile1 [... inputfileN] + + TTX %s -- From OpenType To XML And Back + + If an input file is a TrueType or OpenType font file, it will be + dumped to an TTX file (an XML-based text format). + If an input file is a TTX file, it will be compiled to a TrueType + or OpenType font file. + + Output files are created so they are unique: an existing file is + never overwritten. + + General options: + -h Help: print this message + -d <outputfolder> Specify a directory where the output files are + to be created. + -o <outputfile> Specify a file to write the output to. A special + value of of - would use the standard output. + -f Overwrite existing output file(s), ie. don't append numbers. + -v Verbose: more messages will be written to stdout about what + is being done. + -q Quiet: No messages will be written to stdout about what + is being done. + -a allow virtual glyphs ID's on compile or decompile. 
+ + Dump options: + -l List table info: instead of dumping to a TTX file, list some + minimal info about each table. + -t <table> Specify a table to dump. Multiple -t options + are allowed. When no -t option is specified, all tables + will be dumped. + -x <table> Specify a table to exclude from the dump. Multiple + -x options are allowed. -t and -x are mutually exclusive. + -s Split tables: save the TTX data into separate TTX files per + table and write one small TTX file that contains references + to the individual table dumps. This file can be used as + input to ttx, as long as the table files are in the + same directory. + -i Do NOT disassemble TT instructions: when this option is given, + all TrueType programs (glyph programs, the font program and the + pre-program) will be written to the TTX file as hex data + instead of assembly. This saves some time and makes the TTX + file smaller. + -z <format> Specify a bitmap data export option for EBDT: + {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT: + {'raw', 'extfile'} Each option does one of the following: + -z raw + * export the bitmap data as a hex dump + -z row + * export each row as hex data + -z bitwise + * export each row as binary in an ASCII art style + -z extfile + * export the data as external files with XML references + If no export format is specified 'raw' format is used. + -e Don't ignore decompilation errors, but show a full traceback + and abort. + -y <number> Select font number for TrueType Collection, + starting from 0. + --unicodedata <UnicodeData.txt> Use custom database file to write + character names in the comments of the cmap TTX output. + + Compile options: + -m Merge with TrueType-input-file: specify a TrueType or OpenType + font file to be merged with the TTX file. This option is only + valid when at most one TTX file is specified. + -b Don't recalc glyph bounding boxes: use the values in the TTX + file as-is. + --recalc-timestamp Set font 'modified' timestamp to current time. 
+ By default, the modification time of the TTX file will be used. +""" + + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont, TTLibError +from fontTools.misc.macCreatorType import getMacCreatorAndType +from fontTools.unicode import setUnicodeData +from fontTools.misc.timeTools import timestampSinceEpoch +import os +import sys +import getopt +import re + +def usage(): + from fontTools import version + print(__doc__ % version) + sys.exit(2) + + +numberAddedRE = re.compile("#\d+$") +opentypeheaderRE = re.compile('''sfntVersion=['"]OTTO["']''') + +def makeOutputFileName(input, outputDir, extension, overWrite=False): + dirName, fileName = os.path.split(input) + fileName, ext = os.path.splitext(fileName) + if outputDir: + dirName = outputDir + fileName = numberAddedRE.split(fileName)[0] + output = os.path.join(dirName, fileName + extension) + n = 1 + if not overWrite: + while os.path.exists(output): + output = os.path.join(dirName, fileName + "#" + repr(n) + extension) + n = n + 1 + return output + + +class Options(object): + + listTables = False + outputDir = None + outputFile = None + overWrite = False + verbose = False + quiet = False + splitTables = False + disassembleInstructions = True + mergeFile = None + recalcBBoxes = True + allowVID = False + ignoreDecompileErrors = True + bitmapGlyphDataFormat = 'raw' + unicodedata = None + recalcTimestamp = False + + def __init__(self, rawOptions, numFiles): + self.onlyTables = [] + self.skipTables = [] + self.fontNumber = -1 + for option, value in rawOptions: + # general options + if option == "-h": + from fontTools import version + print(__doc__ % version) + sys.exit(0) + elif option == "-d": + if not os.path.isdir(value): + print("The -d option value must be an existing directory") + sys.exit(2) + self.outputDir = value + elif option == "-o": + self.outputFile = value + elif option == "-f": + self.overWrite = True + elif option == "-v": 
+ self.verbose = True + elif option == "-q": + self.quiet = True + # dump options + elif option == "-l": + self.listTables = True + elif option == "-t": + self.onlyTables.append(value) + elif option == "-x": + self.skipTables.append(value) + elif option == "-s": + self.splitTables = True + elif option == "-i": + self.disassembleInstructions = False + elif option == "-z": + validOptions = ('raw', 'row', 'bitwise', 'extfile') + if value not in validOptions: + print("-z does not allow %s as a format. Use %s" % (option, validOptions)) + sys.exit(2) + self.bitmapGlyphDataFormat = value + elif option == "-y": + self.fontNumber = int(value) + # compile options + elif option == "-m": + self.mergeFile = value + elif option == "-b": + self.recalcBBoxes = False + elif option == "-a": + self.allowVID = True + elif option == "-e": + self.ignoreDecompileErrors = False + elif option == "--unicodedata": + self.unicodedata = value + elif option == "--recalc-timestamp": + self.recalcTimestamp = True + if self.onlyTables and self.skipTables: + print("-t and -x options are mutually exclusive") + sys.exit(2) + if self.mergeFile and numFiles > 1: + print("Must specify exactly one TTX source file when using -m") + sys.exit(2) + + +def ttList(input, output, options): + ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True) + reader = ttf.reader + tags = sorted(reader.keys()) + print('Listing table info for "%s":' % input) + format = " %4s %10s %7s %7s" + print(format % ("tag ", " checksum", " length", " offset")) + print(format % ("----", "----------", "-------", "-------")) + for tag in tags: + entry = reader.tables[tag] + if ttf.flavor == "woff2": + # WOFF2 doesn't store table checksums, so they must be calculated + from fontTools.ttLib.sfnt import calcChecksum + data = entry.loadData(reader.transformBuffer) + checkSum = calcChecksum(data) + else: + checkSum = int(entry.checkSum) + if checkSum < 0: + checkSum = checkSum + 0x100000000 + checksum = "0x%08X" % checkSum + 
print(format % (tag, checksum, entry.length, entry.offset)) + print() + ttf.close() + + +def ttDump(input, output, options): + if not options.quiet: + print('Dumping "%s" to "%s"...' % (input, output)) + if options.unicodedata: + setUnicodeData(options.unicodedata) + ttf = TTFont(input, 0, verbose=options.verbose, allowVID=options.allowVID, + quiet=options.quiet, + ignoreDecompileErrors=options.ignoreDecompileErrors, + fontNumber=options.fontNumber) + ttf.saveXML(output, + quiet=options.quiet, + tables=options.onlyTables, + skipTables=options.skipTables, + splitTables=options.splitTables, + disassembleInstructions=options.disassembleInstructions, + bitmapGlyphDataFormat=options.bitmapGlyphDataFormat) + ttf.close() + + +def ttCompile(input, output, options): + if not options.quiet: + print('Compiling "%s" to "%s"...' % (input, output)) + ttf = TTFont(options.mergeFile, + recalcBBoxes=options.recalcBBoxes, + recalcTimestamp=options.recalcTimestamp, + verbose=options.verbose, allowVID=options.allowVID) + ttf.importXML(input, quiet=options.quiet) + + if not options.recalcTimestamp: + # use TTX file modification time for head "modified" timestamp + mtime = os.path.getmtime(input) + ttf['head'].modified = timestampSinceEpoch(mtime) + + ttf.save(output) + + if options.verbose: + import time + print("finished at", time.strftime("%H:%M:%S", time.localtime(time.time()))) + + +def guessFileType(fileName): + base, ext = os.path.splitext(fileName) + try: + f = open(fileName, "rb") + except IOError: + return None + cr, tp = getMacCreatorAndType(fileName) + if tp in ("sfnt", "FFIL"): + return "TTF" + if ext == ".dfont": + return "TTF" + header = f.read(256) + head = Tag(header[:4]) + if head == "OTTO": + return "OTF" + elif head == "ttcf": + return "TTC" + elif head in ("\0\1\0\0", "true"): + return "TTF" + elif head == "wOFF": + return "WOFF" + elif head == "wOF2": + return "WOFF2" + elif head.lower() == "<?xm": + # Use 'latin1' because that can't fail. 
+ header = tostr(header, 'latin1') + if opentypeheaderRE.search(header): + return "OTX" + else: + return "TTX" + return None + + +def parseOptions(args): + try: + rawOptions, files = getopt.getopt(args, "ld:o:fvqht:x:sim:z:baey:", + ['unicodedata=', "recalc-timestamp"]) + except getopt.GetoptError: + usage() + + if not files: + usage() + + options = Options(rawOptions, len(files)) + jobs = [] + + for input in files: + tp = guessFileType(input) + if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"): + extension = ".ttx" + if options.listTables: + action = ttList + else: + action = ttDump + elif tp == "TTX": + extension = ".ttf" + action = ttCompile + elif tp == "OTX": + extension = ".otf" + action = ttCompile + else: + print('Unknown file type: "%s"' % input) + continue + + if options.outputFile: + output = options.outputFile + else: + output = makeOutputFileName(input, options.outputDir, extension, options.overWrite) + # 'touch' output file to avoid race condition in choosing file names + if action != ttList: + open(output, 'a').close() + jobs.append((action, input, output)) + return jobs, options + + +def process(jobs, options): + for action, input, output in jobs: + action(input, output, options) + + +def waitForKeyPress(): + """Force the DOS Prompt window to stay open so the user gets + a chance to see what's wrong.""" + import msvcrt + print('(Hit any key to exit)') + while not msvcrt.kbhit(): + pass + + +def main(args=None): + if args is None: + args = sys.argv[1:] + jobs, options = parseOptions(args) + try: + process(jobs, options) + except KeyboardInterrupt: + print("(Cancelled.)") + except SystemExit: + if sys.platform == "win32": + waitForKeyPress() + else: + raise + except TTLibError as e: + print("Error:",e) + except: + if sys.platform == "win32": + import traceback + traceback.print_exc() + waitForKeyPress() + else: + raise + + +if __name__ == "__main__": + main() diff -Nru fonttools-2.4/Tools/fontTools/unicode.py fonttools-3.0/Tools/fontTools/unicode.py --- 
fonttools-2.4/Tools/fontTools/unicode.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/fontTools/unicode.py 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,43 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +def _makeunicodes(f): + import re + lines = iter(f.readlines()) + unicodes = {} + for line in lines: + if not line: continue + num, name = line.split(';')[:2] + if name[0] == '<': continue # "<control>", etc. + num = int(num, 16) + unicodes[num] = name + return unicodes + + +class _UnicodeCustom(object): + + def __init__(self, f): + if isinstance(f, basestring): + f = open(f) + self.codes = _makeunicodes(f) + + def __getitem__(self, charCode): + try: + return self.codes[charCode] + except KeyError: + return "????" + +class _UnicodeBuiltin(object): + + def __getitem__(self, charCode): + import unicodedata + try: + return unicodedata.name(unichr(charCode)) + except ValueError: + return "????" + +Unicode = _UnicodeBuiltin() + +def setUnicodeData(f): + global Unicode + Unicode = _UnicodeCustom(f) diff -Nru fonttools-2.4/Tools/pyftinspect fonttools-3.0/Tools/pyftinspect --- fonttools-2.4/Tools/pyftinspect 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/pyftinspect 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +#! /usr/bin/env python + +import sys +from fontTools import inspect + +inspect.main(sys.argv[1:]) diff -Nru fonttools-2.4/Tools/pyftmerge fonttools-3.0/Tools/pyftmerge --- fonttools-2.4/Tools/pyftmerge 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/pyftmerge 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +#! /usr/bin/env python + +import sys +from fontTools import merge + +merge.main(sys.argv[1:]) diff -Nru fonttools-2.4/Tools/pyftsubset fonttools-3.0/Tools/pyftsubset --- fonttools-2.4/Tools/pyftsubset 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/Tools/pyftsubset 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,6 @@ +#! 
/usr/bin/env python + +import sys +from fontTools import subset + +subset.main(sys.argv[1:]) diff -Nru fonttools-2.4/Tools/ttx fonttools-3.0/Tools/ttx --- fonttools-2.4/Tools/ttx 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Tools/ttx 2015-08-31 17:57:15.000000000 +0000 @@ -1,10 +1,5 @@ #! /usr/bin/env python - -def _dummy(): - from encodings import latin_1, utf_8, utf_16_be - - import sys from fontTools import ttx diff -Nru fonttools-2.4/.travis.yml fonttools-3.0/.travis.yml --- fonttools-2.4/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.0/.travis.yml 2015-08-31 17:57:15.000000000 +0000 @@ -0,0 +1,25 @@ +language: python +python: + - "2.7" + - "3.3" + - "3.4" + - "pypy" +# - "pypy3" # Disable pypy3 until Travis updates it to >= 3.3 +before_install: +# install GCC v4.8 with better C++11 support, required to build Brotli extension +# See: https://github.com/travis-ci/travis-ci/issues/1379 + - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test + - sudo apt-get -qq update + - sudo apt-get install -qq gcc-4.8 + - sudo apt-get install -qq g++-4.8 + - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 90 + - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 90 + - g++ --version +install: + - pip install -vr requirements.txt + - make install +script: + - make check +notifications: + irc: "irc.freenode.org##fonts" + email: fonttools@googlegroups.com diff -Nru fonttools-2.4/Windows/README.TXT fonttools-3.0/Windows/README.TXT --- fonttools-2.4/Windows/README.TXT 2013-04-23 04:08:08.000000000 +0000 +++ fonttools-3.0/Windows/README.TXT 2015-08-31 17:57:15.000000000 +0000 @@ -15,7 +15,7 @@ 3. Install InnoSetup 4: http://www.jrsoftware.org/ 4. Download the latest released source code of TTX/FontTools at http://sourceforge.net/projects/fonttools/ - Or alternatively grab the sources from SVN: + Or alternatively grab the sources from the VCS: http://fonttools.sourceforge.net/ 5. 
Unzip the source code of TTX/FontTools into a folder. 6. In the folder where you unzipped TTX/FontTools, type: @@ -36,7 +36,7 @@ 5. Put UPX somewhere within your PATH: http://upx.sourceforge.net/ 6. Download the latest released source code of TTX/FontTools at http://sourceforge.net/projects/fonttools/ - Or alternatively grab the sources from SVN: + Or alternatively grab the sources from the VCS: http://fonttools.sourceforge.net/ 7. Unzip the source code of TTX/FontTools into a folder. 8. In the folder where you unzipped TTX/FontTools, type: