Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/build/vignette.rds and /tmp/J1mSmQsMZw/gdata-2.17.0/build/vignette.rds differ diff -Nru gdata-2.13.3/ChangeLog gdata-2.17.0/ChangeLog --- gdata-2.13.3/ChangeLog 2014-04-05 18:39:47.000000000 +0000 +++ gdata-2.17.0/ChangeLog 1970-01-01 00:00:00.000000000 +0000 @@ -1,1618 +0,0 @@ -2014-04-05 warnes - - * [r1799] R/nobs.R: Call stats::nobs instead of - stats:::nobs.default within - gdata::nobs.default. This avoids R CMD check warning. - * [r1798] tests/unitTests/runit.unknown.R: Don't compare optional - POSIXlt field. Explicitly compare POSIXlt, with special handling - of '-1' unknown value. - * [r1797] R/mapLevels.R, R/unknown.R: Don't use gdata::: - prefix to access gdata function - * [r1796] DESCRIPTION: Fix syntax error in DESCRIPTION file. - * [r1795] tests/runRUnitTests.R: Package name needs to be defined - outside of if test. - * [r1794] vignettes/Rnews.sty: Style file needed - * [r1793] R/unknown.R, tests/unitTests/runit.unknown.R: The issue - Brian pointed out was an error in the isUnknown() code, not an - error in the unit tests! - * [r1792] tests/unitTests/runit.unknown.R: Apply changes Brian - recommned to NAtoUnknown as well as unknownToNA. - * [r1791] inst/NEWS: Update NEWS file - * [r1790] inst/doc/Rnews.dtx: Don't need latex .dtx source file - * [r1789] inst/doc/mapLevels.Rnw, inst/doc/unknown.Rnw, vignettes, - vignettes/mapLevels.Rnw, vignettes/unknown.Rnw: Move vignettes - from inst/doc/ to vignettes/ - * [r1788] R/aggregate.table.R, man/aggregate.table.Rd, - man/gdata-defunct.Rd: Change 'aggregate.table' from deprecated to - defunct. - * [r1787] DESCRIPTION, inst/unitTests, man/gdata-package.Rd, - tests/runRUnitTests.R, tests/unitTests: Complete changes so that - the unit tests are run as part of R CMD check - * [r1786] DESCRIPTION, inst/NEWS: Update NEWS for gdata 2.13.4 - * [r1785] NAMESPACE: Update NAMESPACE file to remove deleted - function - * [r1784] inst/unitTests/Makefile, inst/unitTests/runit.bindData.R, - inst/unitTests/runit.cbindX.R, - inst/unitTests/runit.drop.levels.R, - inst/unitTests/runit.getDateTimeParts.R, - inst/unitTests/runit.mapLevels.R, inst/unitTests/runit.nPairs.R, - inst/unitTests/runit.reorder.factor.R, - inst/unitTests/runit.trim.R, inst/unitTests/runit.trimSum.R, - inst/unitTests/runit.unknown.R, - inst/unitTests/runit.wideByFactor.R, - inst/unitTests/runit.write.fwf.R, tests/Makefile, - tests/runRUnitTests.R, tests/runit.bindData.R, - tests/runit.cbindX.R, tests/runit.drop.levels.R, - tests/runit.getDateTimeParts.R, tests/runit.mapLevels.R, - tests/runit.nPairs.R, tests/runit.reorder.factor.R, - tests/runit.trim.R, tests/runit.trimSum.R, tests/runit.unknown.R, - tests/runit.wideByFactor.R, tests/runit.write.fwf.R: Move unit - test files back to inst/unitTests. Fix up runRUnitTests.R to work - properly in the new location - * [r1783] tests/runit.unknown.R: - For unit tests, don't check for - equality of optional POSIXlt - components. (Bug reported by Brian Ripley). 
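Several entries above relate to gdata's unknown-value helpers (isUnknown, unknownToNA, NAtoUnknown). A minimal sketch of how these are typically used, assuming a numeric vector that codes missing values as 999 (the vector itself is invented for illustration):

library(gdata)

x <- c(5, 2, 999, 7)                # 999 stands for "unknown"
isUnknown(x, unknown = 999)         # FALSE FALSE  TRUE FALSE
y <- unknownToNA(x, unknown = 999)  # 5 2 NA 7
NAtoUnknown(y, unknown = 999)       # back to 5 2 999 7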
- * [r1782] R/runRUnitTests.R, inst/unitTests/Makefile, - inst/unitTests/runRUnitTests.R, inst/unitTests/runit.bindData.R, - inst/unitTests/runit.cbindX.R, - inst/unitTests/runit.drop.levels.R, - inst/unitTests/runit.getDateTimeParts.R, - inst/unitTests/runit.mapLevels.R, inst/unitTests/runit.nPairs.R, - inst/unitTests/runit.reorder.factor.R, - inst/unitTests/runit.trim.R, inst/unitTests/runit.trimSum.R, - inst/unitTests/runit.unknown.R, - inst/unitTests/runit.wideByFactor.R, - inst/unitTests/runit.write.fwf.R, man/runRUnitTests.Rd, - tests/Makefile, tests/runRUnitTests.R, tests/runit.bindData.R, - tests/runit.cbindX.R, tests/runit.drop.levels.R, - tests/runit.getDateTimeParts.R, tests/runit.mapLevels.R, - tests/runit.nPairs.R, tests/runit.reorder.factor.R, - tests/runit.trim.R, tests/runit.trimSum.R, tests/runit.unknown.R, - tests/runit.wideByFactor.R, tests/runit.write.fwf.R: Move unit - test code into the (now) standard location - -2014-03-19 arnima - - * [r1777] R/keep.R: change warning message to R standards - -2013-12-18 arnima - - * [r1758] R/ll.R: Retain original list order unless sort=FALSE; - also stop if unnamed list - -2013-12-16 warnes - - * [r1757] R/trim.R: Trim will now remove all types of - leading/trailing whitespace by using - the [:blank:] character class. - -2013-06-29 warnes - - * [r1692] inst/NEWS: Update NEWS for second try for gdata 2.13.2 - * [r1691] R/ll.R: Simplify ll() by stuffing list arguments into an - environment, avoiding the need to use attach/detach. - -2013-06-28 warnes - - * [r1685] inst/NEWS: Update NEWS for gdata 2.13.2 - * [r1684] tests/test.read.xls.Rout.save, - tests/tests.write.fwf.Rout.save: Minor update to - tests/*.Rout.save - * [r1683] R/ll.R: Add on.exit() handler to ensure a matching detach - occurs when attach is used in ll() - * [r1682] DESCRIPTION: Update for gdata 2.13.2 - * [r1681] R/aggregate.table.R: Improve deprecated message - -2013-03-24 warnes - - * [r1645] tests/test.read.xls.Rout.save, - tests/tests.write.fwf.Rout.save: Update test files for code - changes - * [r1644] inst/NEWS: Fix formatting in NEWS - * [r1643] DESCRIPTION, inst/NEWS, man/read.xls.Rd, - man/sheetCount.Rd, tests/test.read.xls.R: Replaced calls to - depreciated function ".path.package" with the new public function - "path.package". - -2013-01-14 warnes - - * [r1639] R/installXLSXsupport.R, R/sheetCount.R, R/xls2sep.R, - R/xlsFormats.R: Replace (obsolete) '.path.package' with - 'find.package' function. - -2012-09-20 warnes - - * [r1622] man/MedUnits.Rd, man/ans.Rd, man/duplicated2.Rd: Correct - .Rd file errors detected by 'R CMD check'. - * [r1621] NAMESPACE: Add duplicated() and ans() to the NAMESPACE. - * [r1620] DESCRIPTION, inst/NEWS: Update for gdata 2.13.0. - * [r1619] man/ConvertMedUnits.Rd: Fix typographic error. - * [r1618] R/ans.R, R/duplicated2.R, man/ans.Rd, man/duplicated2.Rd: - Add 'ans()' and 'duplicated()' contributed by Liviu Andronic. - -2012-09-19 warnes - - * [r1617] data/MedUnits.rda: Correct column names. Unit columns - were reversed and misspelled. - * [r1616] R/sheetCount.R: Add ignore.stderr to system command in - sheetCmd() to prevent stderr - messages from being included in the captured output from the perl - script. - -2012-09-12 warnes - - * [r1606] DESCRIPTION, inst/NEWS: Update for gdata 2.12.0 - * [r1605] R/aggregate.table.R, man/aggregate.table.Rd: - 'stats::aggregate' was made into a generic on 27-Jan-2010, so - that - attempting to call 'aggregate' on a 'table' object will now - incorrectly call 'aggregate.table'. 
Since 'aggregate.table' can - be - replaced by a call to tapply using two index vectors, e.g. - aggregate.table(x, by1=a, by2=b, mean) - can be replaced by - tapply(x, INDEX=list(a, b), FUN=mean), - the 'aggregate.table' function will now display a warning that it - is depreciated and recommending the equivalent call to tapply. It - will be removed entirely in a future version of gdata. - * [r1604] .Rinstignore: Don't ignore .Rnw files, but do ignore .svn - files. - -2012-09-11 warnes - - * [r1603] man/interleave.Rd: Clarify workding of DROP argument to - interleave(). - * [r1602] man/interleave.Rd: Replace call to aggregate.table() with - equivalent tapply() call since aggregate.table() is being - depreciated. - -2012-08-22 warnes - - * [r1601] DESCRIPTION, inst/NEWS: Update DESCRIPTION and NEWS for - gdate 2.11.1. - * [r1600] man/read.xls.Rd: Add example for read.xls() that shows - how to use the fileEncoding - argument to read in latin-1 encoded data. - * [r1599] tests/latin-1.xls, tests/test.read.xls.R, - tests/test.read.xls.Rout.save: Add XLSX test for latin-1 - characters, and look for them in their new - location in inst/xls/. - * [r1598] inst/xls/latin-1.xls, inst/xls/latin-1.xlsx: add XLSX - version of latin-1.xls - * [r1597] tests/latin-1.xls, tests/test.read.xls.R, - tests/test.read.xls.Rout.save: Add test file and code to ensure - that read.xls() can properly handle - files with alternative encodings. latin-1.xls contains each of - the - non-ascii latin-1 special characters in both the column headings - and - the body of the file. - * [r1596] R/read.xls.R: Change code to have R read the csv/tab data - from the file rather than - from the connetion we made, so that file encodings can be - properly handled. - * [r1595] R/read.xls.R: Always close the connection. - -2012-08-13 warnes - - * [r1594] inst/perl/xls2csv.pl: Remove trailing space from output - line. - -2012-06-18 warnes - - * [r1567] inst/NEWS: Update NEWS for 2.11.0 release. - * [r1566] DESCRIPTION: Bump version number and add - SystemRequirements for perl. - * [r1565] R/xls2sep.R, inst/perl/xls2csv.pl, man/read.xls.Rd, - tests/test.read.xls.R, tests/test.read.xls.Rout.save: read.xls() - and supporting functions now allow blank lines to be - preserved, rather than skipped, by supplying the argument - "blank.lines.skip=FALSE". The underlying perl function has been - extended to suppor this via an optional "-s" argument which, when - present, *preserves* blank lines during the conversion. - -2012-06-13 warnes - - * [r1564] DESCRIPTION, R/nobs.R, inst/NEWS: - nobs.default needs to - handle logical vectors in addition to numeric - vectors. - - update DESCRIPTION and NEWS for 2.10.6. - * [r1563] R/nobs.R: nobs.default needs to handle logical as well as - numeric vectors. - -2012-06-08 warnes - - * [r1562] DESCRIPTION, tests/test.read.xls.Rout.save: Update - DESCRIPTION and tests - * [r1561] tests/test.read.xls.R: fix incorrect function name - * [r1560] DESCRIPTION, man/installXLSXsupport.Rd: Mark example for - installXLSXsupport() to not be executed durin R CMD check. - * [r1559] DESCRIPTION: stats:::nobs.default and stats::nobs.lm - require R > 2.13.0, so add this as a dependency. - -2012-06-06 warnes - - * [r1552] DESCRIPTION, inst/NEWS: Update for release 2.10.2 - * [r1551] R/nobs.R: Fix bugs in nobs.default. - * [r1550] tests/test.read.xls.Rout.save, - tests/tests.write.fwf.Rout.save: Update to reflect warning on - startup that 'nobs' hides 'stats::nobs'. - * [r1549] man/nobs.Rd: Remove stray non-ASCII characters. 
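The r1605 entry recommends replacing aggregate.table() with tapply(); a small sketch of the substitution it describes, using made-up data:

x <- c(1, 2, 3, 4)
a <- factor(c("low", "low", "high", "high"))
b <- factor(c("A", "B", "A", "B"))

# Formerly: aggregate.table(x, by1 = a, by2 = b, mean)
# Equivalent call recommended in the entry above:
tapply(x, INDEX = list(a, b), FUN = mean)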
- * [r1548] R/nobs.R: The nobs() dispatch method must be defined in - the gdata namespace to - pick up the definition of gdata::nobs.default. - * [r1547] DESCRIPTION, inst/NEWS: Update DESCRIPTION and NEWS for - 2.10.1 release. - * [r1546] NAMESPACE, R/nobs.R, man/nobs.Rd: Define aliases for - 'nobs' and 'nobs.lm' to support backward - compatibility for packages depending on gdata. - * [r1545] DESCRIPTION, inst/NEWS: Update DESCRIPTION and NEWS for - 2.10.0 release - * [r1544] NAMESPACE, R/startsWith.R, man/startsWith.Rd: - Add - manual page and NAMESPACE entry for startsWith(). - - Add 'ignore.case' argument to startsWith(). - * [r1543] tests/test.read.xls.Rout.save: Update to match new code. - * [r1542] man/read.xls.Rd: Replace non-ASCII characters. - * [r1541] R/read.xls.R, man/read.xls.Rd, tests/test.read.xls.R: Add - na.strings to read.xls call to convert "#DIV/0!" to NA. - -2012-06-05 warnes - - * [r1540] NAMESPACE: Remove nobs method dispatch and lm methods - since these are now provided by the stats package. - * [r1539] R/env.R: Spell out arguments to ls() to avoid R CMD check - warnings. - * [r1538] .Rinstignore: Add .Rinstignore file to omit latex style - and source files from distributed inst/doc directory. - * [r1537] R/ConvertMedUnits.R: - Add NULL definition of MedUnits to - avoid R CMD check warning. - - Specify local environment when calling data() so that MedUnits - gets - defined in the function's environment rather than the global - environment. - * [r1536] R/ls.funs.R: Fix error in ls.funs() that occurs when - there are no objects in the environment. - * [r1535] R/object.size.R: Avoid warning by calling - utils::object.size rather than Internal(object.size(x)) - -2012-05-31 warnes - - * [r1534] R/nobs.R, man/nobs.Rd: - Remove dispatch function 'nobs' - and method 'nobs.lm' since these are - now provided by the R 'stats' package. - -2012-05-04 warnes - - * [r1532] DESCRIPTION: Update for next release - * [r1531] NAMESPACE, R/ls.funs.R, man/ls.funs.Rd: Add ls.funs() to - show functions defined in the specified environment. - * [r1530] man/is.what.Rd: Fix enumerate syntax. - -2012-04-03 warnes - - * [r1522] R/startsWith.R: Add startsWith() function. - -2011-10-05 warnes - - * [r1516] man/read.xls.Rd: Fix typo - -2011-09-30 warnes - - * [r1515] inst/NEWS: Update DESCRIPTION and README for 2.9.0 - release. - * [r1514] DESCRIPTION: Update DESCRIPTION and README for 2.9.0 - release. - -2011-09-20 warnes - - * [r1508] man/read.xls.Rd: Improve xls2csv() man page - * [r1507] NAMESPACE: Add case() function, a vector equivalent of - the switch() function - * [r1506] R/case.R, man/case.Rd: Add case() function, a vector - equivalent of the switch() function - -2011-09-02 warnes - - * [r1500] NAMESPACE: Add 'centerText' function to center text - strings for a specified width. - * [r1499] R/centerText.R, man/centerText.Rd: Add 'centerText' - function to center text strings for a specified width. - -2011-04-16 warnes - - * [r1469] DESCRIPTION, inst/NEWS: Update for release 2.8.2 - -2011-04-15 warnes - - * [r1468] R/dQuote.ascii.R, R/installXLSXsupport.R, R/read.xls.R, - R/sheetCount.R, R/xls2sep.R: Fix errors on windows when R or Perl - install path includes spaces by properly quoting the path. - * [r1467] R/xlsFormats.R: Fix error in xlsFormat() on windows when - R or Perl install path includes spaces by quoting the path. 
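The r1541 and r1600 entries mention read.xls()'s na.strings handling and its fileEncoding pass-through; a hedged sketch of such a call (the file name is hypothetical):

library(gdata)

dat <- read.xls("measurements.xls",               # hypothetical latin-1 encoded workbook
                sheet = 1,
                na.strings = c("NA", "#DIV/0!"),  # turn Excel division errors into NA
                fileEncoding = "latin1")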
- -2011-01-15 ggorjan - - * [r1465] NAMESPACE, R/nPairs.R, inst/NEWS, - inst/unitTests/runit.nPairs.R, man/nPairs.Rd: Adding summary - method for nPairs - -2010-11-12 warnes - - * [r1462] inst/NEWS: Update NEWS for gdata 2.8.1 - * [r1461] DESCRIPTION: Update DEScription file for 2.8.1 release - * [r1460] tests/test.read.xls.Rout.save, - tests/tests.write.fwf.Rout.save: Update test output to match - latest code - * [r1459] R/write.fwf.R, man/write.fwf.Rd, - tests/test.write.fwf.eol.R: Modify write.fwf() to capture and - pass on additional arguments for - write.table(). This resolves a bug reported by Jan Wijffels. - -2010-11-01 arnima - - * [r1453] man/Args.Rd: Minor improvement in Args.Rd help page - -2010-10-19 warnes - - * [r1452] R/onAttach.R, R/xls2sep.R: Avoid use of file.access() - which is unreliable on Windows network shares. - -2010-07-08 ggrothendieck2 - - * [r1448] R/xls2sep.R: findPerl call added to xls2sep - -2010-07-07 ggrothendieck2 - - * [r1447] man/read.xls.Rd: small improvements to read.xls.Rd - -2010-05-03 warnes - - * [r1439] NAMESPACE, R/installXLSXModules.R, - R/installXLSXsupport.R, R/onAttach.R, inst/NEWS, - man/installXLSXsupport.Rd, man/xlsFormats.Rd: Rename - installXLSXModules() to installXLSXsupport() and provide - documentation for it. - * [r1438] inst/NEWS: Update news for gdata 2.8.0 - * [r1437] DESCRIPTION, NAMESPACE, R/installXLSXModules.R, - R/onAttach.R, inst/perl/install_modules.pl, - inst/perl/module_tools.pl, tests/test.read.xls.R: Add .onAttach - function to check & inform user if perl is available, to check - whether XLS and XLSX formats are avaiable, and to run the (new) - installXLSXModules() functon to attempt to install the necessar - libraries if not. Added installXLSXModules() function. - -2010-05-02 warnes - - * [r1436] man/xlsFormats.Rd: Correct error in xlsFormat example - * [r1435] DESCRIPTION, NAMESPACE, R/dQuote.ascii.R, R/findPerl.R, - R/read.xls.R, R/xlsFormats.R, inst/doc/gregmisc.tex, - inst/perl/install_modules.pl, inst/perl/module_tools.pl, - inst/perl/sheetCount.pl, inst/perl/supportedFormats.pl, - inst/perl/xls2csv.pl, man/ConvertMedUnits.Rd, - man/aggregate.table.Rd, man/combine.Rd, man/interleave.Rd, - man/matchcols.Rd, man/nobs.Rd, man/read.xls.Rd, - man/rename.vars.Rd, man/reorder.Rd, man/resample.Rd, - man/sheetCount.Rd, man/trim.Rd, man/unmatrix.Rd, - man/upperTriangle.Rd, man/xlsFormats.Rd, src, - tests/test.read.xls.R, tests/test.read.xls.Rout.save, - tests/tests.write.fwf.Rout.save: Update perl code to work (but - generate warnings) when Zlib or SpreadSheet::XLXS is not - instaled. Also update Greg's email address - -2010-02-21 ggrothendieck2 - - * [r1423] R/read.xls.R, man/read.xls.Rd: isOpen problems fixed - (isOpen must have changed in R since this worked in earlier - versions). Also nba.xls link in read.xls.Rd disappeared. Replaced - with similar link. - -2010-02-20 ggrothendieck2 - - * [r1422] INSTALL: improved INSTALL file - -2010-02-19 ggrothendieck2 - - * [r1421] INSTALL, R/dQuote.ascii.R, R/read.xls.R, R/sheetCount.R, - inst/NEWS: added findPerl to locate ActiveState Perl on Windows - if perl= not specified and Rtools perl would have otherwise been - used. Also added INSTALL file. 
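The r1459 entry notes that write.fwf() now forwards additional arguments to write.table(); a minimal sketch with an invented data frame:

library(gdata)

df <- data.frame(id = 1:3, name = c("ab", "c", "def"), value = c(1.5, 22, 3))

# 'eol' (and other write.table() arguments) are passed through
write.fwf(df, file = tempfile(fileext = ".txt"), eol = "\r\n")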
- -2010-01-28 warnes - - * [r1419] DESCRIPTION, inst/NEWS: Update for release 2.7.1 - * [r1418] R/xls2sep.R: xls2sep(): Show output of perl call when - verbose=T - * [r1417] src/build.bat: More Win32 fixes - * [r1416] src/Makefile, src/Makefile.win, src/build.bat: More work - on Win32 building - * [r1415] src/Makefile, src/Makefile.win, src/build.bat: Support - building Compress::Raw::Zlib perl package under windows. - -2010-01-26 warnes - - * [r1413] inst/NEWS: Fix typos - * [r1412] R/sheetCount.R: Show more details in sheetCount() when - verbose=TRUE - -2010-01-24 warnes - - * [r1411] R/xls2sep.R: Replace two calls to 'dQuote', to - 'dQuote.ascii' - * [r1408] inst/doc/mapLevels.pdf, inst/doc/unknown.pdf: Remove - auto-generated pdf files from svn - * [r1407] src/Makefile: create 'distclean' to remove perl binary - dir, currently mac-only - * [r1406] R/read.xls.R, R/xls2sep.R: Make read.xls() and xls2sep() - quieter when verbose=FALSE - * [r1405] tests/test.read.xls.R, tests/test.read.xls.Rout.save: Add - tests for read.xls, sheetCount, and sheetNames - * [r1404] src/Makefile: Modify makefile to 1) clean up after build, - 2) make tar non-verbose - * [r1403] R/read.xls.R, R/sheetCount.R: Close connections when - done. - * [r1402] man/read.xls.Rd: Fix typo - * [r1401] man/read.xls.Rd, man/sheetNames.Rd: Fix R CMD CHECK - errors - * [r1400] src/Compress-Raw-Zlib-2.024, - src/Compress-Raw-Zlib-2.024.tar.gz, src/Makefile: Use the - original gz file for Compress::Raw::Zlib to avoid issues with - 'non-platform-independent' filename error in R CMD CHECK - * [r1399] inst/perl/Archive/README-Archive-Zip, - inst/perl/Archive/README-Archive::Zip: Rename files to remove R - CMD check error - * [r1398] DESCRIPTION, inst/NEWS, inst/doc/mapLevels.pdf, - inst/doc/unknown.pdf: Update for 2.7.0 release - * [r1397] NAMESPACE: Add new functions to NAMESPACE - * [r1396] src, src/Compress-Raw-Zlib-2.024, - src/Compress-Raw-Zlib-2.024/Changes, - src/Compress-Raw-Zlib-2.024/MANIFEST, - src/Compress-Raw-Zlib-2.024/META.yml, - src/Compress-Raw-Zlib-2.024/Makefile.PL, - src/Compress-Raw-Zlib-2.024/README, - src/Compress-Raw-Zlib-2.024/Zlib.xs, - src/Compress-Raw-Zlib-2.024/config.in, - src/Compress-Raw-Zlib-2.024/examples, - src/Compress-Raw-Zlib-2.024/examples/filtdef, - src/Compress-Raw-Zlib-2.024/examples/filtinf, - src/Compress-Raw-Zlib-2.024/fallback, - src/Compress-Raw-Zlib-2.024/fallback/constants.h, - src/Compress-Raw-Zlib-2.024/fallback/constants.xs, - src/Compress-Raw-Zlib-2.024/lib, - src/Compress-Raw-Zlib-2.024/lib/Compress, - src/Compress-Raw-Zlib-2.024/lib/Compress/Raw, - src/Compress-Raw-Zlib-2.024/lib/Compress/Raw/Zlib.pm, - src/Compress-Raw-Zlib-2.024/pod, - src/Compress-Raw-Zlib-2.024/pod/FAQ.pod, - src/Compress-Raw-Zlib-2.024/ppport.h, - src/Compress-Raw-Zlib-2.024/private, - src/Compress-Raw-Zlib-2.024/private/MakeUtil.pm, - src/Compress-Raw-Zlib-2.024/t, - src/Compress-Raw-Zlib-2.024/t/000prereq.t, - src/Compress-Raw-Zlib-2.024/t/01version.t, - src/Compress-Raw-Zlib-2.024/t/02zlib.t, - src/Compress-Raw-Zlib-2.024/t/07bufsize.t, - src/Compress-Raw-Zlib-2.024/t/09limitoutput.t, - src/Compress-Raw-Zlib-2.024/t/18lvalue.t, - src/Compress-Raw-Zlib-2.024/t/99pod.t, - src/Compress-Raw-Zlib-2.024/t/Test, - src/Compress-Raw-Zlib-2.024/t/Test/Builder.pm, - src/Compress-Raw-Zlib-2.024/t/Test/More.pm, - src/Compress-Raw-Zlib-2.024/t/Test/Simple.pm, - src/Compress-Raw-Zlib-2.024/t/compress, - src/Compress-Raw-Zlib-2.024/t/compress/CompTestUtils.pm, - src/Compress-Raw-Zlib-2.024/typemap, - 
src/Compress-Raw-Zlib-2.024/zlib-src, - src/Compress-Raw-Zlib-2.024/zlib-src/adler32.c, - src/Compress-Raw-Zlib-2.024/zlib-src/compress.c, - src/Compress-Raw-Zlib-2.024/zlib-src/crc32.c, - src/Compress-Raw-Zlib-2.024/zlib-src/crc32.h, - src/Compress-Raw-Zlib-2.024/zlib-src/deflate.c, - src/Compress-Raw-Zlib-2.024/zlib-src/deflate.h, - src/Compress-Raw-Zlib-2.024/zlib-src/infback.c, - src/Compress-Raw-Zlib-2.024/zlib-src/inffast.c, - src/Compress-Raw-Zlib-2.024/zlib-src/inffast.h, - src/Compress-Raw-Zlib-2.024/zlib-src/inffixed.h, - src/Compress-Raw-Zlib-2.024/zlib-src/inflate.c, - src/Compress-Raw-Zlib-2.024/zlib-src/inflate.h, - src/Compress-Raw-Zlib-2.024/zlib-src/inftrees.c, - src/Compress-Raw-Zlib-2.024/zlib-src/inftrees.h, - src/Compress-Raw-Zlib-2.024/zlib-src/trees.c, - src/Compress-Raw-Zlib-2.024/zlib-src/trees.h, - src/Compress-Raw-Zlib-2.024/zlib-src/uncompr.c, - src/Compress-Raw-Zlib-2.024/zlib-src/zconf.h, - src/Compress-Raw-Zlib-2.024/zlib-src/zlib.h, - src/Compress-Raw-Zlib-2.024/zlib-src/zutil.c, - src/Compress-Raw-Zlib-2.024/zlib-src/zutil.h, src/Makefile: Add - Compress::Raw::Zlib code - * [r1395] man/read.xls.Rd, man/sheetCount.Rd: Add/Update - documentation - * [r1394] R/xls2sep.R: Minor formatting change - * [r1393] inst/xls/ExampleExcelFile.xls, - inst/xls/ExampleExcelFile.xlsx: Add additional example files - * [r1392] inst/perl/sheetCount.pl, inst/perl/sheetNames.pl, - inst/perl/xls2csv.pl: Combine sheetCount.pl and sheetNames.pl and - modify to support Excel 2007 'xlsx' format - * [r1391] inst/perl/Spreadsheet/XLSX.pm, - inst/perl/Spreadsheet/XLSX/Fmt2007.pm, inst/perl/xls2csv.pl: - Complete changes to handle Excel 2007 'xlsx' files - * [r1390] inst/perl/Archive, inst/perl/Archive/README-Archive::Zip, - inst/perl/Archive/Zip, inst/perl/Archive/Zip.pm, - inst/perl/Archive/Zip/Archive.pm, - inst/perl/Archive/Zip/BufferedFileHandle.pm, - inst/perl/Archive/Zip/DirectoryMember.pm, - inst/perl/Archive/Zip/FAQ.pod, - inst/perl/Archive/Zip/FileMember.pm, - inst/perl/Archive/Zip/Member.pm, - inst/perl/Archive/Zip/MemberRead.pm, - inst/perl/Archive/Zip/MockFileHandle.pm, - inst/perl/Archive/Zip/NewFileMember.pm, - inst/perl/Archive/Zip/StringMember.pm, - inst/perl/Archive/Zip/Tree.pm, - inst/perl/Archive/Zip/ZipFileMember.pm, - inst/perl/OLE/README-OLE-Storage_Lite, - inst/perl/Spreadsheet/README-ParseExcel, - inst/perl/Spreadsheet/README-XLS, inst/perl/Spreadsheet/XLSX, - inst/perl/Spreadsheet/XLSX.pm, - inst/perl/Spreadsheet/XLSX/Fmt2007.pm, - inst/perl/Spreadsheet/XLSX/Utility2007.pm, inst/perl/VERSIONS: - Add additional Perl modules to support Excel 2007 'xlsx' files - -2010-01-24 ggrothendieck2 - - * [r1389] NAMESPACE, man/sheetNames.Rd: added sheetNames.Rd - (documenting sheetNames/sheetCount) and updated NAMESPACE file. - * [r1388] inst/NEWS: fixed spacing problem in NEWS - -2010-01-23 warnes - - * [r1387] inst/perl/xls2csv.pl: Check if parsing the xls file - succeeds... Current code doesn't handle new XML-based format - * [r1386] inst/perl/Spreadsheet/XLSX: Remove perl - 'Spreadsheet:XLSX' module since it depends on Compress-Raw-Zlib, - which probably won't be available on most machines, and I don't - have time to figure out how to get R to build it properly when - gdata is installed. 
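The entries above add XLSX support and worksheet helpers; a short sketch using the example workbook the changelog adds, assuming Perl and the bundled modules are available:

library(gdata)

xlsx <- system.file("xls", "ExampleExcelFile.xlsx", package = "gdata")

sheetCount(xlsx)            # number of worksheets
sheetNames(xlsx)            # their names
read.xls(xlsx, sheet = 1)   # read the first worksheet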
- * [r1385] inst/perl/Spreadsheet/XLSX, - inst/perl/Spreadsheet/XLSX/Fmt2007.pm, - inst/perl/Spreadsheet/XLSX/Utility2007.pm: Add perl - 'Spreadsheet:XLSX' module to support new Excel XML format files - * [r1384] R/xls2sep.R: Add xls2tsv() convenience wrapper to - xls2sep() - * [r1383] R/read.xls.R, R/xls2sep.R: Update to match new xls2csv.pl - code, allow specification of sheets by name, support CSV and TAB - delimited files using the same code, other minor changes. - * [r1382] R/sheetCount.R: Add sheetNames() function to extract the - names from XLS files - * [r1381] inst/bin/xls2csv.bat: Fix xls2csv.bat - * [r1380] inst/perl/xls2csv.pl: If only one sheet is present in the - file, don't insert the sheet name into the filename - * [r1379] inst/xls/ExampleExcelFile.xls, - inst/xls/ExampleExcelFile.xlsx: Add additional test/example Excel - files - * [r1378] inst/perl/xls2csv.pl, inst/perl/xls2tab.pl, - inst/perl/xls2tsv.pl: Modify xls2csv.pl script to: - - Use tab-delimiter and .tsv or .tab extension if called with the - name - xls2tsv.pl or xls2tab.pl, respectively. This allows a single - source - file and two symlinks to be used intstead of maintaining several - almost-identical files. - - Allow selection of sheets by name - - Provide better error checking - - Other code improvements - * [r1377] inst/perl/sheetCount.pl, inst/perl/sheetNames.pl: Add - perl scripts to extract worksheet names and sheet count from - Excel files - -2010-01-22 warnes - - * [r1376] inst/perl/OLE/Storage_Lite.pm: Upgrade Perl - OLE::StorageLight module to version 0.19 - * [r1375] inst/perl/Spreadsheet/ParseExcel.pm, - inst/perl/Spreadsheet/ParseExcel/Cell.pm, - inst/perl/Spreadsheet/ParseExcel/Dump.pm, - inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm, - inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm, - inst/perl/Spreadsheet/ParseExcel/Font.pm, - inst/perl/Spreadsheet/ParseExcel/Format.pm, - inst/perl/Spreadsheet/ParseExcel/SaveParser, - inst/perl/Spreadsheet/ParseExcel/SaveParser.pm, - inst/perl/Spreadsheet/ParseExcel/SaveParser/Workbook.pm, - inst/perl/Spreadsheet/ParseExcel/SaveParser/Worksheet.pm, - inst/perl/Spreadsheet/ParseExcel/Utility.pm, - inst/perl/Spreadsheet/ParseExcel/Workbook.pm, - inst/perl/Spreadsheet/ParseExcel/Worksheet.pm: Upgrade perl - Spreadsheet::ParseExcel to version 0.56 - * [r1374] DESCRIPTION: Add complete list of contributors - -2010-01-22 arnima - - * [r1373] man/keep.Rd: Minor improvement in help page - * [r1371] R/Args.R, R/env.R, R/is.what.R, R/keep.R, R/ll.R, - man/Args.Rd, man/env.Rd, man/is.what.Rd, man/keep.Rd, man/ll.Rd: - Many small improvements to documentation of Arni's five functions - -2010-01-22 warnes - - * [r1370] R/dQuote.ascii.R, R/read.xls.R, R/sheetCount.R, - R/xls2sep.R: - Move xls2csv(), xls2tab(), xls2sep() to a separate - file - - Move qQuote.ascii to a separate file - - - Bug Fix: xls2csv(), xls2tab() failed to pass the provided - 'perl' - parameter to xls2sep() - - - New Feature: xls2sep() (and hence xls2csv, xls2tab, and - read.xls) - now supports ftp URLs. - -2009-12-06 arnima - - * [r1369] R/Args.R, man/Args.Rd: Minor improvements of Args(). 
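r1371 above touches Arni's workspace helpers (Args, env, is.what, keep, ll); a rough, non-authoritative illustration of two of them (object names are arbitrary):

library(gdata)

a <- 1:10
b <- data.frame(x = letters)

ll()                  # list workspace objects with their class and size
keep(a)               # preview what would be removed if only 'a' were kept
keep(a, sure = TRUE)  # actually remove everything except 'a'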
- * [r1368] R/ll.R, man/ll.Rd: Improved ll() so user can limit output - to specified classes - -2009-11-16 arnima - - * [r1366] R/ll.R: ll(.GlobalEnv) does not crash anymore - -2009-08-20 warnes - - * [r1357] man/cbindX.Rd, man/getDateTimePart.Rd, man/mapLevels.Rd, - man/nPairs.Rd, man/trim.Rd, man/trimSum.Rd, man/unknown.Rd, - man/write.fwf.Rd: Replace \ldots with \dots to make the new R CMD - CHECK happy. - -2009-08-19 warnes - - * [r1355] DESCRIPTION: Update for 2.6.1 release - * [r1354] inst/unitTests/runit.getDateTimeParts.R: Modify unit - tests to avoid issues related to zime zones. - -2009-08-05 warnes - - * [r1353] inst/doc/mapLevels.pdf, inst/doc/unknown.pdf: Update - vignettes for 2.6.0 release - * [r1352] man/frameApply.Rd: Fix formatting warning in frameApply - man page - -2009-07-16 ggorjan - - * [r1350] man/write.fwf.Rd: Reverting recent change and clarifying - the meaning. - -2009-07-16 warnes - - * [r1349] inst/doc/mapLevels.pdf, inst/doc/unknown.pdf, - man/resample.Rd: Add contents of \value section for resample() - man page - * [r1348] tests/tests.write.fwf.Rout.save: Update test output to - remove R CMD check warning - * [r1347] inst/NEWS: Update ChangeLog and NEWS for gdata 2.6.0 - release - * [r1346] DESCRIPTION: Update DESCRIPTION file for gdata 2.6.0 - * [r1345] inst/doc/gregmisc.tex, inst/doc/mapLevels.pdf, - inst/doc/unknown.pdf, man/ConvertMedUnits.Rd, - man/aggregate.table.Rd, man/combine.Rd, man/interleave.Rd, - man/matchcols.Rd, man/nobs.Rd, man/rename.vars.Rd, - man/reorder.Rd, man/trim.Rd, man/unmatrix.Rd, - man/upperTriangle.Rd: Correct Greg's email address - * [r1344] man/write.fwf.Rd: Correct minor typos in write.fwf() man - page - * [r1343] man/resample.Rd: Correct page for resample() - * [r1342] NAMESPACE, R/read.xls.R, inst/perl/xls2tab.pl, - man/read.xls.Rd: Add support for using tab for field separator - during translation from xls format in read.xls - -2009-04-19 arnima - - * [r1314] R/env.R, R/ll.R: Changed object.size(object) to - unclass(object.size(object)). - -2008-12-31 ggorjan - - * [r1312] NAMESPACE, inst/NEWS: Documenting changes and exporting - the functions. - * [r1311] R/object.size.R, man/humanReadable.Rd, - man/object.size.Rd: Enhanced function object.size that returns - the size of multiple objects. There is also a handy print method - that can print size of an object in "human readable" format when - options(humanReadable=TRUE) or print(object.size(x), - humanReadable=TRUE). - * [r1310] R/wideByFactor.R, inst/unitTests/runit.wideByFactor.R, - man/wideByFactor.Rd: New function wideByFactor that reshapes - given dataset by a given factor - it creates a "multivariate" - data.frame. - * [r1309] R/nPairs.R, inst/unitTests/runit.nPairs.R, man/nPairs.Rd: - New function nPairs that gives the number of variable pairs in a - data.frame or a matrix. - * [r1308] R/getDateTimeParts.R, - inst/unitTests/runit.getDateTimeParts.R, man/getDateTimePart.Rd: - New functions getYear, getMonth, getDay, getHour, getMin, and - getSec for extracting the date/time parts from objects of a - date/time class. - * [r1307] R/bindData.R, inst/unitTests/runit.bindData.R, - man/bindData.Rd: New function bindData that binds two data frames - into a multivariate data frame in a different way than merge. 
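The r1308 entry introduces getYear(), getMonth(), getDay(), getHour(), getMin(), and getSec(); a minimal sketch on a Date and a POSIXct value:

library(gdata)

d <- as.Date("2009-08-20")
getYear(d)    # year component
getMonth(d)   # month component
getDay(d)     # day component

tm <- as.POSIXct("2009-08-20 13:45:30", tz = "UTC")
getHour(tm)
getMin(tm)
getSec(tm)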
- * [r1306] R/runRUnitTests.R, inst/unitTests/Makefile, - inst/unitTests/runRUnitTests.R, man/gdata-package.Rd, - man/runRUnitTests.Rd, tests/doRUnit.R: New function - .runRUnitTestsGdata that enables run of all RUnit tests during - the R CMD check as well as directly from within R. - -2008-12-20 ggorjan - - * [r1305] NAMESPACE, R/trimSum.R, inst/NEWS, - inst/unitTests/runit.trimSum.R, man/trimSum.Rd: - * [r1304] tests/tests.write.fwf.Rout.save: To remove some output in - the R CMD check - -2008-08-05 ggorjan - - * [r1300] DESCRIPTION, NAMESPACE, R/cbindX.R, R/write.fwf.R, - inst/NEWS, inst/doc/mapLevels.pdf, inst/doc/unknown.pdf, - inst/unitTests/runit.cbindX.R, inst/unitTests/runit.write.fwf.R, - man/cbindX.Rd, man/write.fwf.Rd, tests/tests.write.fwf.R, - tests/tests.write.fwf.Rout.save: - Increased version to 2.5.0 - - - New function cbindX that can bind objects with different number - of rows. - - - write.fwf gains width argument. Unknown values can increase or - decrease - the width of the columns. Additional tests and documentation - fixes. - -2008-06-30 arnima - - * [r1299] R/env.R, R/ll.R, man/env.Rd, man/ll.Rd: Simplified - default 'unit' argument from c("KB","MB","bytes") to "KB". - -2008-05-13 warnes - - * [r1270] inst/NEWS, inst/doc/mapLevels.pdf, inst/doc/unknown.pdf: - Update NEWS file for 2.4.2 - * [r1269] R/read.xls.R: Use path.expand() to give proper full path - to xls file to be translated by read.xls() - * [r1268] R/read.xls.R: Modifed read.xls() failed to return the - converted data... fixed. - * [r1267] inst/perl/Spreadsheet/ParseExcel/Utility.pm: Correct - broken patch for open-office support - * [r1266] DESCRIPTION, R/read.xls.R: For read.xls() and xls2csv(): - - Implement more informative log messages when verbose=TRUE - - Quote temporary file name to avoid errors when calling perl to - do the work. - - Add better error messages, particularly when perl fails to - generate an - output .csv file. - - Update version number in DESCRIPTION. - -2008-05-12 warnes - - * [r1265] inst/perl/Spreadsheet/ParseExcel/Utility.pm: Patch to - correct issue with OpenOffice-created XLS files. Thanks to - Robert Burns for pointing out the patch at - http://rt.cpan.org/Public/Bug/Display.html?id=7206 - -2008-03-25 warnes - - * [r1250] DESCRIPTION, inst/NEWS, inst/doc/mapLevels.pdf, - inst/doc/unknown.pdf: Update for version 2.4.1 - * [r1249] inst/xls/iris.xls: Example iris.xls file didn't complete - & properly formatted iris data set. Fixed. - * [r1248] inst/perl/IO/AtomicFile.pm, inst/perl/IO/InnerFile.pm, - inst/perl/IO/Lines.pm, inst/perl/IO/Scalar.pm, - inst/perl/IO/ScalarArray.pm, inst/perl/IO/Stringy.pm, - inst/perl/IO/Wrap.pm, inst/perl/IO/WrapTie.pm, - inst/perl/OLE/Storage_Lite.pm, - inst/perl/Spreadsheet/ParseExcel.pm, - inst/perl/Spreadsheet/ParseExcel/Dump.pm, - inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm, - inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm, - inst/perl/Spreadsheet/ParseExcel/SaveParser.pm, - inst/perl/Spreadsheet/ParseExcel/Utility.pm: Update perl modules - to latest versions - -2008-03-24 warnes - - * [r1247] man/read.xls.Rd: Fix typo in win32 example for read.xls() - -2008-03-11 warnes - - * [r1246] NAMESPACE: Add xls2csv to exported function list - -2008-01-30 warnes - - * [r1241] ChangeLog, DESCRIPTION, inst/NEWS: Update DESCRIPTION and - NEWS for release 2.4.0 - -2008-01-29 arnima - - * [r1240] man/keep.Rd: Added argument 'all'. 
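The r1300 entry adds cbindX(), which binds objects with differing numbers of rows; a small sketch:

library(gdata)

a <- data.frame(x = 1:3)
b <- data.frame(y = 1:2)

# cbind(a, b) would fail here; cbindX() pads the shorter object with NA
cbindX(a, b)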
- * [r1239] R/keep.R: Added argument 'all'. - -2007-10-22 warnes - - * [r1196] DESCRIPTION: Clarify GPL version - -2007-09-10 ggorjan - - * [r1169] man/upperTriangle.Rd: removed unmatched brace - * [r1168] man/gdata-package.Rd: adding alias - -2007-09-06 ggorjan - - * [r1162] man/gdata-package.Rd: keyword - -2007-08-21 ggorjan - - * [r1154] man/gdata-package.Rd: package help page - * [r1153] NEWS, inst/NEWS: move - * [r1152] NEWS: move - -2007-08-20 ggorjan - - * [r1151] inst/doc/mapLevels.tex: clean - * [r1150] inst/doc/mapLevels.Rnw, inst/doc/mapLevels.pdf, - inst/doc/mapLevels.tex: a real vignette - * [r1149] inst/doc/unknown.Rnw, inst/doc/unknown.pdf, - inst/doc/unknown.tex: a real vignette - * [r1148] man/unknown.Rd: additional keyword for searchig - -2007-08-17 ggorjan - - * [r1147] man/unknown.Rd: keyword - -2007-07-22 arnima - - * [r1103] R/Args.R, R/keep.R: Reverted back to - as.character(substitute(x)), so user can run keep(x), - keep("x"), Args(x), and Args("x"). - -2007-07-21 arnima - - * [r1102] R/keep.R: Changed as.character(substitute()) to - deparse(substitute()), following - help(substitute) recommendation. - * [r1101] R/Args.R: Changed as.character(substitute()) to - deparse(substitute()), following - help(substitute) recommendation. - -2007-07-10 warnes - - * [r1099] R/read.xls.R, man/read.xls.Rd: Update read.xls() code and - docs with enhacements by Gabor Grothendieck - -2007-06-06 ggorjan - - * [r1097] inst/doc/unknown.pdf, inst/doc/unknown.tex: last edits - from newsletter - * [r1096] R/drop.levels.R, man/drop.levels.Rd: drop levels as - suggested by Brian Ripley - * [r1095] inst/unitTests/Makefile, tests/doRUnit.R: better - integration of unit tests - * [r1094] R/mapLevels.R, R/unknown.R: making codetools happy - -2007-01-28 arnima - - * [r1042] R/keep.R: Throw warnings rather than errors - -2007-01-27 arnima - - * [r1041] R/keep.R: Meaningful error message is given when - requested object does not exist - * [r1040] R/is.what.R: is.* tests that return NA are not reported - is.what recursion is avoided - -2006-11-30 ggorjan - - * [r1035] R/unknown.R: minor commet to the code - * [r1034] inst/doc/mapLevels.pdf, inst/doc/mapLevels.tex: - description of mapLevels methods - * [r1033] inst/doc/unknown.pdf, inst/doc/unknown.tex: description - of unknown methods - -2006-11-16 ggorjan - - * [r1013] R/c.factor.R, man/c.factor.Rd: seems that c.factor was - not a good idea and there were better examples posted on r-devel - list - -2006-11-14 ggorjan - - * [r1012] man/combine.Rd, man/frameApply.Rd: Removed executable - property - -2006-11-10 ggorjan - - * [r1004] NAMESPACE, NEWS: just formatting - -2006-11-02 ggorjan - - * [r1002] man/mapLevels.Rd, man/unknown.Rd: typos - -2006-10-30 ggorjan - - * [r1001] man/write.fwf.Rd: some more examples for use of read.fwf - after write.fwf - * [r1000] inst/unitTests: ignore for report files - * [r999] tests/tests.write.fwf.Rout.save: Id tag from source - * [r998] NAMESPACE: removing unused import - * [r997] R/write.fwf.R, inst/unitTests/runit.write.fwf.R, - man/write.fwf.Rd, tests/tests.write.fwf.R: Id tag - * [r996] NAMESPACE, NEWS, R/write.fwf.R, - inst/unitTests/runit.write.fwf.R, man/write.fwf.Rd, - tests/tests.write.fwf.R, tests/tests.write.fwf.Rout.save: - write.fwf - * [r995] inst/unitTests/runit.reorder.factor.R: Id tag - * [r994] inst/unitTests/runit.reorder.factor.R: added unit tests - for reorder.factor - * [r993] R/c.factor.R, R/mapLevels.R, R/unknown.R, - inst/unitTests/runit.drop.levels.R, - inst/unitTests/runit.mapLevels.R, 
inst/unitTests/runit.trim.R, - inst/unitTests/runit.unknown.R, man/c.factor.Rd, man/unknown.Rd, - tests/doRUnit.R: mapply keeps names in R 2.4; POSIX unit tests - solved; $ should work now - -2006-10-29 ggorjan - - * [r992] NEWS, R/unknown.R, inst/unitTests/runit.unknown.R, - man/unknown.Rd: fixed problem in tests; added unknown methods and - tests for matrices - * [r991] R/drop.levels.R, R/mapLevels.R, - inst/unitTests/runit.mapLevels.R, man/mapLevels.Rd, - tests/doRUnit.R: sort is generic now; mapply keeps names in R - 2.4.0; some codetools suggestions fixed - * [r990] DESCRIPTION, NAMESPACE: sort is generic from R 2.4.0 - * [r989] DESCRIPTION, NEWS, R/trim.R, man/trim.Rd: trim() gains ... - argument; version bump - * [r988] NEWS, R/reorder.R, man/reorder.Rd: Fixed collision bug - with stats version of reorder.factor - -2006-10-27 warnes - - * [r987] R/c.factor.R, man/c.factor.Rd: Add c() method for factor - objects, submitted by Gregor Gorjanc - -2006-09-19 warnes - - * [r986] NEWS: Update NEWS file for 2.3.0 release - * [r985] inst/unitTests/runit.trim.R: Explicitly set the local in - runit.trim.R to one where leading spaces affect sort order so - that the unit test works properly. - -2006-09-18 warnes - - * [r984] inst/doc/Rnews.sty: Update Rnews.sty to the latest version - * [r983] R/trim.R, inst/unitTests/Makefile, - inst/unitTests/runit.trim.R, man/trim.Rd, tests/doRUnit.R: - Integrate fixes for trim() from Gregor and myself. - * [r982] inst/unitTests/report.html, inst/unitTests/report.txt: - Remove unneeded files. - -2006-09-13 warnes - - * [r981] R/unknown.R, inst/unitTests, inst/unitTests/Makefile, - inst/unitTests/report.html, inst/unitTests/report.txt, - inst/unitTests/runit.drop.levels.R, - inst/unitTests/runit.mapLevels.R, inst/unitTests/runit.trim.R, - inst/unitTests/runit.unknown.R, man/unknown.Rd, tests, - tests/doRUnit.R: Add unknown() and unit test files - * [r980] NAMESPACE, R/drop.levels.R, R/mapLevels.R, R/trim.R: More - fixes from Gregor Gorjanc - * [r979] DESCRIPTION, NAMESPACE, R/combineLevels.R, R/mapFactor.R, - R/mapLevels.R, man/combineLevels.Rd, man/mapFactor.Rd, - man/mapLevels.Rd: Add mapLevels functions from Gregor Gorjanc, - along with associated unit tests. - -2006-08-03 warnes - - * [r978] DESCRIPTION, NAMESPACE, R/combineLevels.R, R/mapFactor.R, - man/combineLevels.Rd, man/mapFactor.Rd: Add Gregor Gorjanc's - mapFactor() and combineLevels() functions. - -2006-08-02 warnes - - * [r977] inst/doc/gregmisc.tex, man/ConvertMedUnits.Rd, - man/aggregate.table.Rd, man/combine.Rd, man/interleave.Rd, - man/matchcols.Rd, man/nobs.Rd, man/read.xls.Rd, - man/rename.vars.Rd, man/reorder.Rd, man/trim.Rd, man/unmatrix.Rd, - man/upperTriangle.Rd: Update my email address - * [r976] data/MedUnits.rda: Remove MedUnits.rda to convert to - binary format - * [r975] data/MedUnits.rda: Remove MedUnits.rda to convert to - binary format - * [r974] DESCRIPTION: Update version number - * [r973] NAMESPACE, R/drop.levels.R, R/trim.R, man/drop.levels.Rd, - man/trim.Rd: Integrate changes suggested by Gregor Gorjanc - -2006-03-14 nj7w - - * [r940] NAMESPACE, R/trim.R, man/resample.Rd: Fixed R CMD check - errors and added trim.default to NAMESPACE - -2006-03-13 nj7w - - * [r939] R/trim.R: Added trim.character and trim.factor as per - Gregor's suggestions - -2006-01-03 warnes - - * [r839] NAMESPACE, R/resample.R, man/resample.Rd: Add resample() - function, which generates a random sample or - permutation from the elements of the supplied vector, even if the - vector has length 1. 
This avoide the problems caused by - base::sample()'s special case for vectors of length 1, where it - attempts to sample from 1:x. - -2005-12-13 nj7w - - * [r806] ChangeLog, NEWS: Updated news and removed changelog - -2005-12-12 nj7w - - * [r798] DESCRIPTION, man/interleave.Rd: Updated version number for - CRAN release - -2005-12-08 warnes - - * [r789] R/interleave.R: Andrew Burgess reported that interleave() - converts 1-column matrixes - to vectors and provided a patch. A slight modification of his - patch - has been applied. There is now a 'drop' argument, which controls - whether 'unnecessary' dimensions are dropped. The default is - FALSE. - -2005-12-04 warnes - - * [r779] man/interleave.Rd: Andrew Burgess reported that - interleave() converts 1-column matrixes - to vectors and provided a patch. A slight modification of his - patch - has been applied. There is now a 'drop' argument, which controls - whether 'unnecessary' dimensions are dropped. The default is - FALSE. - -2005-12-01 nj7w - - * [r775] man/combine.Rd, man/reorder.Rd: Updated Greg's email - address - * [r774] man/drop.levels.Rd, man/frameApply.Rd, man/ll.Rd, - man/read.xls.Rd: Updated Jim's email address - -2005-11-21 arnima - - * [r744] R/ll.R: Suppressed warning message in attach() call. - -2005-10-27 warnes - - * [r716] DESCRIPTION: Bump version number again to show that I - fixed a bug. - * [r715] DESCRIPTION, R/upperTriangle.R: Update version number - * [r714] man/frameApply.Rd, man/reorder.Rd: Remove explicit loading - of gtools in examples, so that failure to - import functions from gtools gets properly caught by running the - examples. - * [r713] man/upperTriangle.Rd: Add missing close-bracket - * [r712] NAMESPACE: Add upperTriangle and friends - * [r711] R/upperTriangle.R, man/upperTriangle.Rd: Add functions for - extracting, modifying upper and lower trianglular - components of matrices. - -2005-10-19 arnima - - * [r695] R/is.what.R: Replaced the "not.using" vector with a more - robust try(get(test)) to find out whether a particular is.* - function returns a logical of length one. - -2005-09-12 nj7w - - * [r671] man/aggregate.table.Rd, man/interleave.Rd, - man/matchcols.Rd, man/nobs.Rd, man/read.xls.Rd, - man/rename.vars.Rd, man/trim.Rd, man/unmatrix.Rd: Updated Greg's - email - -2005-09-06 nj7w - - * [r661] man/reorder.Rd: Added library(gtools) in the example - * [r660] NAMESPACE: Removed gtools dependency from NAMESPACE, as it - was being used only in an example, and was giving warning - * [r659] DESCRIPTION: Added Suggests field - -2005-09-02 nj7w - - * [r658] man/frameApply.Rd: Updated the example in frameApply - * [r656] NEWS: Added NEWS - * [r654] ChangeLog: ChangeLog - -2005-08-31 nj7w - - * [r644] DESCRIPTION: Added DESCRIPTION file - * [r643] DESCRIPTION.in: removed DESCRIPTION.in - -2005-07-20 nj7w - - * [r631] man/ll.Rd: updated documentation - * [r630] R/Args.R: ## Args() was using a different search path from - args(), e.g. - rep <- function(local) return(NULL) - args() - Args() - ## Fixed - * [r629] R/is.what.R: ## is.what() was giving needless warnings for - functions, e.g. - is.what(plot) - ## Fixed - * [r628] R/ll.R: ## ll() was crashing if argument was a list of - length zero, e.g. 
- x <- list() - ll(x) - ## Fixed, and added sort.elements (see new help page) - -2005-06-09 nj7w - - * [r625] R/Args.R, R/aggregate.table.R, R/combine.R, R/elem.R, - R/env.R, R/frameApply.R, R/interleave.R, R/is.what.R, R/keep.R, - R/ll.R, R/matchcols.R, R/nobs.R, R/read.xls.R, R/rename.vars.R, - R/reorder.R, R/trim.R, R/unmatrix.R, inst/perl/IO/AtomicFile.pm, - inst/perl/IO/InnerFile.pm, inst/perl/IO/Lines.pm, - inst/perl/IO/Scalar.pm, inst/perl/IO/Scalar.pm.html, - inst/perl/IO/ScalarArray.pm, inst/perl/IO/Stringy.pm, - inst/perl/IO/Wrap.pm, inst/perl/IO/WrapTie.pm, - man/aggregate.table.Rd, man/combine.Rd, man/drop.levels.Rd, - man/interleave.Rd, man/nobs.Rd, man/rename.vars.Rd, - man/reorder.Rd: Updating the version number, and various help - files to synchronize splitting of gregmisc bundle in 4 individual - components. - -2005-06-07 nj7w - - * [r622] R/drop.levels.R: Reverting to the previous version of - drop.levels.R by replacing - sapply(...) with as.data.frame(lapply(...)) because sapply has - the undesirable effect of converting the object to a matrix, - which in turn coerces the factors to numeric. - -2005-05-13 nj7w - - * [r621] R/read.xls.R: 1) Using dQuote.ascii function in read.xls - as the new version of dQuote doesn't work proprly with UTF-8 - locale. - 2) Modified CrossTable.Rd usage in gmodels - 3) Modified heatmap.2 usage in gplots. - -2005-04-02 warnes - - * [r600] NAMESPACE, R/drop.levels.R, man/drop.levels.Rd: Move - drop.levels() from gtools to gdata. - * [r598] NAMESPACE, R/frameApply.R, man/frameApply.Rd: Move - frameApply() to gdata package. - -2005-03-31 warnes - - * [r586] man/elem.Rd: Comment out example to avoid R CMD check - warnings - -2005-03-22 warnes - - * [r578] NAMESPACE, R/ConvertMedUnits.R, data/MedUnits.Rda, - data/MedUnits.rda, man/ConvertMedUnits.Rd, man/MedUnits.Rd: Fixes - to pass `R CMD check'. - * [r577] R/Args.R, R/env.R, R/ll.R, man/Args.Rd: Integrated fixes - from Arni. - * [r576] man/read.xls.Rd: Improve documentation of 'perl' argument - and give examples. - -2005-03-09 warnes - - * [r573] R/ConvertMedUnits.R, man/ConvertMedUnits.Rd, - man/MedUnits.Rd: - Add ConvertMedUnits() plus documentation - - Add documentation for MedUnits data set. - * [r572] data/MedUnits.Rda: Update MedUnits data file. - * [r571] data/MedUnits.tab: Don't need both .Rda and .tab forms of - the data. - * [r570] data, data/MedUnits.Rda, data/MedUnits.tab: Add MedUnits - data set, which provides conversions between American - 'Conventional' and Standard Intertional (SI) medical units. - -2005-03-01 warnes - - * [r566] man/elem.Rd, man/ll.Rd: - Remove 'elem' call from ll - example. - - Add note to 'elem' man page that it is depreciated and 'll' - should - be used instead. - -2005-02-26 nj7w - - * [r565] NAMESPACE, man/elem.Rd, man/env.Rd, man/ll.Rd, - man/read.xls.Rd: *** empty log message *** - -2005-02-25 warnes - - * [r564] NAMESPACE: Remove ll methods since the base function now - handles lists and data frames. - * [r563] R/elem.R, R/env.R, R/ll.R, man/Args.Rd, man/env.Rd, - man/ll.Rd: Integrate changes submitted by Arni Magnusson - -2005-01-31 warnes - - * [r529] R/read.xls.R, man/read.xls.Rd: Add ability to specify the - perl executable and path. - -2005-01-28 warnes - - * [r526] DESCRIPTION.in, NAMESPACE: Add dependency on stats. - -2005-01-12 warnes - - * [r515] DESCRIPTION.in: Add dependency on R 1.9.0+ to prevent - poeple from installing on old - versions of R which don't support namespaces. 
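The r839 entry further up explains why resample() exists; a sketch contrasting it with base::sample() for a length-one vector:

library(gdata)

x <- 5                 # a "vector" of length one

sample(x, 1)           # base behaviour: samples from 1:5, not from x itself
resample(x, 1)         # always draws from the elements of x, so returns 5

y <- c(3, 7, 9)
resample(y, 2)         # behaves like sample() for longer vectors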
- -2004-12-27 warnes - - * [r509] man/unmatrix.Rd: Update usage to match code. - * [r508] R/unmatrix.R: Replace 'F' with 'FALSE'. - -2004-10-12 warneg - - * [r465] R/unmatrix.R, man/unmatrix.Rd: Add unmatrix() function - -2004-09-27 warneg - - * [r461] DESCRIPTION, DESCRIPTION.in, NAMESPACE, man/.Rhistory: - Updated to pass R CMD check. - -2004-09-03 warneg - - * [r455] inst/xls, inst/xls/iris.xls: added to cvs. - * [r454] inst/perl/xls2csv.pl: Checkin xls2csv.pl. Should have been - in long ago, must have been an oversight - * [r451] R/read.xls.R: Need to look for files using the new package - name. - * [r449] man/read.xls.Rd: Need to use the new package name when - looking for iris.xls. - * [r448] man/ll.Rd: Add ll.list to the to the list of functions - described - * [r447] NAMESPACE: Add ll and friends to the namespace - * [r446] DESCRIPTION, DESCRIPTION.in, NAMESPACE, R/Args.R, - R/aggregate.table.R, R/combine.R, R/elem.R, R/env.R, - R/interleave.R, R/is.what.R, R/keep.R, R/ll.R, R/matchcols.R, - R/nobs.R, R/read.xls.R, R/rename.vars.R, R/reorder.R, R/trim.R, - man/reorder.Rd: initial bundle checkin - -2004-09-02 warneg - - * [r442] DESCRIPTION, DESCRIPTION.in, NAMESPACE, man/.Rhistory: - Initial revision - -2004-08-27 warnes - - * [r441] R/reorder.R, man/reorder.Rd: Fixed bug in mixedsort, and - modified reorder.factor to use mixedsort. - -2004-07-29 warnes - - * [r427] inst/perl, inst/perl/IO, inst/perl/IO/AtomicFile.pm, - inst/perl/IO/InnerFile.pm, inst/perl/IO/Lines.pm, - inst/perl/IO/Scalar.pm, inst/perl/IO/Scalar.pm.html, - inst/perl/IO/ScalarArray.pm, inst/perl/IO/Stringy.pm, - inst/perl/IO/Wrap.pm, inst/perl/IO/WrapTie.pm, inst/perl/OLE, - inst/perl/OLE/Storage_Lite.pm, inst/perl/Spreadsheet, - inst/perl/Spreadsheet/ParseExcel, - inst/perl/Spreadsheet/ParseExcel.pm, - inst/perl/Spreadsheet/ParseExcel/Dump.pm, - inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm, - inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm, - inst/perl/Spreadsheet/ParseExcel/SaveParser.pm, - inst/perl/Spreadsheet/ParseExcel/Utility.pm: Add perl modules to - CVS. - -2004-07-27 warnes - - * [r425] man/read.xls.Rd: Fix typos/spelling. - * [r424] man/read.xls.Rd: Add note that Perl is required for - read.xls to work properly. - -2004-07-16 warnes - - * [r420] R/read.xls.R: Remove the temporary csv file if reading it - in fails. - -2004-06-22 warnes - - * [r377] R/ll.R, man/ll.Rd: Add S3 methods for data frames and - lists. - -2004-06-08 warnes - - * [r371] inst/bin, inst/bin/xls2csv, inst/bin/xls2csv.bat: Moved - from gregmisc/src/. - * [r370] inst/tools: Remove the files in src, instead provide - "pre-installed" perl packages - in inst/perl. - -2004-06-05 warnes - - * [r365] inst/tools/Makefile: Fix typo. - * [r364] inst/tools/Makefile: Fix Unix makefile so that it works - when invoked directly. - * [r363] inst/tools/Makefile: Fixes for Windows - * [r362] man/read.xls.Rd: Minor enhancment to read.xls example. - * [r361] inst/tools/Makefile, inst/xls: - Merge Makefile.win into - Makefile. Makefile.win now just redirects - to Makefile. - - Update xls2csv.bat and xls2csv shell script to correctly obtain - thier installion path and infer the location of the perl code and - libraries. - - The xls2csv.pl script now assumes that the libraries it needs - are - installed into the same directory where it is. 
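The r509/r465 entries concern unmatrix(); a minimal sketch of flattening a named matrix into a named vector:

library(gdata)

m <- matrix(1:4, nrow = 2,
            dimnames = list(c("r1", "r2"), c("c1", "c2")))

unmatrix(m)   # named vector, with names built from the row and column labels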
- -2004-06-04 warnes - - * [r360] inst/tools/Makefile: More changes, indended to improve - installation reliabilty and to make - Makefile and Makefile.win as similar as possible. - -2004-05-27 warnes - - * [r358] inst/tools/Makefile: Clean should remove scripts from - source directory. - * [r357] inst/perl: Moved to xls2csv.pl.in. - * [r354] inst/perl/xls2csv.pl, inst/tools/Makefile: More fixes. - * [r353] man/elem.Rd: Fix missing brace. - * [r352] man/elem.Rd: Add explicit package name to see also links. - * [r351] inst/perl/xls2csv.pl, inst/tools/Makefile: More xls2csv - perl module support changes. - * [r350] inst/tools/Makefile: More changes to fix local - installation of perl modules. - -2004-05-26 warnes - - * [r345] man/read.xls.Rd: Escape underscores in email addresses so - Latex is happy. - -2004-05-25 warnes - - * [r339] inst/perl/xls2csv.pl, inst/tools/Makefile: More changes to - xls2csv code. - * [r337] R/Args.R, man/Args.Rd: Add Args() function contributed by - Arni Magnusson . - * [r335] R/read.xls.R: - Change to call perl directly rather than - depending on the installed - shell script. This should make the code more portable to - MS-Windows - systes. - - - Add additional commants.. - * [r332] inst/tools/Makefile: Makefile now modifies xls2csv.bat - xls2csv.pl and xls2csv to contain an - explicit path to the perl script/libraries. - * [r330] inst/tools/Makefile: R CMD build calls the clean target to - purge build files from the - source tree when packaging. To get use this behavior correctly, - I've - renamed the clean target to cleanup and distclean target to - clean. - * [r329] R/read.xls.R, man/read.xls.Rd: Add read.xls(), a function - to read Microsoft Excel files by - translating them to csv files via the xls2csv.pl script. - * [r326] inst/tools/Makefile: More fixes. Seems to work now. - -2004-05-24 warnes - - * [r325] inst/perl, inst/perl/xls2csv.pl, inst/tools, - inst/tools/Makefile, inst/xls, inst/xls/iris.xls: Add files to - enable inclusion and installation of xls2csv.pl as part - of the package. - -2004-04-01 warnes - - * [r312] R/rename.vars.R, man/rename.vars.Rd: Add function - remove.vars(). - -2004-03-26 warnes - - * [r307] man/reorder.Rd: Contents of package 'mva' moveed to - 'stats'. - * [r298] R/is.what.R: - Fix is.what() for use under R 1.9.0 - - is.what() now uses is.* functions found in any attached frame - -2004-01-21 warnes - - * [r282] R/reorder.R, man/reorder.Rd: - Add ... argument to match - generic provided in mva. - -2004-01-19 warnes - - * [r275] R/elem.R, R/env.R, R/ll.R, man/keep.Rd, man/ll.Rd: - - Integrated (partial) patch submitted by Arni Magnusson to clarify - help text. - - Modifed code to use match.arg(). - -2003-12-15 warnes - - * [r271] R/env.R: - Applied patch from Arni that fixed a bug that - caused env() to crash - if any environment was completely empty - -2003-12-03 warnes - - * [r253] man/elem.Rd, man/ll.Rd: - match function argument defaults - with 'usage' - -2003-12-02 warnes - - * [r249] man/ll.Rd: Add one argument, to match code. - -2003-12-01 warnes - - * [r244] R/elem.R, R/env.R, R/is.what.R, R/keep.R, R/ll.R: - Apply - changes submitted by Arni Magnusson - -2003-11-19 warnes - - * [r229] man/env.Rd, man/is.what.Rd, man/keep.Rd, man/ll.Rd: - Changes to pass R CMD check. - -2003-11-18 warnes - - * [r224] R/elem.R, R/env.R, R/is.what.R, R/keep.R, R/ll.R: - - Convert from MS-Dos to Unix line endings. - - Reformat to 80 columns. 
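Several entries above concern rename.vars() and remove.vars(); a short sketch with an invented data frame:

library(gdata)

df <- data.frame(a = 1:3, b = letters[1:3])

df2 <- rename.vars(df, from = "a", to = "id")   # rename column 'a' to 'id'
remove.vars(df2, names = "b")                   # drop column 'b'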
- -2003-11-17 warnes - - * [r223] man/elem.Rd: Replace 'T' with 'TRUE' to remove R CMD check - error. - * [r222] man/aggregate.table.Rd: Fix syntax error. - -2003-11-10 warnes - - * [r220] R/elem.R, R/env.R, R/is.what.R, R/keep.R, R/ll.R, - man/elem.Rd, man/env.Rd, man/is.what.Rd, man/keep.Rd, man/ll.Rd: - - Add files contributed by Arni Magnusson - . As well as some of my own. - -2003-06-07 warnes - - * [r198] man/aggregate.table.Rd, man/interleave.Rd: - Fixed error - in examples. Had sqrt(var(x)/(n-1)) for the standard - error of the mean instead of sqrt(var(x)/n). - -2003-05-23 warnes - - * [r197] R/matchcols.R, man/matchcols.Rd: - Fixed typos - * [r196] R/matchcols.R, man/matchcols.Rd: - library() backported - from 1.7-devel. This version of the function - adds the "pos=" argument to specify where in the search path the - library should be placed. - - - updated .First.lib to use library(...pos=3) for MASS to avoid - the - 'genotype' data set in MASS from masking the genotype funciton in - genetics when it loads gregmisc - - - Added logit() inv.logit() matchcols() function and - corresponding docs - -2003-05-20 warnes - - * [r195] R/interleave.R: - Omit NULL variables. - * [r194] R/trim.R, man/trim.Rd: - Added function trim() and - assocated docs. - -2003-04-22 warnes - - * [r188] R/reorder.R, man/reorder.Rd: - The mva package (which is - part of recommended) now provides a - generic 'reorder' function. Consequently, the 'reorder' function - here has been renamed to 'reorder.factor'. - - - Removed check of whether the argument is a factor object. - -2003-03-03 warnes - - * [r165] man/reorder.Rd: - Updated to match reorder.Rd which was - exetended to handle factor - label names in addition to numeric indices. - * [r164] R/reorder.R: - Added handling of factor level names in - addition to numeric indexes. - -2002-09-23 warnes - - * [r118] inst, inst/doc, inst/doc/Rnews.dtx, inst/doc/Rnews.sty, - inst/doc/gregmisc.pdf, inst/doc/gregmisc.tex: Added inst/doc - directory and contents to CVS. - * [r117] R/aggregate.table.R, R/combine.R, R/interleave.R, - R/nobs.R, man/aggregate.table.Rd, man/combine.Rd, - man/interleave.Rd, man/nobs.Rd, man/rename.vars.Rd, - man/reorder.Rd: - Modified all files to include CVS Id and Log - tags. - -2002-08-01 warnes - - * [r112] R/reorder.R: Added reorder() function to reorder the - levels of a factor. - -2002-04-09 warneg - - * [r109] R/rename.vars.R, man/aggregate.table.Rd, - man/interleave.Rd, man/reorder.Rd: Checkin for version 0.5.3 - * [r108] R/interleave.R: - Properly handle case when some or all - arguments are vectors. - -2002-03-26 warneg - - * [r104] man/reorder.Rd: - Changed methods to include '...' to - match the generic. - - Updated for version 0.5.1 - * [r102] R/nobs.R: Added ... to methods. - * [r101] man/nobs.Rd: Updated to add ... parameter to function - calls. - * [r98] man/reorder.Rd: Initial checkin. - * [r95] R/nobs.R: - Added CVS tags - -2002-02-21 warneg - - * [r87] R/aggregate.table.R: - Fixed bug where row and column - labels didn't always correspond to the - contents. This only occured when a factor was used for by1 or by2 - and - the factors levels weren't in the default sort order. - -2002-02-20 warneg - - * [r86] R/aggregate.table.R: New function. - * [r85] man/aggregate.table.Rd: Initial checkin. - * [r84] R/interleave.R, man/interleave.Rd: Initial checkin. - * [r83] man/nobs.Rd: Noted that specialized methods exist. - * [r82] man/nobs.Rd: Incorrectly had contents of nobs.R here - instead of help text. Corrected. 
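The r194 entry adds trim(); together with the later switch to the [:blank:] character class noted near the top, a quick sketch of its effect:

library(gdata)

trim(c("  leading", "trailing   ", "\t tabs too \t"))
# "leading" "trailing" "tabs too"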
- * [r81] man/rename.vars.Rd: Minor changes, typo and formatting - fixes. - * [r79] R/nobs.R, man/nobs.Rd: - initial checkin. - -2001-12-12 warneg - - * [r53] man/rename.vars.Rd: Added omitted documentaton for 'info' - parameter. Changed example code - not to use 'Orthodont' data set so that the nlme package is not - required. - -2001-12-08 warneg - - * [r47] R/rename.vars.R: Changed 'T' to 'TRUE' in parameter list. - -2001-12-07 warneg - - * [r45] man/rename.vars.Rd: - Fixed see also link. Mis-typed - 'data.frame' as 'dataframe'. - * [r44] R/rename.vars.R: Added attribution. - * [r43] man/rename.vars.Rd: Added proper attribution to Don - MacQueen. - * [r39] man/rename.vars.Rd: Initial checkin. Unfortunately, I've - lost the email of the person who - sent this to me. I'll credit him/her when I find out who it was! - * [r38] R/rename.vars.R: Initial checkin - -2001-12-05 warneg - - * [r34] R, R/combine.R: - Renamed 'concat' function to 'combine' to - avoid name conflict with - an existing S-Plus function. - * [r32] ., man, man/combine.Rd: - Changed function name 'concat' to - 'combine' and renamed concat.Rd to - combine.Rd - diff -Nru gdata-2.13.3/debian/changelog gdata-2.17.0/debian/changelog --- gdata-2.13.3/debian/changelog 2015-07-30 07:43:28.000000000 +0000 +++ gdata-2.17.0/debian/changelog 2015-07-30 07:43:28.000000000 +0000 @@ -1,3 +1,26 @@ +gdata (2.17.0-1) unstable; urgency=low + + * New upstream release + + * debian/control: Set Build-Depends: to current R version + + -- Dirk Eddelbuettel Wed, 08 Jul 2015 06:35:31 -0500 + +gdata (2.16.1-2) unstable; urgency=low + + * Rebuilding as pbuilder get persistent size mismatch + + -- Dirk Eddelbuettel Sat, 23 May 2015 08:59:28 -0500 + +gdata (2.16.1-1) unstable; urgency=low + + * New upstream release + + * debian/control: Set Build-Depends: to current R version + * debian/control: Set Standards-Version: to current version + + -- Dirk Eddelbuettel Fri, 01 May 2015 21:01:17 -0500 + gdata (2.13.3-1) unstable; urgency=low * New upstream release diff -Nru gdata-2.13.3/debian/control gdata-2.17.0/debian/control --- gdata-2.13.3/debian/control 2015-07-30 07:43:28.000000000 +0000 +++ gdata-2.17.0/debian/control 2015-07-30 07:43:28.000000000 +0000 @@ -2,8 +2,8 @@ Section: gnu-r Priority: optional Maintainer: Dirk Eddelbuettel -Build-Depends: cdbs, debhelper (>= 7.0.0), r-base-dev (>= 3.0.3), r-cran-gtools -Standards-Version: 3.9.5 +Build-Depends: cdbs, debhelper (>= 7.0.0), r-base-dev (>= 3.2.1), r-cran-gtools +Standards-Version: 3.9.6 Package: r-cran-gdata Architecture: all diff -Nru gdata-2.13.3/debian/r-cran-gdata.lintian-overrides gdata-2.17.0/debian/r-cran-gdata.lintian-overrides --- gdata-2.13.3/debian/r-cran-gdata.lintian-overrides 2015-07-30 07:43:28.000000000 +0000 +++ gdata-2.17.0/debian/r-cran-gdata.lintian-overrides 2015-07-30 07:43:28.000000000 +0000 @@ -12,3 +12,4 @@ r-cran-gdata: executable-not-elf-or-script usr/lib/R/site-library/gdata/perl/Spreadsheet/XLSX.pm r-cran-gdata: executable-not-elf-or-script usr/lib/R/site-library/gdata/perl/Spreadsheet/XLSX/Utility2007.pm r-cran-gdata: script-not-executable usr/lib/R/site-library/gdata/bin/xls2csv +r-cran-gdata: executable-not-elf-or-script usr/lib/R/site-library/gdata/perl/Digest/Perl/MD5.pm diff -Nru gdata-2.13.3/DESCRIPTION gdata-2.17.0/DESCRIPTION --- gdata-2.13.3/DESCRIPTION 2014-04-06 06:00:48.000000000 +0000 +++ gdata-2.17.0/DESCRIPTION 2015-07-04 07:51:58.000000000 +0000 @@ -1,11 +1,30 @@ Package: gdata -Title: Various R programming tools for data manipulation -Description: 
Various R programming tools for data manipulation -Depends: R (>= 2.13.0) -SystemRequirements: perl -Imports: gtools -Version: 2.13.3 -Date: 2014-04-04 +Title: Various R Programming Tools for Data Manipulation +Description: Various R programming tools for data manipulation, including: + - medical unit conversions ('ConvertMedUnits', 'MedUnits'), + - combining objects ('bindData', 'cbindX', 'combine', 'interleave'), + - character vector operations ('centerText', 'startsWith', 'trim'), + - factor manipulation ('levels', 'reorder.factor', 'mapLevels'), + - obtaining information about R objects ('object.size', 'elem', 'env', + 'humanReadable', 'is.what', 'll', 'keep', 'ls.funs', + 'Args','nPairs', 'nobs'), + - manipulating MS-Excel formatted files ('read.xls', + 'installXLSXsupport', 'sheetCount', 'xlsFormats'), + - generating fixed-width format files ('write.fwf'), + - extricating components of date & time objects ('getYear', 'getMonth', + 'getDay', 'getHour', 'getMin', 'getSec'), + - operations on columns of data frames ('matchcols', 'rename.vars'), + - matrix operations ('unmatrix', 'upperTriangle', 'lowerTriangle'), + - operations on vectors ('case', 'unknownToNA', 'duplicated2', 'trimSum'), + - operations on data frames ('frameApply', 'wideByFactor'), + - value of last evaluated expression ('ans'), and + - wrapper for 'sample' that ensures consistent behavior for both + scalar and vector arguments ('resample'). +Depends: R (>= 2.3.0) +SystemRequirements: perl (>= 5.10.0) +Imports: gtools, stats, methods, utils +Version: 2.17.0 +Date: 2015-07-02 Author: Gregory R. Warnes, Ben Bolker, Gregor Gorjanc, Gabor Grothendieck, Ales Korosec, Thomas Lumley, Don MacQueen, Arni Magnusson, Jim Rogers, and others @@ -13,6 +32,6 @@ License: GPL-2 NeedsCompilation: no Suggests: RUnit -Packaged: 2014-04-05 21:08:58 UTC; warnes +Packaged: 2015-07-03 23:13:13 UTC; warnes Repository: CRAN -Date/Publication: 2014-04-06 08:00:48 +Date/Publication: 2015-07-04 09:51:58 diff -Nru gdata-2.13.3/inst/ChangeLog gdata-2.17.0/inst/ChangeLog --- gdata-2.13.3/inst/ChangeLog 2014-04-05 18:39:47.000000000 +0000 +++ gdata-2.17.0/inst/ChangeLog 2015-07-03 02:44:25.000000000 +0000 @@ -1,150 +1,255 @@ +2015-06-29 warnes + + * [r2055] inst/ChangeLog: Update ChangeLog + * [r2054] tests/test.humanReadable.Rout.save, + tests/test.read.xls.R, tests/test.read.xls.Rout.save, + tests/test.reorder.factor.Rout.save, + tests/tests.write.fwf.Rout.save: Add note for R CMD check to help + reviewers not freak out when diffs occur because of absence of a + PERL library needed to support XLSX files. + * [r2053] R/upperTriangle.R, man/upperTriangle.Rd: Add 'byrow' + argument to lowerTriangle()/upperTriangle() functions. + +2015-05-02 warnes + + * [r2018] Rename 'trunk' to 'pkg' for compatibility with R-forge + +2015-04-29 warnes + + * [r1993] Update ChangeLog and NEWS again. + * [r1992] Apparentely read.csv() needs different combination of + "fileEncoding=`latin1`" and "encoding=`latin1`" on unix and + windows platforms. + * [r1991] In mapLevels(), use sapply() instead of lapply() to avoid + warning message. + * [r1990] Displaying all the latin1 characters for diff isn't + reliable across platforms. Simply summarize the latin1 data + instead. + * [r1989] Display read latin1 data so that diff can catch changes. + +2015-04-28 warnes + + * [r1988] Update ChangeLog for gdata 2.16.1 + * [r1987] Update NEWS for gdata 2.16.1 + * [r1986] Remove no-longer defined methods. 
+ * [r1985] Summary: Minor formatting changes, use rnorm() for X in + example, and use set.seed() for consistent results. + * [r1984] Summary: Replace unicode single-quote characters with + ASCII ones. + * [r1983] Summary: Call base::sort instead of sort, which has been + redefined by arguments. + * [r1982] Update NEWS and ChangeLog. + * [r1981] Bump version number. + * [r1980] Remove CVS header tag. + * [r1979] Update version requirement for R (>= 2.3.0) and perl + (5.10.0). + * [r1978] - first() and last() are now simply wrappers to + utils::head() and + utils::tail() with a default 'n=1' instead of 'n=6'. + - Move code for left() and right() into a separate file. + * [r1977] If arguments 'X' or 'FUN' is supplied to + reorder.factor(), mimic the + behavior of stats::reorder.default() rather than trying to call + it via + NextMethod. + +2015-04-25 warnes + + * [r1974] List needs a conjuction + * [r1973] Fix spelling errors & typos + * [r1972] Fix typographical errors + * [r1971] Update NEWS and ChangeLog (again) + * [r1970] Remove aggregate.table() entirely + * [r1969] 'test.humanReadable.R' needed set.seed() to make the + results consistent. + * [r1968] Update .save files + * [r1967] Missed on commit. + * [r1966] Modfy write.fwf() to properly handle matrix argument, + avoiding conversion to dataframe unless rownames=TRUE. Add + corresponding unit tests. + * [r1965] Installing PERL modules was failing. Adding CPAN + configuration option fixed the problem. + * [r1964] Error message about executable name was missing one + alternative + * [r1963] Better describe gdata contents + * [r1962] is.* and as.* aren't generics + * [r1961] Add 'justify' argument to print and format object_sizes + methods + * [r1960] Add 'justify' argument to print and format object_sizes + methods + * [r1959] Remove stray call to 'browser' + * [r1958] Update DESCRIPTION, ChangeLog, and NEWS + * [r1957] Complete work on object.size(), object_sizes methods, and + humanReadable. + * [r1956] Add error message if Excel file format is too old + +2015-04-23 warnes + + * [r1953] Update NEWS and ChangeLog + * [r1952] - write.fwf() now properly supports matrix objects, + including matrix + objects wihtout column names. (Reported by Carl Witthoft.) + * [r1951] Remove 'use POSIX' from xls2csv.pl since it is no longer + needed + * [r1939] Update NEWS and ChangeLog + * [r1938] reorder.factor() now hands off processing to + stats:::reorder.default() when either 'X' or 'FUN' is specified. + +2015-04-22 warnes + + * [r1937] Update NEWS and ChangeLog for changes to humanReadable() + * [r1936] Fix 'units' argument of humanReadable() + * [r1935] Update object.size() man page to reflect change in class + of return value from 'object_size' to 'object_sizes' + * [r1934] Update NEWS and ChangeLog for gdata 2.16.0 + * [r1933] Modify gdaata:object.size to generate S3 objects of class + 'object_sizes' (note the final 's') to avoid conflicts with + methods in utils for object_size. + * [r1932] Correct behavior of reorder.factor() when argument 'X' is + supplied by delgating to stats:::reorder.default() + +2015-04-14 warnes + + * [r1929] Update ChangeLog + * [r1928] Remove editorializing + * [r1927] Update NEWS and ChangeLog for gdata 2.15.0 + * [r1926] Add 'scientific' argument to write.fwf to allow control + of whether numeric values can be displated using scientific + notation. 
+ * [r1925] Replace depricated PERL function POSIX::isnumeric with + equivalent regexp + * [r1924] Add gdata ChangeLog to SVN + +2015-04-10 warnes + + * [r1922] Update files for gdata 2.15.0 + +2015-04-08 warnes + + * [r1919] Move first/last/left/right to from gtools to gdata + +2014-08-28 warnes + + * [r1883] Everything works now! + * [r1882] Suppress annoying warnings in + Spreadsheet::ParseXLS::FmtDefalt. + * [r1881] Add tests and corresponding test files for 1900 and 1904 + based XLX/XLSX files + * [r1880] Complete transition from Spreadsheet::XLSX to + Spreadsheet::ParseXLSX + * [r1879] Handle Excel files created on the Mac, where by default + Excel uses + 1904-01-01 as the baseline for dates, rather than the usual + 1900-01-01. + * [r1878] Remove dotfiles + * [r1877] Update for release + * [r1876] Add test for handling fo very wide xls and xlsx files. + * [r1875] Add test for handling fo very wide xls and xlsx files. + * [r1874] Modify code to use latest version of + Spreadsheet::ParseExcel and to replace Spreadsheet::XLSX woth + Spreadsheet::ParseXLSX + * [r1873] Update Spreadsheet::ParseExcel, add + Spreadsheet:ParseXLSX, add dependencies + 2014-04-05 warnes - * [r1799] R/nobs.R: Call stats::nobs instead of - stats:::nobs.default within + * [r1801] Apply same changes to NAToUnknown that were previously + applied to + unknownToNA for POSIXlt. + * [r1800] Update NEWS with latest changes + * [r1799] Call stats::nobs instead of stats:::nobs.default within gdata::nobs.default. This avoids R CMD check warning. - * [r1798] tests/unitTests/runit.unknown.R: Don't compare optional - POSIXlt field. Explicitly compare POSIXlt, with special handling - of '-1' unknown value. - * [r1797] R/mapLevels.R, R/unknown.R: Don't use gdata::: - prefix to access gdata function - * [r1796] DESCRIPTION: Fix syntax error in DESCRIPTION file. - * [r1795] tests/runRUnitTests.R: Package name needs to be defined - outside of if test. - * [r1794] vignettes/Rnews.sty: Style file needed - * [r1793] R/unknown.R, tests/unitTests/runit.unknown.R: The issue - Brian pointed out was an error in the isUnknown() code, not an - error in the unit tests! - * [r1792] tests/unitTests/runit.unknown.R: Apply changes Brian - recommned to NAtoUnknown as well as unknownToNA. - * [r1791] inst/NEWS: Update NEWS file - * [r1790] inst/doc/Rnews.dtx: Don't need latex .dtx source file - * [r1789] inst/doc/mapLevels.Rnw, inst/doc/unknown.Rnw, vignettes, - vignettes/mapLevels.Rnw, vignettes/unknown.Rnw: Move vignettes - from inst/doc/ to vignettes/ - * [r1788] R/aggregate.table.R, man/aggregate.table.Rd, - man/gdata-defunct.Rd: Change 'aggregate.table' from deprecated to - defunct. 
- * [r1787] DESCRIPTION, inst/unitTests, man/gdata-package.Rd, - tests/runRUnitTests.R, tests/unitTests: Complete changes so that - the unit tests are run as part of R CMD check - * [r1786] DESCRIPTION, inst/NEWS: Update NEWS for gdata 2.13.4 - * [r1785] NAMESPACE: Update NAMESPACE file to remove deleted - function - * [r1784] inst/unitTests/Makefile, inst/unitTests/runit.bindData.R, - inst/unitTests/runit.cbindX.R, - inst/unitTests/runit.drop.levels.R, - inst/unitTests/runit.getDateTimeParts.R, - inst/unitTests/runit.mapLevels.R, inst/unitTests/runit.nPairs.R, - inst/unitTests/runit.reorder.factor.R, - inst/unitTests/runit.trim.R, inst/unitTests/runit.trimSum.R, - inst/unitTests/runit.unknown.R, - inst/unitTests/runit.wideByFactor.R, - inst/unitTests/runit.write.fwf.R, tests/Makefile, - tests/runRUnitTests.R, tests/runit.bindData.R, - tests/runit.cbindX.R, tests/runit.drop.levels.R, - tests/runit.getDateTimeParts.R, tests/runit.mapLevels.R, - tests/runit.nPairs.R, tests/runit.reorder.factor.R, - tests/runit.trim.R, tests/runit.trimSum.R, tests/runit.unknown.R, - tests/runit.wideByFactor.R, tests/runit.write.fwf.R: Move unit - test files back to inst/unitTests. Fix up runRUnitTests.R to work - properly in the new location - * [r1783] tests/runit.unknown.R: - For unit tests, don't check for - equality of optional POSIXlt + * [r1798] Don't compare optional POSIXlt field. Explicitly compare + POSIXlt, with special handling of '-1' unknown value. + * [r1797] Don't use gdata::: prefix to access gdata function + + * [r1796] Fix syntax error in DESCRIPTION file. + * [r1795] Package name needs to be defined outside of if test. + * [r1794] Style file needed + * [r1793] The issue Brian pointed out was an error in the + isUnknown() code, not an error in the unit tests! + * [r1792] Apply changes Brian recommned to NAtoUnknown as well as + unknownToNA. + * [r1791] Update NEWS file + * [r1790] Don't need latex .dtx source file + * [r1789] Move vignettes from inst/doc/ to vignettes/ + * [r1788] Change 'aggregate.table' from deprecated to defunct. + * [r1787] Complete changes so that the unit tests are run as part + of R CMD check + * [r1786] Update NEWS for gdata 2.13.4 + * [r1785] Update NAMESPACE file to remove deleted function + * [r1784] Move unit test files back to inst/unitTests. Fix up + runRUnitTests.R to work properly in the new location + * [r1783] - For unit tests, don't check for equality of optional + POSIXlt components. (Bug reported by Brian Ripley). 
- * [r1782] R/runRUnitTests.R, inst/unitTests/Makefile, - inst/unitTests/runRUnitTests.R, inst/unitTests/runit.bindData.R, - inst/unitTests/runit.cbindX.R, - inst/unitTests/runit.drop.levels.R, - inst/unitTests/runit.getDateTimeParts.R, - inst/unitTests/runit.mapLevels.R, inst/unitTests/runit.nPairs.R, - inst/unitTests/runit.reorder.factor.R, - inst/unitTests/runit.trim.R, inst/unitTests/runit.trimSum.R, - inst/unitTests/runit.unknown.R, - inst/unitTests/runit.wideByFactor.R, - inst/unitTests/runit.write.fwf.R, man/runRUnitTests.Rd, - tests/Makefile, tests/runRUnitTests.R, tests/runit.bindData.R, - tests/runit.cbindX.R, tests/runit.drop.levels.R, - tests/runit.getDateTimeParts.R, tests/runit.mapLevels.R, - tests/runit.nPairs.R, tests/runit.reorder.factor.R, - tests/runit.trim.R, tests/runit.trimSum.R, tests/runit.unknown.R, - tests/runit.wideByFactor.R, tests/runit.write.fwf.R: Move unit - test code into the (now) standard location + * [r1782] Move unit test code into the (now) standard location 2014-03-19 arnima - * [r1777] R/keep.R: change warning message to R standards + * [r1777] change warning message to R standards 2013-12-18 arnima - * [r1758] R/ll.R: Retain original list order unless sort=FALSE; - also stop if unnamed list + * [r1758] Retain original list order unless sort=FALSE; also stop + if unnamed list 2013-12-16 warnes - * [r1757] R/trim.R: Trim will now remove all types of - leading/trailing whitespace by using + * [r1757] Trim will now remove all types of leading/trailing + whitespace by using the [:blank:] character class. 2013-06-29 warnes - * [r1692] inst/NEWS: Update NEWS for second try for gdata 2.13.2 - * [r1691] R/ll.R: Simplify ll() by stuffing list arguments into an + * [r1692] Update NEWS for second try for gdata 2.13.2 + * [r1691] Simplify ll() by stuffing list arguments into an environment, avoiding the need to use attach/detach. 2013-06-28 warnes - * [r1685] inst/NEWS: Update NEWS for gdata 2.13.2 - * [r1684] tests/test.read.xls.Rout.save, - tests/tests.write.fwf.Rout.save: Minor update to - tests/*.Rout.save - * [r1683] R/ll.R: Add on.exit() handler to ensure a matching detach - occurs when attach is used in ll() - * [r1682] DESCRIPTION: Update for gdata 2.13.2 - * [r1681] R/aggregate.table.R: Improve deprecated message + * [r1685] Update NEWS for gdata 2.13.2 + * [r1684] Minor update to tests/*.Rout.save + * [r1683] Add on.exit() handler to ensure a matching detach occurs + when attach is used in ll() + * [r1682] Update for gdata 2.13.2 + * [r1681] Improve deprecated message 2013-03-24 warnes - * [r1645] tests/test.read.xls.Rout.save, - tests/tests.write.fwf.Rout.save: Update test files for code - changes - * [r1644] inst/NEWS: Fix formatting in NEWS - * [r1643] DESCRIPTION, inst/NEWS, man/read.xls.Rd, - man/sheetCount.Rd, tests/test.read.xls.R: Replaced calls to - depreciated function ".path.package" with the new public function - "path.package". + * [r1645] Update test files for code changes + * [r1644] Fix formatting in NEWS + * [r1643] Replaced calls to depreciated function ".path.package" + with the new public function "path.package". 2013-01-14 warnes - * [r1639] R/installXLSXsupport.R, R/sheetCount.R, R/xls2sep.R, - R/xlsFormats.R: Replace (obsolete) '.path.package' with - 'find.package' function. + * [r1639] Replace (obsolete) '.path.package' with 'find.package' + function. 2012-09-20 warnes - * [r1622] man/MedUnits.Rd, man/ans.Rd, man/duplicated2.Rd: Correct - .Rd file errors detected by 'R CMD check'. 
- * [r1621] NAMESPACE: Add duplicated() and ans() to the NAMESPACE. - * [r1620] DESCRIPTION, inst/NEWS: Update for gdata 2.13.0. - * [r1619] man/ConvertMedUnits.Rd: Fix typographic error. - * [r1618] R/ans.R, R/duplicated2.R, man/ans.Rd, man/duplicated2.Rd: - Add 'ans()' and 'duplicated()' contributed by Liviu Andronic. + * [r1622] Correct .Rd file errors detected by 'R CMD check'. + * [r1621] Add duplicated() and ans() to the NAMESPACE. + * [r1620] Update for gdata 2.13.0. + * [r1619] Fix typographic error. + * [r1618] Add 'ans()' and 'duplicated()' contributed by Liviu + Andronic. 2012-09-19 warnes - * [r1617] data/MedUnits.rda: Correct column names. Unit columns - were reversed and misspelled. - * [r1616] R/sheetCount.R: Add ignore.stderr to system command in - sheetCmd() to prevent stderr + * [r1617] Correct column names. Unit columns were reversed and + misspelled. + * [r1616] Add ignore.stderr to system command in sheetCmd() to + prevent stderr messages from being included in the captured output from the perl script. 2012-09-12 warnes - * [r1606] DESCRIPTION, inst/NEWS: Update for gdata 2.12.0 - * [r1605] R/aggregate.table.R, man/aggregate.table.Rd: - 'stats::aggregate' was made into a generic on 27-Jan-2010, so - that + * [r1606] Update for gdata 2.12.0 + * [r1605] 'stats::aggregate' was made into a generic on + 27-Jan-2010, so that attempting to call 'aggregate' on a 'table' object will now incorrectly call 'aggregate.table'. Since 'aggregate.table' can be @@ -155,57 +260,47 @@ the 'aggregate.table' function will now display a warning that it is depreciated and recommending the equivalent call to tapply. It will be removed entirely in a future version of gdata. - * [r1604] .Rinstignore: Don't ignore .Rnw files, but do ignore .svn - files. + * [r1604] Don't ignore .Rnw files, but do ignore .svn files. 2012-09-11 warnes - * [r1603] man/interleave.Rd: Clarify workding of DROP argument to - interleave(). - * [r1602] man/interleave.Rd: Replace call to aggregate.table() with - equivalent tapply() call since aggregate.table() is being - depreciated. + * [r1603] Clarify workding of DROP argument to interleave(). + * [r1602] Replace call to aggregate.table() with equivalent + tapply() call since aggregate.table() is being depreciated. 2012-08-22 warnes - * [r1601] DESCRIPTION, inst/NEWS: Update DESCRIPTION and NEWS for - gdate 2.11.1. - * [r1600] man/read.xls.Rd: Add example for read.xls() that shows - how to use the fileEncoding + * [r1601] Update DESCRIPTION and NEWS for gdate 2.11.1. + * [r1600] Add example for read.xls() that shows how to use the + fileEncoding argument to read in latin-1 encoded data. - * [r1599] tests/latin-1.xls, tests/test.read.xls.R, - tests/test.read.xls.Rout.save: Add XLSX test for latin-1 - characters, and look for them in their new + * [r1599] Add XLSX test for latin-1 characters, and look for them + in their new location in inst/xls/. - * [r1598] inst/xls/latin-1.xls, inst/xls/latin-1.xlsx: add XLSX - version of latin-1.xls - * [r1597] tests/latin-1.xls, tests/test.read.xls.R, - tests/test.read.xls.Rout.save: Add test file and code to ensure - that read.xls() can properly handle + * [r1598] add XLSX version of latin-1.xls + * [r1597] Add test file and code to ensure that read.xls() can + properly handle files with alternative encodings. latin-1.xls contains each of the non-ascii latin-1 special characters in both the column headings and the body of the file. 
- * [r1596] R/read.xls.R: Change code to have R read the csv/tab data - from the file rather than + * [r1596] Change code to have R read the csv/tab data from the file + rather than from the connetion we made, so that file encodings can be properly handled. - * [r1595] R/read.xls.R: Always close the connection. + * [r1595] Always close the connection. 2012-08-13 warnes - * [r1594] inst/perl/xls2csv.pl: Remove trailing space from output - line. + * [r1594] Remove trailing space from output line. 2012-06-18 warnes - * [r1567] inst/NEWS: Update NEWS for 2.11.0 release. - * [r1566] DESCRIPTION: Bump version number and add - SystemRequirements for perl. - * [r1565] R/xls2sep.R, inst/perl/xls2csv.pl, man/read.xls.Rd, - tests/test.read.xls.R, tests/test.read.xls.Rout.save: read.xls() - and supporting functions now allow blank lines to be + * [r1567] Update NEWS for 2.11.0 release. + * [r1566] Bump version number and add SystemRequirements for perl. + * [r1565] read.xls() and supporting functions now allow blank lines + to be preserved, rather than skipped, by supplying the argument "blank.lines.skip=FALSE". The underlying perl function has been extended to suppor this via an optional "-s" argument which, when @@ -213,384 +308,249 @@ 2012-06-13 warnes - * [r1564] DESCRIPTION, R/nobs.R, inst/NEWS: - nobs.default needs to - handle logical vectors in addition to numeric + * [r1564] - nobs.default needs to handle logical vectors in + addition to numeric vectors. - update DESCRIPTION and NEWS for 2.10.6. - * [r1563] R/nobs.R: nobs.default needs to handle logical as well as - numeric vectors. + * [r1563] nobs.default needs to handle logical as well as numeric + vectors. 2012-06-08 warnes - * [r1562] DESCRIPTION, tests/test.read.xls.Rout.save: Update - DESCRIPTION and tests - * [r1561] tests/test.read.xls.R: fix incorrect function name - * [r1560] DESCRIPTION, man/installXLSXsupport.Rd: Mark example for - installXLSXsupport() to not be executed durin R CMD check. - * [r1559] DESCRIPTION: stats:::nobs.default and stats::nobs.lm - require R > 2.13.0, so add this as a dependency. + * [r1562] Update DESCRIPTION and tests + * [r1561] fix incorrect function name + * [r1560] Mark example for installXLSXsupport() to not be executed + durin R CMD check. + * [r1559] stats:::nobs.default and stats::nobs.lm require R > + 2.13.0, so add this as a dependency. 2012-06-06 warnes - * [r1552] DESCRIPTION, inst/NEWS: Update for release 2.10.2 - * [r1551] R/nobs.R: Fix bugs in nobs.default. - * [r1550] tests/test.read.xls.Rout.save, - tests/tests.write.fwf.Rout.save: Update to reflect warning on - startup that 'nobs' hides 'stats::nobs'. - * [r1549] man/nobs.Rd: Remove stray non-ASCII characters. - * [r1548] R/nobs.R: The nobs() dispatch method must be defined in - the gdata namespace to + * [r1552] Update for release 2.10.2 + * [r1551] Fix bugs in nobs.default. + * [r1550] Update to reflect warning on startup that 'nobs' hides + 'stats::nobs'. + * [r1549] Remove stray non-ASCII characters. + * [r1548] The nobs() dispatch method must be defined in the gdata + namespace to pick up the definition of gdata::nobs.default. - * [r1547] DESCRIPTION, inst/NEWS: Update DESCRIPTION and NEWS for - 2.10.1 release. - * [r1546] NAMESPACE, R/nobs.R, man/nobs.Rd: Define aliases for - 'nobs' and 'nobs.lm' to support backward + * [r1547] Update DESCRIPTION and NEWS for 2.10.1 release. + * [r1546] Define aliases for 'nobs' and 'nobs.lm' to support + backward compatibility for packages depending on gdata. 
- * [r1545] DESCRIPTION, inst/NEWS: Update DESCRIPTION and NEWS for - 2.10.0 release - * [r1544] NAMESPACE, R/startsWith.R, man/startsWith.Rd: - Add - manual page and NAMESPACE entry for startsWith(). + * [r1545] Update DESCRIPTION and NEWS for 2.10.0 release + * [r1544] - Add manual page and NAMESPACE entry for startsWith(). - Add 'ignore.case' argument to startsWith(). - * [r1543] tests/test.read.xls.Rout.save: Update to match new code. - * [r1542] man/read.xls.Rd: Replace non-ASCII characters. - * [r1541] R/read.xls.R, man/read.xls.Rd, tests/test.read.xls.R: Add - na.strings to read.xls call to convert "#DIV/0!" to NA. + * [r1543] Update to match new code. + * [r1542] Replace non-ASCII characters. + * [r1541] Add na.strings to read.xls call to convert "#DIV/0!" to + NA. 2012-06-05 warnes - * [r1540] NAMESPACE: Remove nobs method dispatch and lm methods - since these are now provided by the stats package. - * [r1539] R/env.R: Spell out arguments to ls() to avoid R CMD check + * [r1540] Remove nobs method dispatch and lm methods since these + are now provided by the stats package. + * [r1539] Spell out arguments to ls() to avoid R CMD check warnings. - * [r1538] .Rinstignore: Add .Rinstignore file to omit latex style - and source files from distributed inst/doc directory. - * [r1537] R/ConvertMedUnits.R: - Add NULL definition of MedUnits to - avoid R CMD check warning. + * [r1538] Add .Rinstignore file to omit latex style and source + files from distributed inst/doc directory. + * [r1537] - Add NULL definition of MedUnits to avoid R CMD check + warning. - Specify local environment when calling data() so that MedUnits gets defined in the function's environment rather than the global environment. - * [r1536] R/ls.funs.R: Fix error in ls.funs() that occurs when - there are no objects in the environment. - * [r1535] R/object.size.R: Avoid warning by calling - utils::object.size rather than Internal(object.size(x)) + * [r1536] Fix error in ls.funs() that occurs when there are no + objects in the environment. + * [r1535] Avoid warning by calling utils::object.size rather than + Internal(object.size(x)) 2012-05-31 warnes - * [r1534] R/nobs.R, man/nobs.Rd: - Remove dispatch function 'nobs' - and method 'nobs.lm' since these are + * [r1534] - Remove dispatch function 'nobs' and method 'nobs.lm' + since these are now provided by the R 'stats' package. 2012-05-04 warnes - * [r1532] DESCRIPTION: Update for next release - * [r1531] NAMESPACE, R/ls.funs.R, man/ls.funs.Rd: Add ls.funs() to - show functions defined in the specified environment. - * [r1530] man/is.what.Rd: Fix enumerate syntax. + * [r1532] Update for next release + * [r1531] Add ls.funs() to show functions defined in the specified + environment. + * [r1530] Fix enumerate syntax. 2012-04-03 warnes - * [r1522] R/startsWith.R: Add startsWith() function. + * [r1522] Add startsWith() function. 2011-10-05 warnes - * [r1516] man/read.xls.Rd: Fix typo + * [r1516] Fix typo 2011-09-30 warnes - * [r1515] inst/NEWS: Update DESCRIPTION and README for 2.9.0 - release. - * [r1514] DESCRIPTION: Update DESCRIPTION and README for 2.9.0 - release. + * [r1515] Update DESCRIPTION and README for 2.9.0 release. + * [r1514] Update DESCRIPTION and README for 2.9.0 release. 
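A minimal illustration of two helpers referenced in the entries above: startsWith() gained an 'ignore.case' argument in r1544, and ls.funs() was added in r1531 to list functions defined in an environment. The exact defaults are not spelled out in this diff, so the calls below are a best-effort sketch rather than a definitive signature (perl is not needed for these):

    library(gdata)   # assumes the gdata package is installed
    # Prefix test; 'ignore.case' is the argument added in r1544.
    startsWith(c("gdata", "Gtools", "other"), "g", ignore.case = TRUE)
    # List the functions visible in the calling environment (r1531).
    ls.funs()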
2011-09-20 warnes - * [r1508] man/read.xls.Rd: Improve xls2csv() man page - * [r1507] NAMESPACE: Add case() function, a vector equivalent of - the switch() function - * [r1506] R/case.R, man/case.Rd: Add case() function, a vector - equivalent of the switch() function + * [r1508] Improve xls2csv() man page + * [r1507] Add case() function, a vector equivalent of the switch() + function + * [r1506] Add case() function, a vector equivalent of the switch() + function 2011-09-02 warnes - * [r1500] NAMESPACE: Add 'centerText' function to center text - strings for a specified width. - * [r1499] R/centerText.R, man/centerText.Rd: Add 'centerText' - function to center text strings for a specified width. + * [r1500] Add 'centerText' function to center text strings for a + specified width. + * [r1499] Add 'centerText' function to center text strings for a + specified width. 2011-04-16 warnes - * [r1469] DESCRIPTION, inst/NEWS: Update for release 2.8.2 + * [r1469] Update for release 2.8.2 2011-04-15 warnes - * [r1468] R/dQuote.ascii.R, R/installXLSXsupport.R, R/read.xls.R, - R/sheetCount.R, R/xls2sep.R: Fix errors on windows when R or Perl - install path includes spaces by properly quoting the path. - * [r1467] R/xlsFormats.R: Fix error in xlsFormat() on windows when - R or Perl install path includes spaces by quoting the path. + * [r1468] Fix errors on windows when R or Perl install path + includes spaces by properly quoting the path. + * [r1467] Fix error in xlsFormat() on windows when R or Perl + install path includes spaces by quoting the path. 2011-01-15 ggorjan - * [r1465] NAMESPACE, R/nPairs.R, inst/NEWS, - inst/unitTests/runit.nPairs.R, man/nPairs.Rd: Adding summary - method for nPairs + * [r1465] Adding summary method for nPairs 2010-11-12 warnes - * [r1462] inst/NEWS: Update NEWS for gdata 2.8.1 - * [r1461] DESCRIPTION: Update DEScription file for 2.8.1 release - * [r1460] tests/test.read.xls.Rout.save, - tests/tests.write.fwf.Rout.save: Update test output to match - latest code - * [r1459] R/write.fwf.R, man/write.fwf.Rd, - tests/test.write.fwf.eol.R: Modify write.fwf() to capture and - pass on additional arguments for + * [r1462] Update NEWS for gdata 2.8.1 + * [r1461] Update DEScription file for 2.8.1 release + * [r1460] Update test output to match latest code + * [r1459] Modify write.fwf() to capture and pass on additional + arguments for write.table(). This resolves a bug reported by Jan Wijffels. 2010-11-01 arnima - * [r1453] man/Args.Rd: Minor improvement in Args.Rd help page + * [r1453] Minor improvement in Args.Rd help page 2010-10-19 warnes - * [r1452] R/onAttach.R, R/xls2sep.R: Avoid use of file.access() - which is unreliable on Windows network shares. + * [r1452] Avoid use of file.access() which is unreliable on Windows + network shares. 2010-07-08 ggrothendieck2 - * [r1448] R/xls2sep.R: findPerl call added to xls2sep + * [r1448] findPerl call added to xls2sep 2010-07-07 ggrothendieck2 - * [r1447] man/read.xls.Rd: small improvements to read.xls.Rd + * [r1447] small improvements to read.xls.Rd 2010-05-03 warnes - * [r1439] NAMESPACE, R/installXLSXModules.R, - R/installXLSXsupport.R, R/onAttach.R, inst/NEWS, - man/installXLSXsupport.Rd, man/xlsFormats.Rd: Rename - installXLSXModules() to installXLSXsupport() and provide - documentation for it. 
- * [r1438] inst/NEWS: Update news for gdata 2.8.0 - * [r1437] DESCRIPTION, NAMESPACE, R/installXLSXModules.R, - R/onAttach.R, inst/perl/install_modules.pl, - inst/perl/module_tools.pl, tests/test.read.xls.R: Add .onAttach - function to check & inform user if perl is available, to check - whether XLS and XLSX formats are avaiable, and to run the (new) - installXLSXModules() functon to attempt to install the necessar - libraries if not. Added installXLSXModules() function. + * [r1439] Rename installXLSXModules() to installXLSXsupport() and + provide documentation for it. + * [r1438] Update news for gdata 2.8.0 + * [r1437] Add .onAttach function to check & inform user if perl is + available, to check whether XLS and XLSX formats are avaiable, + and to run the (new) installXLSXModules() functon to attempt to + install the necessar libraries if not. Added installXLSXModules() + function. 2010-05-02 warnes - * [r1436] man/xlsFormats.Rd: Correct error in xlsFormat example - * [r1435] DESCRIPTION, NAMESPACE, R/dQuote.ascii.R, R/findPerl.R, - R/read.xls.R, R/xlsFormats.R, inst/doc/gregmisc.tex, - inst/perl/install_modules.pl, inst/perl/module_tools.pl, - inst/perl/sheetCount.pl, inst/perl/supportedFormats.pl, - inst/perl/xls2csv.pl, man/ConvertMedUnits.Rd, - man/aggregate.table.Rd, man/combine.Rd, man/interleave.Rd, - man/matchcols.Rd, man/nobs.Rd, man/read.xls.Rd, - man/rename.vars.Rd, man/reorder.Rd, man/resample.Rd, - man/sheetCount.Rd, man/trim.Rd, man/unmatrix.Rd, - man/upperTriangle.Rd, man/xlsFormats.Rd, src, - tests/test.read.xls.R, tests/test.read.xls.Rout.save, - tests/tests.write.fwf.Rout.save: Update perl code to work (but - generate warnings) when Zlib or SpreadSheet::XLXS is not - instaled. Also update Greg's email address + * [r1436] Correct error in xlsFormat example + * [r1435] Update perl code to work (but generate warnings) when + Zlib or SpreadSheet::XLXS is not instaled. Also update Greg's + email address 2010-02-21 ggrothendieck2 - * [r1423] R/read.xls.R, man/read.xls.Rd: isOpen problems fixed - (isOpen must have changed in R since this worked in earlier - versions). Also nba.xls link in read.xls.Rd disappeared. Replaced - with similar link. + * [r1423] isOpen problems fixed (isOpen must have changed in R + since this worked in earlier versions). Also nba.xls link in + read.xls.Rd disappeared. Replaced with similar link. 2010-02-20 ggrothendieck2 - * [r1422] INSTALL: improved INSTALL file + * [r1422] improved INSTALL file 2010-02-19 ggrothendieck2 - * [r1421] INSTALL, R/dQuote.ascii.R, R/read.xls.R, R/sheetCount.R, - inst/NEWS: added findPerl to locate ActiveState Perl on Windows - if perl= not specified and Rtools perl would have otherwise been + * [r1421] added findPerl to locate ActiveState Perl on Windows if + perl= not specified and Rtools perl would have otherwise been used. Also added INSTALL file. 2010-01-28 warnes - * [r1419] DESCRIPTION, inst/NEWS: Update for release 2.7.1 - * [r1418] R/xls2sep.R: xls2sep(): Show output of perl call when - verbose=T - * [r1417] src/build.bat: More Win32 fixes - * [r1416] src/Makefile, src/Makefile.win, src/build.bat: More work - on Win32 building - * [r1415] src/Makefile, src/Makefile.win, src/build.bat: Support - building Compress::Raw::Zlib perl package under windows. + * [r1419] Update for release 2.7.1 + * [r1418] xls2sep(): Show output of perl call when verbose=T + * [r1417] More Win32 fixes + * [r1416] More work on Win32 building + * [r1415] Support building Compress::Raw::Zlib perl package under + windows. 
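The entries above describe the machinery for locating perl and enabling .xlsx support (findPerl in r1421, the .onAttach check and installXLSXsupport() in r1437/r1439). A minimal sketch of the corresponding calls, assuming a perl interpreter named 'perl' is on the PATH and using the ExampleExcelFile.xlsx workbook the package ships under xls/ (see the r1393 entry further down); argument names reflect the released gdata API rather than anything spelled out verbatim in this diff:

    library(gdata)
    installXLSXsupport(perl = "perl")                # fetch the perl modules needed for .xlsx files
    xlsx <- system.file("xls", "ExampleExcelFile.xlsx", package = "gdata")
    head(read.xls(xlsx, sheet = 1, perl = "perl"))   # translate sheet 1 to csv, then read it into R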
2010-01-26 warnes - * [r1413] inst/NEWS: Fix typos - * [r1412] R/sheetCount.R: Show more details in sheetCount() when - verbose=TRUE + * [r1413] Fix typos + * [r1412] Show more details in sheetCount() when verbose=TRUE 2010-01-24 warnes - * [r1411] R/xls2sep.R: Replace two calls to 'dQuote', to - 'dQuote.ascii' - * [r1408] inst/doc/mapLevels.pdf, inst/doc/unknown.pdf: Remove - auto-generated pdf files from svn - * [r1407] src/Makefile: create 'distclean' to remove perl binary - dir, currently mac-only - * [r1406] R/read.xls.R, R/xls2sep.R: Make read.xls() and xls2sep() - quieter when verbose=FALSE - * [r1405] tests/test.read.xls.R, tests/test.read.xls.Rout.save: Add - tests for read.xls, sheetCount, and sheetNames - * [r1404] src/Makefile: Modify makefile to 1) clean up after build, - 2) make tar non-verbose - * [r1403] R/read.xls.R, R/sheetCount.R: Close connections when - done. - * [r1402] man/read.xls.Rd: Fix typo - * [r1401] man/read.xls.Rd, man/sheetNames.Rd: Fix R CMD CHECK - errors - * [r1400] src/Compress-Raw-Zlib-2.024, - src/Compress-Raw-Zlib-2.024.tar.gz, src/Makefile: Use the - original gz file for Compress::Raw::Zlib to avoid issues with - 'non-platform-independent' filename error in R CMD CHECK - * [r1399] inst/perl/Archive/README-Archive-Zip, - inst/perl/Archive/README-Archive::Zip: Rename files to remove R - CMD check error - * [r1398] DESCRIPTION, inst/NEWS, inst/doc/mapLevels.pdf, - inst/doc/unknown.pdf: Update for 2.7.0 release - * [r1397] NAMESPACE: Add new functions to NAMESPACE - * [r1396] src, src/Compress-Raw-Zlib-2.024, - src/Compress-Raw-Zlib-2.024/Changes, - src/Compress-Raw-Zlib-2.024/MANIFEST, - src/Compress-Raw-Zlib-2.024/META.yml, - src/Compress-Raw-Zlib-2.024/Makefile.PL, - src/Compress-Raw-Zlib-2.024/README, - src/Compress-Raw-Zlib-2.024/Zlib.xs, - src/Compress-Raw-Zlib-2.024/config.in, - src/Compress-Raw-Zlib-2.024/examples, - src/Compress-Raw-Zlib-2.024/examples/filtdef, - src/Compress-Raw-Zlib-2.024/examples/filtinf, - src/Compress-Raw-Zlib-2.024/fallback, - src/Compress-Raw-Zlib-2.024/fallback/constants.h, - src/Compress-Raw-Zlib-2.024/fallback/constants.xs, - src/Compress-Raw-Zlib-2.024/lib, - src/Compress-Raw-Zlib-2.024/lib/Compress, - src/Compress-Raw-Zlib-2.024/lib/Compress/Raw, - src/Compress-Raw-Zlib-2.024/lib/Compress/Raw/Zlib.pm, - src/Compress-Raw-Zlib-2.024/pod, - src/Compress-Raw-Zlib-2.024/pod/FAQ.pod, - src/Compress-Raw-Zlib-2.024/ppport.h, - src/Compress-Raw-Zlib-2.024/private, - src/Compress-Raw-Zlib-2.024/private/MakeUtil.pm, - src/Compress-Raw-Zlib-2.024/t, - src/Compress-Raw-Zlib-2.024/t/000prereq.t, - src/Compress-Raw-Zlib-2.024/t/01version.t, - src/Compress-Raw-Zlib-2.024/t/02zlib.t, - src/Compress-Raw-Zlib-2.024/t/07bufsize.t, - src/Compress-Raw-Zlib-2.024/t/09limitoutput.t, - src/Compress-Raw-Zlib-2.024/t/18lvalue.t, - src/Compress-Raw-Zlib-2.024/t/99pod.t, - src/Compress-Raw-Zlib-2.024/t/Test, - src/Compress-Raw-Zlib-2.024/t/Test/Builder.pm, - src/Compress-Raw-Zlib-2.024/t/Test/More.pm, - src/Compress-Raw-Zlib-2.024/t/Test/Simple.pm, - src/Compress-Raw-Zlib-2.024/t/compress, - src/Compress-Raw-Zlib-2.024/t/compress/CompTestUtils.pm, - src/Compress-Raw-Zlib-2.024/typemap, - src/Compress-Raw-Zlib-2.024/zlib-src, - src/Compress-Raw-Zlib-2.024/zlib-src/adler32.c, - src/Compress-Raw-Zlib-2.024/zlib-src/compress.c, - src/Compress-Raw-Zlib-2.024/zlib-src/crc32.c, - src/Compress-Raw-Zlib-2.024/zlib-src/crc32.h, - src/Compress-Raw-Zlib-2.024/zlib-src/deflate.c, - src/Compress-Raw-Zlib-2.024/zlib-src/deflate.h, - 
src/Compress-Raw-Zlib-2.024/zlib-src/infback.c, - src/Compress-Raw-Zlib-2.024/zlib-src/inffast.c, - src/Compress-Raw-Zlib-2.024/zlib-src/inffast.h, - src/Compress-Raw-Zlib-2.024/zlib-src/inffixed.h, - src/Compress-Raw-Zlib-2.024/zlib-src/inflate.c, - src/Compress-Raw-Zlib-2.024/zlib-src/inflate.h, - src/Compress-Raw-Zlib-2.024/zlib-src/inftrees.c, - src/Compress-Raw-Zlib-2.024/zlib-src/inftrees.h, - src/Compress-Raw-Zlib-2.024/zlib-src/trees.c, - src/Compress-Raw-Zlib-2.024/zlib-src/trees.h, - src/Compress-Raw-Zlib-2.024/zlib-src/uncompr.c, - src/Compress-Raw-Zlib-2.024/zlib-src/zconf.h, - src/Compress-Raw-Zlib-2.024/zlib-src/zlib.h, - src/Compress-Raw-Zlib-2.024/zlib-src/zutil.c, - src/Compress-Raw-Zlib-2.024/zlib-src/zutil.h, src/Makefile: Add - Compress::Raw::Zlib code - * [r1395] man/read.xls.Rd, man/sheetCount.Rd: Add/Update - documentation - * [r1394] R/xls2sep.R: Minor formatting change - * [r1393] inst/xls/ExampleExcelFile.xls, - inst/xls/ExampleExcelFile.xlsx: Add additional example files - * [r1392] inst/perl/sheetCount.pl, inst/perl/sheetNames.pl, - inst/perl/xls2csv.pl: Combine sheetCount.pl and sheetNames.pl and - modify to support Excel 2007 'xlsx' format - * [r1391] inst/perl/Spreadsheet/XLSX.pm, - inst/perl/Spreadsheet/XLSX/Fmt2007.pm, inst/perl/xls2csv.pl: - Complete changes to handle Excel 2007 'xlsx' files - * [r1390] inst/perl/Archive, inst/perl/Archive/README-Archive::Zip, - inst/perl/Archive/Zip, inst/perl/Archive/Zip.pm, - inst/perl/Archive/Zip/Archive.pm, - inst/perl/Archive/Zip/BufferedFileHandle.pm, - inst/perl/Archive/Zip/DirectoryMember.pm, - inst/perl/Archive/Zip/FAQ.pod, - inst/perl/Archive/Zip/FileMember.pm, - inst/perl/Archive/Zip/Member.pm, - inst/perl/Archive/Zip/MemberRead.pm, - inst/perl/Archive/Zip/MockFileHandle.pm, - inst/perl/Archive/Zip/NewFileMember.pm, - inst/perl/Archive/Zip/StringMember.pm, - inst/perl/Archive/Zip/Tree.pm, - inst/perl/Archive/Zip/ZipFileMember.pm, - inst/perl/OLE/README-OLE-Storage_Lite, - inst/perl/Spreadsheet/README-ParseExcel, - inst/perl/Spreadsheet/README-XLS, inst/perl/Spreadsheet/XLSX, - inst/perl/Spreadsheet/XLSX.pm, - inst/perl/Spreadsheet/XLSX/Fmt2007.pm, - inst/perl/Spreadsheet/XLSX/Utility2007.pm, inst/perl/VERSIONS: - Add additional Perl modules to support Excel 2007 'xlsx' files + * [r1411] Replace two calls to 'dQuote', to 'dQuote.ascii' + * [r1408] Remove auto-generated pdf files from svn + * [r1407] create 'distclean' to remove perl binary dir, currently + mac-only + * [r1406] Make read.xls() and xls2sep() quieter when verbose=FALSE + * [r1405] Add tests for read.xls, sheetCount, and sheetNames + * [r1404] Modify makefile to 1) clean up after build, 2) make tar + non-verbose + * [r1403] Close connections when done. 
+ * [r1402] Fix typo + * [r1401] Fix R CMD CHECK errors + * [r1400] Use the original gz file for Compress::Raw::Zlib to avoid + issues with 'non-platform-independent' filename error in R CMD + CHECK + * [r1399] Rename files to remove R CMD check error + * [r1398] Update for 2.7.0 release + * [r1397] Add new functions to NAMESPACE + * [r1396] Add Compress::Raw::Zlib code + * [r1395] Add/Update documentation + * [r1394] Minor formatting change + * [r1393] Add additional example files + * [r1392] Combine sheetCount.pl and sheetNames.pl and modify to + support Excel 2007 'xlsx' format + * [r1391] Complete changes to handle Excel 2007 'xlsx' files + * [r1390] Add additional Perl modules to support Excel 2007 'xlsx' + files 2010-01-24 ggrothendieck2 - * [r1389] NAMESPACE, man/sheetNames.Rd: added sheetNames.Rd - (documenting sheetNames/sheetCount) and updated NAMESPACE file. - * [r1388] inst/NEWS: fixed spacing problem in NEWS + * [r1389] added sheetNames.Rd (documenting sheetNames/sheetCount) + and updated NAMESPACE file. + * [r1388] fixed spacing problem in NEWS 2010-01-23 warnes - * [r1387] inst/perl/xls2csv.pl: Check if parsing the xls file - succeeds... Current code doesn't handle new XML-based format - * [r1386] inst/perl/Spreadsheet/XLSX: Remove perl - 'Spreadsheet:XLSX' module since it depends on Compress-Raw-Zlib, - which probably won't be available on most machines, and I don't - have time to figure out how to get R to build it properly when - gdata is installed. - * [r1385] inst/perl/Spreadsheet/XLSX, - inst/perl/Spreadsheet/XLSX/Fmt2007.pm, - inst/perl/Spreadsheet/XLSX/Utility2007.pm: Add perl - 'Spreadsheet:XLSX' module to support new Excel XML format files - * [r1384] R/xls2sep.R: Add xls2tsv() convenience wrapper to - xls2sep() - * [r1383] R/read.xls.R, R/xls2sep.R: Update to match new xls2csv.pl - code, allow specification of sheets by name, support CSV and TAB - delimited files using the same code, other minor changes. - * [r1382] R/sheetCount.R: Add sheetNames() function to extract the - names from XLS files - * [r1381] inst/bin/xls2csv.bat: Fix xls2csv.bat - * [r1380] inst/perl/xls2csv.pl: If only one sheet is present in the - file, don't insert the sheet name into the filename - * [r1379] inst/xls/ExampleExcelFile.xls, - inst/xls/ExampleExcelFile.xlsx: Add additional test/example Excel + * [r1387] Check if parsing the xls file succeeds... Current code + doesn't handle new XML-based format + * [r1386] Remove perl 'Spreadsheet:XLSX' module since it depends on + Compress-Raw-Zlib, which probably won't be available on most + machines, and I don't have time to figure out how to get R to + build it properly when gdata is installed. + * [r1385] Add perl 'Spreadsheet:XLSX' module to support new Excel + XML format files + * [r1384] Add xls2tsv() convenience wrapper to xls2sep() + * [r1383] Update to match new xls2csv.pl code, allow specification + of sheets by name, support CSV and TAB delimited files using the + same code, other minor changes. + * [r1382] Add sheetNames() function to extract the names from XLS files - * [r1378] inst/perl/xls2csv.pl, inst/perl/xls2tab.pl, - inst/perl/xls2tsv.pl: Modify xls2csv.pl script to: + * [r1381] Fix xls2csv.bat + * [r1380] If only one sheet is present in the file, don't insert + the sheet name into the filename + * [r1379] Add additional test/example Excel files + * [r1378] Modify xls2csv.pl script to: - Use tab-delimiter and .tsv or .tab extension if called with the name xls2tsv.pl or xls2tab.pl, respectively. 
This allows a single @@ -600,45 +560,24 @@ - Allow selection of sheets by name - Provide better error checking - Other code improvements - * [r1377] inst/perl/sheetCount.pl, inst/perl/sheetNames.pl: Add - perl scripts to extract worksheet names and sheet count from - Excel files + * [r1377] Add perl scripts to extract worksheet names and sheet + count from Excel files 2010-01-22 warnes - * [r1376] inst/perl/OLE/Storage_Lite.pm: Upgrade Perl - OLE::StorageLight module to version 0.19 - * [r1375] inst/perl/Spreadsheet/ParseExcel.pm, - inst/perl/Spreadsheet/ParseExcel/Cell.pm, - inst/perl/Spreadsheet/ParseExcel/Dump.pm, - inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm, - inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm, - inst/perl/Spreadsheet/ParseExcel/Font.pm, - inst/perl/Spreadsheet/ParseExcel/Format.pm, - inst/perl/Spreadsheet/ParseExcel/SaveParser, - inst/perl/Spreadsheet/ParseExcel/SaveParser.pm, - inst/perl/Spreadsheet/ParseExcel/SaveParser/Workbook.pm, - inst/perl/Spreadsheet/ParseExcel/SaveParser/Worksheet.pm, - inst/perl/Spreadsheet/ParseExcel/Utility.pm, - inst/perl/Spreadsheet/ParseExcel/Workbook.pm, - inst/perl/Spreadsheet/ParseExcel/Worksheet.pm: Upgrade perl - Spreadsheet::ParseExcel to version 0.56 - * [r1374] DESCRIPTION: Add complete list of contributors + * [r1376] Upgrade Perl OLE::StorageLight module to version 0.19 + * [r1375] Upgrade perl Spreadsheet::ParseExcel to version 0.56 + * [r1374] Add complete list of contributors 2010-01-22 arnima - * [r1373] man/keep.Rd: Minor improvement in help page - * [r1371] R/Args.R, R/env.R, R/is.what.R, R/keep.R, R/ll.R, - man/Args.Rd, man/env.Rd, man/is.what.Rd, man/keep.Rd, man/ll.Rd: - Many small improvements to documentation of Arni's five functions + * [r1373] Minor improvement in help page + * [r1371] Many small improvements to documentation of Arni's five + functions 2010-01-22 warnes - * [r1370] R/dQuote.ascii.R, R/read.xls.R, R/sheetCount.R, - R/xls2sep.R: - Move xls2csv(), xls2tab(), xls2sep() to a separate - file + * [r1370] - Move xls2csv(), xls2tab(), xls2sep() to a separate file - Move qQuote.ascii to a separate file - Bug Fix: xls2csv(), xls2tab() failed to pass the provided @@ -651,112 +590,79 @@ 2009-12-06 arnima - * [r1369] R/Args.R, man/Args.Rd: Minor improvements of Args(). - * [r1368] R/ll.R, man/ll.Rd: Improved ll() so user can limit output - to specified classes + * [r1369] Minor improvements of Args(). + * [r1368] Improved ll() so user can limit output to specified + classes 2009-11-16 arnima - * [r1366] R/ll.R: ll(.GlobalEnv) does not crash anymore + * [r1366] ll(.GlobalEnv) does not crash anymore 2009-08-20 warnes - * [r1357] man/cbindX.Rd, man/getDateTimePart.Rd, man/mapLevels.Rd, - man/nPairs.Rd, man/trim.Rd, man/trimSum.Rd, man/unknown.Rd, - man/write.fwf.Rd: Replace \ldots with \dots to make the new R CMD - CHECK happy. + * [r1357] Replace \ldots with \dots to make the new R CMD CHECK + happy. 2009-08-19 warnes - * [r1355] DESCRIPTION: Update for 2.6.1 release - * [r1354] inst/unitTests/runit.getDateTimeParts.R: Modify unit - tests to avoid issues related to zime zones. + * [r1355] Update for 2.6.1 release + * [r1354] Modify unit tests to avoid issues related to zime zones. 
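The read.xls() entries above add worksheet enumeration (sheetCount()/sheetNames(), r1377-r1382) and a tab-delimited translation path (r1342, r1378). A minimal sketch against the iris.xls example file installed with the package (added back in r325); the 'method' argument name is taken from the released gdata API, not from this diff, and a working perl is assumed:

    library(gdata)
    xls <- system.file("xls", "iris.xls", package = "gdata")
    sheetCount(xls)                          # number of worksheets in the file
    sheetNames(xls)                          # worksheet names
    iris2 <- read.xls(xls, method = "tab")   # use the tab-separated translation path (r1342)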
2009-08-05 warnes - * [r1353] inst/doc/mapLevels.pdf, inst/doc/unknown.pdf: Update - vignettes for 2.6.0 release - * [r1352] man/frameApply.Rd: Fix formatting warning in frameApply - man page + * [r1353] Update vignettes for 2.6.0 release + * [r1352] Fix formatting warning in frameApply man page 2009-07-16 ggorjan - * [r1350] man/write.fwf.Rd: Reverting recent change and clarifying - the meaning. + * [r1350] Reverting recent change and clarifying the meaning. 2009-07-16 warnes - * [r1349] inst/doc/mapLevels.pdf, inst/doc/unknown.pdf, - man/resample.Rd: Add contents of \value section for resample() - man page - * [r1348] tests/tests.write.fwf.Rout.save: Update test output to - remove R CMD check warning - * [r1347] inst/NEWS: Update ChangeLog and NEWS for gdata 2.6.0 - release - * [r1346] DESCRIPTION: Update DESCRIPTION file for gdata 2.6.0 - * [r1345] inst/doc/gregmisc.tex, inst/doc/mapLevels.pdf, - inst/doc/unknown.pdf, man/ConvertMedUnits.Rd, - man/aggregate.table.Rd, man/combine.Rd, man/interleave.Rd, - man/matchcols.Rd, man/nobs.Rd, man/rename.vars.Rd, - man/reorder.Rd, man/trim.Rd, man/unmatrix.Rd, - man/upperTriangle.Rd: Correct Greg's email address - * [r1344] man/write.fwf.Rd: Correct minor typos in write.fwf() man - page - * [r1343] man/resample.Rd: Correct page for resample() - * [r1342] NAMESPACE, R/read.xls.R, inst/perl/xls2tab.pl, - man/read.xls.Rd: Add support for using tab for field separator - during translation from xls format in read.xls + * [r1349] Add contents of \value section for resample() man page + * [r1348] Update test output to remove R CMD check warning + * [r1347] Update ChangeLog and NEWS for gdata 2.6.0 release + * [r1346] Update DESCRIPTION file for gdata 2.6.0 + * [r1345] Correct Greg's email address + * [r1344] Correct minor typos in write.fwf() man page + * [r1343] Correct page for resample() + * [r1342] Add support for using tab for field separator during + translation from xls format in read.xls 2009-04-19 arnima - * [r1314] R/env.R, R/ll.R: Changed object.size(object) to + * [r1314] Changed object.size(object) to unclass(object.size(object)). 2008-12-31 ggorjan - * [r1312] NAMESPACE, inst/NEWS: Documenting changes and exporting - the functions. - * [r1311] R/object.size.R, man/humanReadable.Rd, - man/object.size.Rd: Enhanced function object.size that returns - the size of multiple objects. There is also a handy print method - that can print size of an object in "human readable" format when + * [r1312] Documenting changes and exporting the functions. + * [r1311] Enhanced function object.size that returns the size of + multiple objects. There is also a handy print method that can + print size of an object in "human readable" format when options(humanReadable=TRUE) or print(object.size(x), humanReadable=TRUE). - * [r1310] R/wideByFactor.R, inst/unitTests/runit.wideByFactor.R, - man/wideByFactor.Rd: New function wideByFactor that reshapes - given dataset by a given factor - it creates a "multivariate" - data.frame. - * [r1309] R/nPairs.R, inst/unitTests/runit.nPairs.R, man/nPairs.Rd: - New function nPairs that gives the number of variable pairs in a - data.frame or a matrix. - * [r1308] R/getDateTimeParts.R, - inst/unitTests/runit.getDateTimeParts.R, man/getDateTimePart.Rd: - New functions getYear, getMonth, getDay, getHour, getMin, and - getSec for extracting the date/time parts from objects of a + * [r1310] New function wideByFactor that reshapes given dataset by + a given factor - it creates a "multivariate" data.frame. 
+ * [r1309] New function nPairs that gives the number of variable + pairs in a data.frame or a matrix. + * [r1308] New functions getYear, getMonth, getDay, getHour, getMin, + and getSec for extracting the date/time parts from objects of a date/time class. - * [r1307] R/bindData.R, inst/unitTests/runit.bindData.R, - man/bindData.Rd: New function bindData that binds two data frames - into a multivariate data frame in a different way than merge. - * [r1306] R/runRUnitTests.R, inst/unitTests/Makefile, - inst/unitTests/runRUnitTests.R, man/gdata-package.Rd, - man/runRUnitTests.Rd, tests/doRUnit.R: New function - .runRUnitTestsGdata that enables run of all RUnit tests during - the R CMD check as well as directly from within R. + * [r1307] New function bindData that binds two data frames into a + multivariate data frame in a different way than merge. + * [r1306] New function .runRUnitTestsGdata that enables run of all + RUnit tests during the R CMD check as well as directly from + within R. 2008-12-20 ggorjan - * [r1305] NAMESPACE, R/trimSum.R, inst/NEWS, - inst/unitTests/runit.trimSum.R, man/trimSum.Rd: - * [r1304] tests/tests.write.fwf.Rout.save: To remove some output in - the R CMD check + * [r1305] + * [r1304] To remove some output in the R CMD check 2008-08-05 ggorjan - * [r1300] DESCRIPTION, NAMESPACE, R/cbindX.R, R/write.fwf.R, - inst/NEWS, inst/doc/mapLevels.pdf, inst/doc/unknown.pdf, - inst/unitTests/runit.cbindX.R, inst/unitTests/runit.write.fwf.R, - man/cbindX.Rd, man/write.fwf.Rd, tests/tests.write.fwf.R, - tests/tests.write.fwf.Rout.save: - Increased version to 2.5.0 + * [r1300] - Increased version to 2.5.0 - New function cbindX that can bind objects with different number of rows. @@ -768,20 +674,18 @@ 2008-06-30 arnima - * [r1299] R/env.R, R/ll.R, man/env.Rd, man/ll.Rd: Simplified - default 'unit' argument from c("KB","MB","bytes") to "KB". + * [r1299] Simplified default 'unit' argument from + c("KB","MB","bytes") to "KB". 2008-05-13 warnes - * [r1270] inst/NEWS, inst/doc/mapLevels.pdf, inst/doc/unknown.pdf: - Update NEWS file for 2.4.2 - * [r1269] R/read.xls.R: Use path.expand() to give proper full path - to xls file to be translated by read.xls() - * [r1268] R/read.xls.R: Modifed read.xls() failed to return the - converted data... fixed. - * [r1267] inst/perl/Spreadsheet/ParseExcel/Utility.pm: Correct - broken patch for open-office support - * [r1266] DESCRIPTION, R/read.xls.R: For read.xls() and xls2csv(): + * [r1270] Update NEWS file for 2.4.2 + * [r1269] Use path.expand() to give proper full path to xls file to + be translated by read.xls() + * [r1268] Modifed read.xls() failed to return the converted data... + fixed. + * [r1267] Correct broken patch for open-office support + * [r1266] For read.xls() and xls2csv(): - Implement more informative log messages when verbose=TRUE - Quote temporary file name to avoid errors when calling perl to do the work. @@ -793,259 +697,201 @@ 2008-05-12 warnes - * [r1265] inst/perl/Spreadsheet/ParseExcel/Utility.pm: Patch to - correct issue with OpenOffice-created XLS files. Thanks to + * [r1265] Patch to correct issue with OpenOffice-created XLS files. + Thanks to Robert Burns for pointing out the patch at http://rt.cpan.org/Public/Bug/Display.html?id=7206 2008-03-25 warnes - * [r1250] DESCRIPTION, inst/NEWS, inst/doc/mapLevels.pdf, - inst/doc/unknown.pdf: Update for version 2.4.1 - * [r1249] inst/xls/iris.xls: Example iris.xls file didn't complete - & properly formatted iris data set. Fixed. 
- * [r1248] inst/perl/IO/AtomicFile.pm, inst/perl/IO/InnerFile.pm, - inst/perl/IO/Lines.pm, inst/perl/IO/Scalar.pm, - inst/perl/IO/ScalarArray.pm, inst/perl/IO/Stringy.pm, - inst/perl/IO/Wrap.pm, inst/perl/IO/WrapTie.pm, - inst/perl/OLE/Storage_Lite.pm, - inst/perl/Spreadsheet/ParseExcel.pm, - inst/perl/Spreadsheet/ParseExcel/Dump.pm, - inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm, - inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm, - inst/perl/Spreadsheet/ParseExcel/SaveParser.pm, - inst/perl/Spreadsheet/ParseExcel/Utility.pm: Update perl modules - to latest versions + * [r1250] Update for version 2.4.1 + * [r1249] Example iris.xls file didn't complete & properly + formatted iris data set. Fixed. + * [r1248] Update perl modules to latest versions 2008-03-24 warnes - * [r1247] man/read.xls.Rd: Fix typo in win32 example for read.xls() + * [r1247] Fix typo in win32 example for read.xls() 2008-03-11 warnes - * [r1246] NAMESPACE: Add xls2csv to exported function list + * [r1246] Add xls2csv to exported function list 2008-01-30 warnes - * [r1241] ChangeLog, DESCRIPTION, inst/NEWS: Update DESCRIPTION and - NEWS for release 2.4.0 + * [r1241] Update DESCRIPTION and NEWS for release 2.4.0 2008-01-29 arnima - * [r1240] man/keep.Rd: Added argument 'all'. - * [r1239] R/keep.R: Added argument 'all'. + * [r1240] Added argument 'all'. + * [r1239] Added argument 'all'. 2007-10-22 warnes - * [r1196] DESCRIPTION: Clarify GPL version + * [r1196] Clarify GPL version 2007-09-10 ggorjan - * [r1169] man/upperTriangle.Rd: removed unmatched brace - * [r1168] man/gdata-package.Rd: adding alias + * [r1169] removed unmatched brace + * [r1168] adding alias 2007-09-06 ggorjan - * [r1162] man/gdata-package.Rd: keyword + * [r1162] keyword 2007-08-21 ggorjan - * [r1154] man/gdata-package.Rd: package help page - * [r1153] NEWS, inst/NEWS: move - * [r1152] NEWS: move + * [r1154] package help page + * [r1153] move + * [r1152] move 2007-08-20 ggorjan - * [r1151] inst/doc/mapLevels.tex: clean - * [r1150] inst/doc/mapLevels.Rnw, inst/doc/mapLevels.pdf, - inst/doc/mapLevels.tex: a real vignette - * [r1149] inst/doc/unknown.Rnw, inst/doc/unknown.pdf, - inst/doc/unknown.tex: a real vignette - * [r1148] man/unknown.Rd: additional keyword for searchig + * [r1151] clean + * [r1150] a real vignette + * [r1149] a real vignette + * [r1148] additional keyword for searchig 2007-08-17 ggorjan - * [r1147] man/unknown.Rd: keyword + * [r1147] keyword 2007-07-22 arnima - * [r1103] R/Args.R, R/keep.R: Reverted back to - as.character(substitute(x)), so user can run keep(x), + * [r1103] Reverted back to as.character(substitute(x)), so user can + run keep(x), keep("x"), Args(x), and Args("x"). 2007-07-21 arnima - * [r1102] R/keep.R: Changed as.character(substitute()) to + * [r1102] Changed as.character(substitute()) to deparse(substitute()), following help(substitute) recommendation. - * [r1101] R/Args.R: Changed as.character(substitute()) to + * [r1101] Changed as.character(substitute()) to deparse(substitute()), following help(substitute) recommendation. 
2007-07-10 warnes - * [r1099] R/read.xls.R, man/read.xls.Rd: Update read.xls() code and - docs with enhacements by Gabor Grothendieck + * [r1099] Update read.xls() code and docs with enhacements by Gabor + Grothendieck 2007-06-06 ggorjan - * [r1097] inst/doc/unknown.pdf, inst/doc/unknown.tex: last edits - from newsletter - * [r1096] R/drop.levels.R, man/drop.levels.Rd: drop levels as - suggested by Brian Ripley - * [r1095] inst/unitTests/Makefile, tests/doRUnit.R: better - integration of unit tests - * [r1094] R/mapLevels.R, R/unknown.R: making codetools happy + * [r1097] last edits from newsletter + * [r1096] drop levels as suggested by Brian Ripley + * [r1095] better integration of unit tests + * [r1094] making codetools happy 2007-01-28 arnima - * [r1042] R/keep.R: Throw warnings rather than errors + * [r1042] Throw warnings rather than errors 2007-01-27 arnima - * [r1041] R/keep.R: Meaningful error message is given when - requested object does not exist - * [r1040] R/is.what.R: is.* tests that return NA are not reported + * [r1041] Meaningful error message is given when requested object + does not exist + * [r1040] is.* tests that return NA are not reported is.what recursion is avoided 2006-11-30 ggorjan - * [r1035] R/unknown.R: minor commet to the code - * [r1034] inst/doc/mapLevels.pdf, inst/doc/mapLevels.tex: - description of mapLevels methods - * [r1033] inst/doc/unknown.pdf, inst/doc/unknown.tex: description - of unknown methods + * [r1035] minor commet to the code + * [r1034] description of mapLevels methods + * [r1033] description of unknown methods 2006-11-16 ggorjan - * [r1013] R/c.factor.R, man/c.factor.Rd: seems that c.factor was - not a good idea and there were better examples posted on r-devel - list + * [r1013] seems that c.factor was not a good idea and there were + better examples posted on r-devel list 2006-11-14 ggorjan - * [r1012] man/combine.Rd, man/frameApply.Rd: Removed executable - property + * [r1012] Removed executable property 2006-11-10 ggorjan - * [r1004] NAMESPACE, NEWS: just formatting + * [r1004] just formatting 2006-11-02 ggorjan - * [r1002] man/mapLevels.Rd, man/unknown.Rd: typos + * [r1002] typos 2006-10-30 ggorjan - * [r1001] man/write.fwf.Rd: some more examples for use of read.fwf - after write.fwf - * [r1000] inst/unitTests: ignore for report files - * [r999] tests/tests.write.fwf.Rout.save: Id tag from source - * [r998] NAMESPACE: removing unused import - * [r997] R/write.fwf.R, inst/unitTests/runit.write.fwf.R, - man/write.fwf.Rd, tests/tests.write.fwf.R: Id tag - * [r996] NAMESPACE, NEWS, R/write.fwf.R, - inst/unitTests/runit.write.fwf.R, man/write.fwf.Rd, - tests/tests.write.fwf.R, tests/tests.write.fwf.Rout.save: - write.fwf - * [r995] inst/unitTests/runit.reorder.factor.R: Id tag - * [r994] inst/unitTests/runit.reorder.factor.R: added unit tests - for reorder.factor - * [r993] R/c.factor.R, R/mapLevels.R, R/unknown.R, - inst/unitTests/runit.drop.levels.R, - inst/unitTests/runit.mapLevels.R, inst/unitTests/runit.trim.R, - inst/unitTests/runit.unknown.R, man/c.factor.Rd, man/unknown.Rd, - tests/doRUnit.R: mapply keeps names in R 2.4; POSIX unit tests - solved; $ should work now + * [r1001] some more examples for use of read.fwf after write.fwf + * [r1000] ignore for report files + * [r999] Id tag from source + * [r998] removing unused import + * [r997] Id tag + * [r996] write.fwf + * [r995] Id tag + * [r994] added unit tests for reorder.factor + * [r993] mapply keeps names in R 2.4; POSIX unit tests solved; $ + should work now 2006-10-29 ggorjan 
- * [r992] NEWS, R/unknown.R, inst/unitTests/runit.unknown.R, - man/unknown.Rd: fixed problem in tests; added unknown methods and - tests for matrices - * [r991] R/drop.levels.R, R/mapLevels.R, - inst/unitTests/runit.mapLevels.R, man/mapLevels.Rd, - tests/doRUnit.R: sort is generic now; mapply keeps names in R - 2.4.0; some codetools suggestions fixed - * [r990] DESCRIPTION, NAMESPACE: sort is generic from R 2.4.0 - * [r989] DESCRIPTION, NEWS, R/trim.R, man/trim.Rd: trim() gains ... - argument; version bump - * [r988] NEWS, R/reorder.R, man/reorder.Rd: Fixed collision bug - with stats version of reorder.factor + * [r992] fixed problem in tests; added unknown methods and tests + for matrices + * [r991] sort is generic now; mapply keeps names in R 2.4.0; some + codetools suggestions fixed + * [r990] sort is generic from R 2.4.0 + * [r989] trim() gains ... argument; version bump + * [r988] Fixed collision bug with stats version of reorder.factor 2006-10-27 warnes - * [r987] R/c.factor.R, man/c.factor.Rd: Add c() method for factor - objects, submitted by Gregor Gorjanc + * [r987] Add c() method for factor objects, submitted by Gregor + Gorjanc 2006-09-19 warnes - * [r986] NEWS: Update NEWS file for 2.3.0 release - * [r985] inst/unitTests/runit.trim.R: Explicitly set the local in - runit.trim.R to one where leading spaces affect sort order so - that the unit test works properly. + * [r986] Update NEWS file for 2.3.0 release + * [r985] Explicitly set the local in runit.trim.R to one where + leading spaces affect sort order so that the unit test works + properly. 2006-09-18 warnes - * [r984] inst/doc/Rnews.sty: Update Rnews.sty to the latest version - * [r983] R/trim.R, inst/unitTests/Makefile, - inst/unitTests/runit.trim.R, man/trim.Rd, tests/doRUnit.R: - Integrate fixes for trim() from Gregor and myself. - * [r982] inst/unitTests/report.html, inst/unitTests/report.txt: - Remove unneeded files. + * [r984] Update Rnews.sty to the latest version + * [r983] Integrate fixes for trim() from Gregor and myself. + * [r982] Remove unneeded files. 2006-09-13 warnes - * [r981] R/unknown.R, inst/unitTests, inst/unitTests/Makefile, - inst/unitTests/report.html, inst/unitTests/report.txt, - inst/unitTests/runit.drop.levels.R, - inst/unitTests/runit.mapLevels.R, inst/unitTests/runit.trim.R, - inst/unitTests/runit.unknown.R, man/unknown.Rd, tests, - tests/doRUnit.R: Add unknown() and unit test files - * [r980] NAMESPACE, R/drop.levels.R, R/mapLevels.R, R/trim.R: More - fixes from Gregor Gorjanc - * [r979] DESCRIPTION, NAMESPACE, R/combineLevels.R, R/mapFactor.R, - R/mapLevels.R, man/combineLevels.Rd, man/mapFactor.Rd, - man/mapLevels.Rd: Add mapLevels functions from Gregor Gorjanc, - along with associated unit tests. + * [r981] Add unknown() and unit test files + * [r980] More fixes from Gregor Gorjanc + * [r979] Add mapLevels functions from Gregor Gorjanc, along with + associated unit tests. 2006-08-03 warnes - * [r978] DESCRIPTION, NAMESPACE, R/combineLevels.R, R/mapFactor.R, - man/combineLevels.Rd, man/mapFactor.Rd: Add Gregor Gorjanc's - mapFactor() and combineLevels() functions. + * [r978] Add Gregor Gorjanc's mapFactor() and combineLevels() + functions. 
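The trim() and unknown() entries above ([r983], [r985], [r981], [r992]) concern whitespace trimming and the unknown-value/NA recoding helpers. A short usage sketch, assuming the argument names match the package documentation (treat the calls as illustrative rather than authoritative):

    library(gdata)
    trim("  some text  ")                     # "some text"  -- leading/trailing whitespace removed
    unknownToNA(c(1, 999, 3), unknown = 999)  # 1 NA 3       -- recode the 'unknown' marker to NA
    NAtoUnknown(c(1, NA, 3), unknown = 999)   # 1 999 3      -- and back again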
2006-08-02 warnes - * [r977] inst/doc/gregmisc.tex, man/ConvertMedUnits.Rd, - man/aggregate.table.Rd, man/combine.Rd, man/interleave.Rd, - man/matchcols.Rd, man/nobs.Rd, man/read.xls.Rd, - man/rename.vars.Rd, man/reorder.Rd, man/trim.Rd, man/unmatrix.Rd, - man/upperTriangle.Rd: Update my email address - * [r976] data/MedUnits.rda: Remove MedUnits.rda to convert to - binary format - * [r975] data/MedUnits.rda: Remove MedUnits.rda to convert to - binary format - * [r974] DESCRIPTION: Update version number - * [r973] NAMESPACE, R/drop.levels.R, R/trim.R, man/drop.levels.Rd, - man/trim.Rd: Integrate changes suggested by Gregor Gorjanc + * [r977] Update my email address + * [r976] Remove MedUnits.rda to convert to binary format + * [r975] Remove MedUnits.rda to convert to binary format + * [r974] Update version number + * [r973] Integrate changes suggested by Gregor Gorjanc 2006-03-14 nj7w - * [r940] NAMESPACE, R/trim.R, man/resample.Rd: Fixed R CMD check - errors and added trim.default to NAMESPACE + * [r940] Fixed R CMD check errors and added trim.default to + NAMESPACE 2006-03-13 nj7w - * [r939] R/trim.R: Added trim.character and trim.factor as per - Gregor's suggestions + * [r939] Added trim.character and trim.factor as per Gregor's + suggestions 2006-01-03 warnes - * [r839] NAMESPACE, R/resample.R, man/resample.Rd: Add resample() - function, which generates a random sample or + * [r839] Add resample() function, which generates a random sample + or permutation from the elements of the supplied vector, even if the vector has length 1. This avoide the problems caused by base::sample()'s special case for vectors of length 1, where it @@ -1053,17 +899,16 @@ 2005-12-13 nj7w - * [r806] ChangeLog, NEWS: Updated news and removed changelog + * [r806] Updated news and removed changelog 2005-12-12 nj7w - * [r798] DESCRIPTION, man/interleave.Rd: Updated version number for - CRAN release + * [r798] Updated version number for CRAN release 2005-12-08 warnes - * [r789] R/interleave.R: Andrew Burgess reported that interleave() - converts 1-column matrixes + * [r789] Andrew Burgess reported that interleave() converts + 1-column matrixes to vectors and provided a patch. A slight modification of his patch has been applied. There is now a 'drop' argument, which controls @@ -1072,8 +917,8 @@ 2005-12-04 warnes - * [r779] man/interleave.Rd: Andrew Burgess reported that - interleave() converts 1-column matrixes + * [r779] Andrew Burgess reported that interleave() converts + 1-column matrixes to vectors and provided a patch. A slight modification of his patch has been applied. There is now a 'drop' argument, which controls @@ -1082,275 +927,219 @@ 2005-12-01 nj7w - * [r775] man/combine.Rd, man/reorder.Rd: Updated Greg's email - address - * [r774] man/drop.levels.Rd, man/frameApply.Rd, man/ll.Rd, - man/read.xls.Rd: Updated Jim's email address + * [r775] Updated Greg's email address + * [r774] Updated Jim's email address 2005-11-21 arnima - * [r744] R/ll.R: Suppressed warning message in attach() call. + * [r744] Suppressed warning message in attach() call. 2005-10-27 warnes - * [r716] DESCRIPTION: Bump version number again to show that I - fixed a bug. - * [r715] DESCRIPTION, R/upperTriangle.R: Update version number - * [r714] man/frameApply.Rd, man/reorder.Rd: Remove explicit loading - of gtools in examples, so that failure to + * [r716] Bump version number again to show that I fixed a bug. 
+ * [r715] Update version number + * [r714] Remove explicit loading of gtools in examples, so that + failure to import functions from gtools gets properly caught by running the examples. - * [r713] man/upperTriangle.Rd: Add missing close-bracket - * [r712] NAMESPACE: Add upperTriangle and friends - * [r711] R/upperTriangle.R, man/upperTriangle.Rd: Add functions for - extracting, modifying upper and lower trianglular + * [r713] Add missing close-bracket + * [r712] Add upperTriangle and friends + * [r711] Add functions for extracting, modifying upper and lower + trianglular components of matrices. 2005-10-19 arnima - * [r695] R/is.what.R: Replaced the "not.using" vector with a more - robust try(get(test)) to find out whether a particular is.* - function returns a logical of length one. + * [r695] Replaced the "not.using" vector with a more robust + try(get(test)) to find out whether a particular is.* function + returns a logical of length one. 2005-09-12 nj7w - * [r671] man/aggregate.table.Rd, man/interleave.Rd, - man/matchcols.Rd, man/nobs.Rd, man/read.xls.Rd, - man/rename.vars.Rd, man/trim.Rd, man/unmatrix.Rd: Updated Greg's - email + * [r671] Updated Greg's email 2005-09-06 nj7w - * [r661] man/reorder.Rd: Added library(gtools) in the example - * [r660] NAMESPACE: Removed gtools dependency from NAMESPACE, as it - was being used only in an example, and was giving warning - * [r659] DESCRIPTION: Added Suggests field + * [r661] Added library(gtools) in the example + * [r660] Removed gtools dependency from NAMESPACE, as it was being + used only in an example, and was giving warning + * [r659] Added Suggests field 2005-09-02 nj7w - * [r658] man/frameApply.Rd: Updated the example in frameApply - * [r656] NEWS: Added NEWS - * [r654] ChangeLog: ChangeLog + * [r658] Updated the example in frameApply + * [r656] Added NEWS + * [r654] ChangeLog 2005-08-31 nj7w - * [r644] DESCRIPTION: Added DESCRIPTION file - * [r643] DESCRIPTION.in: removed DESCRIPTION.in + * [r644] Added DESCRIPTION file + * [r643] removed DESCRIPTION.in 2005-07-20 nj7w - * [r631] man/ll.Rd: updated documentation - * [r630] R/Args.R: ## Args() was using a different search path from - args(), e.g. + * [r631] updated documentation + * [r630] ## Args() was using a different search path from args(), + e.g. rep <- function(local) return(NULL) args() Args() ## Fixed - * [r629] R/is.what.R: ## is.what() was giving needless warnings for - functions, e.g. + * [r629] ## is.what() was giving needless warnings for functions, + e.g. is.what(plot) ## Fixed - * [r628] R/ll.R: ## ll() was crashing if argument was a list of - length zero, e.g. + * [r628] ## ll() was crashing if argument was a list of length + zero, e.g. 
x <- list() ll(x) ## Fixed, and added sort.elements (see new help page) 2005-06-09 nj7w - * [r625] R/Args.R, R/aggregate.table.R, R/combine.R, R/elem.R, - R/env.R, R/frameApply.R, R/interleave.R, R/is.what.R, R/keep.R, - R/ll.R, R/matchcols.R, R/nobs.R, R/read.xls.R, R/rename.vars.R, - R/reorder.R, R/trim.R, R/unmatrix.R, inst/perl/IO/AtomicFile.pm, - inst/perl/IO/InnerFile.pm, inst/perl/IO/Lines.pm, - inst/perl/IO/Scalar.pm, inst/perl/IO/Scalar.pm.html, - inst/perl/IO/ScalarArray.pm, inst/perl/IO/Stringy.pm, - inst/perl/IO/Wrap.pm, inst/perl/IO/WrapTie.pm, - man/aggregate.table.Rd, man/combine.Rd, man/drop.levels.Rd, - man/interleave.Rd, man/nobs.Rd, man/rename.vars.Rd, - man/reorder.Rd: Updating the version number, and various help - files to synchronize splitting of gregmisc bundle in 4 individual + * [r625] Updating the version number, and various help files to + synchronize splitting of gregmisc bundle in 4 individual components. 2005-06-07 nj7w - * [r622] R/drop.levels.R: Reverting to the previous version of - drop.levels.R by replacing + * [r622] Reverting to the previous version of drop.levels.R by + replacing sapply(...) with as.data.frame(lapply(...)) because sapply has the undesirable effect of converting the object to a matrix, which in turn coerces the factors to numeric. 2005-05-13 nj7w - * [r621] R/read.xls.R: 1) Using dQuote.ascii function in read.xls - as the new version of dQuote doesn't work proprly with UTF-8 - locale. + * [r621] 1) Using dQuote.ascii function in read.xls as the new + version of dQuote doesn't work proprly with UTF-8 locale. 2) Modified CrossTable.Rd usage in gmodels 3) Modified heatmap.2 usage in gplots. 2005-04-02 warnes - * [r600] NAMESPACE, R/drop.levels.R, man/drop.levels.Rd: Move - drop.levels() from gtools to gdata. - * [r598] NAMESPACE, R/frameApply.R, man/frameApply.Rd: Move - frameApply() to gdata package. + * [r600] Move drop.levels() from gtools to gdata. + * [r598] Move frameApply() to gdata package. 2005-03-31 warnes - * [r586] man/elem.Rd: Comment out example to avoid R CMD check - warnings + * [r586] Comment out example to avoid R CMD check warnings 2005-03-22 warnes - * [r578] NAMESPACE, R/ConvertMedUnits.R, data/MedUnits.Rda, - data/MedUnits.rda, man/ConvertMedUnits.Rd, man/MedUnits.Rd: Fixes - to pass `R CMD check'. - * [r577] R/Args.R, R/env.R, R/ll.R, man/Args.Rd: Integrated fixes - from Arni. - * [r576] man/read.xls.Rd: Improve documentation of 'perl' argument - and give examples. + * [r578] Fixes to pass `R CMD check'. + * [r577] Integrated fixes from Arni. + * [r576] Improve documentation of 'perl' argument and give + examples. 2005-03-09 warnes - * [r573] R/ConvertMedUnits.R, man/ConvertMedUnits.Rd, - man/MedUnits.Rd: - Add ConvertMedUnits() plus documentation + * [r573] - Add ConvertMedUnits() plus documentation - Add documentation for MedUnits data set. - * [r572] data/MedUnits.Rda: Update MedUnits data file. - * [r571] data/MedUnits.tab: Don't need both .Rda and .tab forms of - the data. - * [r570] data, data/MedUnits.Rda, data/MedUnits.tab: Add MedUnits - data set, which provides conversions between American + * [r572] Update MedUnits data file. + * [r571] Don't need both .Rda and .tab forms of the data. + * [r570] Add MedUnits data set, which provides conversions between + American 'Conventional' and Standard Intertional (SI) medical units. 2005-03-01 warnes - * [r566] man/elem.Rd, man/ll.Rd: - Remove 'elem' call from ll - example. + * [r566] - Remove 'elem' call from ll example. 
- Add note to 'elem' man page that it is depreciated and 'll' should be used instead. 2005-02-26 nj7w - * [r565] NAMESPACE, man/elem.Rd, man/env.Rd, man/ll.Rd, - man/read.xls.Rd: *** empty log message *** + * [r565] *** empty log message *** 2005-02-25 warnes - * [r564] NAMESPACE: Remove ll methods since the base function now - handles lists and data frames. - * [r563] R/elem.R, R/env.R, R/ll.R, man/Args.Rd, man/env.Rd, - man/ll.Rd: Integrate changes submitted by Arni Magnusson + * [r564] Remove ll methods since the base function now handles + lists and data frames. + * [r563] Integrate changes submitted by Arni Magnusson 2005-01-31 warnes - * [r529] R/read.xls.R, man/read.xls.Rd: Add ability to specify the - perl executable and path. + * [r529] Add ability to specify the perl executable and path. 2005-01-28 warnes - * [r526] DESCRIPTION.in, NAMESPACE: Add dependency on stats. + * [r526] Add dependency on stats. 2005-01-12 warnes - * [r515] DESCRIPTION.in: Add dependency on R 1.9.0+ to prevent - poeple from installing on old + * [r515] Add dependency on R 1.9.0+ to prevent poeple from + installing on old versions of R which don't support namespaces. 2004-12-27 warnes - * [r509] man/unmatrix.Rd: Update usage to match code. - * [r508] R/unmatrix.R: Replace 'F' with 'FALSE'. + * [r509] Update usage to match code. + * [r508] Replace 'F' with 'FALSE'. 2004-10-12 warneg - * [r465] R/unmatrix.R, man/unmatrix.Rd: Add unmatrix() function + * [r465] Add unmatrix() function 2004-09-27 warneg - * [r461] DESCRIPTION, DESCRIPTION.in, NAMESPACE, man/.Rhistory: - Updated to pass R CMD check. + * [r461] Updated to pass R CMD check. 2004-09-03 warneg - * [r455] inst/xls, inst/xls/iris.xls: added to cvs. - * [r454] inst/perl/xls2csv.pl: Checkin xls2csv.pl. Should have been - in long ago, must have been an oversight - * [r451] R/read.xls.R: Need to look for files using the new package - name. - * [r449] man/read.xls.Rd: Need to use the new package name when - looking for iris.xls. - * [r448] man/ll.Rd: Add ll.list to the to the list of functions - described - * [r447] NAMESPACE: Add ll and friends to the namespace - * [r446] DESCRIPTION, DESCRIPTION.in, NAMESPACE, R/Args.R, - R/aggregate.table.R, R/combine.R, R/elem.R, R/env.R, - R/interleave.R, R/is.what.R, R/keep.R, R/ll.R, R/matchcols.R, - R/nobs.R, R/read.xls.R, R/rename.vars.R, R/reorder.R, R/trim.R, - man/reorder.Rd: initial bundle checkin + * [r455] added to cvs. + * [r454] Checkin xls2csv.pl. Should have been in long ago, must + have been an oversight + * [r451] Need to look for files using the new package name. + * [r449] Need to use the new package name when looking for + iris.xls. + * [r448] Add ll.list to the to the list of functions described + * [r447] Add ll and friends to the namespace + * [r446] initial bundle checkin 2004-09-02 warneg - * [r442] DESCRIPTION, DESCRIPTION.in, NAMESPACE, man/.Rhistory: - Initial revision + * [r442] Initial revision 2004-08-27 warnes - * [r441] R/reorder.R, man/reorder.Rd: Fixed bug in mixedsort, and - modified reorder.factor to use mixedsort. + * [r441] Fixed bug in mixedsort, and modified reorder.factor to use + mixedsort. 
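The [r441] entry above ties reorder.factor() to gtools::mixedsort(), which orders strings with embedded numbers in natural rather than strict lexical order. A small sketch of why that matters for factor levels (assumes the gtools package is available):

    library(gtools)
    x <- c("lane10", "lane2", "lane1")
    sort(x)        # "lane1"  "lane10" "lane2"   -- lexical order
    mixedsort(x)   # "lane1"  "lane2"  "lane10"  -- natural order used for the reordered levels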
2004-07-29 warnes - * [r427] inst/perl, inst/perl/IO, inst/perl/IO/AtomicFile.pm, - inst/perl/IO/InnerFile.pm, inst/perl/IO/Lines.pm, - inst/perl/IO/Scalar.pm, inst/perl/IO/Scalar.pm.html, - inst/perl/IO/ScalarArray.pm, inst/perl/IO/Stringy.pm, - inst/perl/IO/Wrap.pm, inst/perl/IO/WrapTie.pm, inst/perl/OLE, - inst/perl/OLE/Storage_Lite.pm, inst/perl/Spreadsheet, - inst/perl/Spreadsheet/ParseExcel, - inst/perl/Spreadsheet/ParseExcel.pm, - inst/perl/Spreadsheet/ParseExcel/Dump.pm, - inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm, - inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm, - inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm, - inst/perl/Spreadsheet/ParseExcel/SaveParser.pm, - inst/perl/Spreadsheet/ParseExcel/Utility.pm: Add perl modules to - CVS. + * [r427] Add perl modules to CVS. 2004-07-27 warnes - * [r425] man/read.xls.Rd: Fix typos/spelling. - * [r424] man/read.xls.Rd: Add note that Perl is required for - read.xls to work properly. + * [r425] Fix typos/spelling. + * [r424] Add note that Perl is required for read.xls to work + properly. 2004-07-16 warnes - * [r420] R/read.xls.R: Remove the temporary csv file if reading it - in fails. + * [r420] Remove the temporary csv file if reading it in fails. 2004-06-22 warnes - * [r377] R/ll.R, man/ll.Rd: Add S3 methods for data frames and - lists. + * [r377] Add S3 methods for data frames and lists. 2004-06-08 warnes - * [r371] inst/bin, inst/bin/xls2csv, inst/bin/xls2csv.bat: Moved - from gregmisc/src/. - * [r370] inst/tools: Remove the files in src, instead provide - "pre-installed" perl packages + * [r371] Moved from gregmisc/src/. + * [r370] Remove the files in src, instead provide "pre-installed" + perl packages in inst/perl. 2004-06-05 warnes - * [r365] inst/tools/Makefile: Fix typo. - * [r364] inst/tools/Makefile: Fix Unix makefile so that it works - when invoked directly. - * [r363] inst/tools/Makefile: Fixes for Windows - * [r362] man/read.xls.Rd: Minor enhancment to read.xls example. - * [r361] inst/tools/Makefile, inst/xls: - Merge Makefile.win into - Makefile. Makefile.win now just redirects + * [r365] Fix typo. + * [r364] Fix Unix makefile so that it works when invoked directly. + * [r363] Fixes for Windows + * [r362] Minor enhancment to read.xls example. + * [r361] - Merge Makefile.win into Makefile. Makefile.win now just + redirects to Makefile. - Update xls2csv.bat and xls2csv shell script to correctly obtain thier installion path and infer the location of the perl code and @@ -1361,141 +1150,124 @@ 2004-06-04 warnes - * [r360] inst/tools/Makefile: More changes, indended to improve - installation reliabilty and to make + * [r360] More changes, indended to improve installation reliabilty + and to make Makefile and Makefile.win as similar as possible. 2004-05-27 warnes - * [r358] inst/tools/Makefile: Clean should remove scripts from - source directory. - * [r357] inst/perl: Moved to xls2csv.pl.in. - * [r354] inst/perl/xls2csv.pl, inst/tools/Makefile: More fixes. - * [r353] man/elem.Rd: Fix missing brace. - * [r352] man/elem.Rd: Add explicit package name to see also links. - * [r351] inst/perl/xls2csv.pl, inst/tools/Makefile: More xls2csv - perl module support changes. - * [r350] inst/tools/Makefile: More changes to fix local - installation of perl modules. + * [r358] Clean should remove scripts from source directory. + * [r357] Moved to xls2csv.pl.in. + * [r354] More fixes. + * [r353] Fix missing brace. + * [r352] Add explicit package name to see also links. 
+ * [r351] More xls2csv perl module support changes. + * [r350] More changes to fix local installation of perl modules. 2004-05-26 warnes - * [r345] man/read.xls.Rd: Escape underscores in email addresses so - Latex is happy. + * [r345] Escape underscores in email addresses so Latex is happy. 2004-05-25 warnes - * [r339] inst/perl/xls2csv.pl, inst/tools/Makefile: More changes to - xls2csv code. - * [r337] R/Args.R, man/Args.Rd: Add Args() function contributed by - Arni Magnusson . - * [r335] R/read.xls.R: - Change to call perl directly rather than - depending on the installed + * [r339] More changes to xls2csv code. + * [r337] Add Args() function contributed by Arni Magnusson + . + * [r335] - Change to call perl directly rather than depending on + the installed shell script. This should make the code more portable to MS-Windows systes. - Add additional commants.. - * [r332] inst/tools/Makefile: Makefile now modifies xls2csv.bat - xls2csv.pl and xls2csv to contain an + * [r332] Makefile now modifies xls2csv.bat xls2csv.pl and xls2csv + to contain an explicit path to the perl script/libraries. - * [r330] inst/tools/Makefile: R CMD build calls the clean target to - purge build files from the + * [r330] R CMD build calls the clean target to purge build files + from the source tree when packaging. To get use this behavior correctly, I've renamed the clean target to cleanup and distclean target to clean. - * [r329] R/read.xls.R, man/read.xls.Rd: Add read.xls(), a function - to read Microsoft Excel files by + * [r329] Add read.xls(), a function to read Microsoft Excel files + by translating them to csv files via the xls2csv.pl script. - * [r326] inst/tools/Makefile: More fixes. Seems to work now. + * [r326] More fixes. Seems to work now. 2004-05-24 warnes - * [r325] inst/perl, inst/perl/xls2csv.pl, inst/tools, - inst/tools/Makefile, inst/xls, inst/xls/iris.xls: Add files to - enable inclusion and installation of xls2csv.pl as part + * [r325] Add files to enable inclusion and installation of + xls2csv.pl as part of the package. 2004-04-01 warnes - * [r312] R/rename.vars.R, man/rename.vars.Rd: Add function - remove.vars(). + * [r312] Add function remove.vars(). 2004-03-26 warnes - * [r307] man/reorder.Rd: Contents of package 'mva' moveed to - 'stats'. - * [r298] R/is.what.R: - Fix is.what() for use under R 1.9.0 + * [r307] Contents of package 'mva' moveed to 'stats'. + * [r298] - Fix is.what() for use under R 1.9.0 - is.what() now uses is.* functions found in any attached frame 2004-01-21 warnes - * [r282] R/reorder.R, man/reorder.Rd: - Add ... argument to match - generic provided in mva. + * [r282] - Add ... argument to match generic provided in mva. 2004-01-19 warnes - * [r275] R/elem.R, R/env.R, R/ll.R, man/keep.Rd, man/ll.Rd: - - Integrated (partial) patch submitted by Arni Magnusson to clarify - help text. + * [r275] - Integrated (partial) patch submitted by Arni Magnusson + to clarify help text. - Modifed code to use match.arg(). 2003-12-15 warnes - * [r271] R/env.R: - Applied patch from Arni that fixed a bug that - caused env() to crash + * [r271] - Applied patch from Arni that fixed a bug that caused + env() to crash if any environment was completely empty 2003-12-03 warnes - * [r253] man/elem.Rd, man/ll.Rd: - match function argument defaults - with 'usage' + * [r253] - match function argument defaults with 'usage' 2003-12-02 warnes - * [r249] man/ll.Rd: Add one argument, to match code. + * [r249] Add one argument, to match code. 
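The [r329] entry above introduces read.xls(), which converts a sheet of an Excel file to CSV via the bundled xls2csv.pl script and then reads the result back into R. A minimal usage sketch with the iris.xls example file shipped under inst/xls/ (requires Perl on the search path; the perl= argument from [r529] can point at a specific interpreter, shown here with an illustrative path):

    library(gdata)
    xls <- system.file("xls", "iris.xls", package = "gdata")
    head(read.xls(xls))                                       # first rows of the converted sheet
    ## read.xls(xls, perl = "C:/Perl/bin/perl.exe")           # explicit interpreter, if needed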
2003-12-01 warnes - * [r244] R/elem.R, R/env.R, R/is.what.R, R/keep.R, R/ll.R: - Apply - changes submitted by Arni Magnusson + * [r244] - Apply changes submitted by Arni Magnusson 2003-11-19 warnes - * [r229] man/env.Rd, man/is.what.Rd, man/keep.Rd, man/ll.Rd: - Changes to pass R CMD check. + * [r229] Changes to pass R CMD check. 2003-11-18 warnes - * [r224] R/elem.R, R/env.R, R/is.what.R, R/keep.R, R/ll.R: - - Convert from MS-Dos to Unix line endings. + * [r224] - Convert from MS-Dos to Unix line endings. - Reformat to 80 columns. 2003-11-17 warnes - * [r223] man/elem.Rd: Replace 'T' with 'TRUE' to remove R CMD check - error. - * [r222] man/aggregate.table.Rd: Fix syntax error. + * [r223] Replace 'T' with 'TRUE' to remove R CMD check error. + * [r222] Fix syntax error. 2003-11-10 warnes - * [r220] R/elem.R, R/env.R, R/is.what.R, R/keep.R, R/ll.R, - man/elem.Rd, man/env.Rd, man/is.what.Rd, man/keep.Rd, man/ll.Rd: - - Add files contributed by Arni Magnusson + * [r220] - Add files contributed by Arni Magnusson . As well as some of my own. 2003-06-07 warnes - * [r198] man/aggregate.table.Rd, man/interleave.Rd: - Fixed error - in examples. Had sqrt(var(x)/(n-1)) for the standard + * [r198] - Fixed error in examples. Had sqrt(var(x)/(n-1)) for the + standard error of the mean instead of sqrt(var(x)/n). 2003-05-23 warnes - * [r197] R/matchcols.R, man/matchcols.Rd: - Fixed typos - * [r196] R/matchcols.R, man/matchcols.Rd: - library() backported - from 1.7-devel. This version of the function + * [r197] - Fixed typos + * [r196] - library() backported from 1.7-devel. This version of the + function adds the "pos=" argument to specify where in the search path the library should be placed. @@ -1509,14 +1281,13 @@ 2003-05-20 warnes - * [r195] R/interleave.R: - Omit NULL variables. - * [r194] R/trim.R, man/trim.Rd: - Added function trim() and - assocated docs. + * [r195] - Omit NULL variables. + * [r194] - Added function trim() and assocated docs. 2003-04-22 warnes - * [r188] R/reorder.R, man/reorder.Rd: - The mva package (which is - part of recommended) now provides a + * [r188] - The mva package (which is part of recommended) now + provides a generic 'reorder' function. Consequently, the 'reorder' function here has been renamed to 'reorder.factor'. @@ -1524,95 +1295,84 @@ 2003-03-03 warnes - * [r165] man/reorder.Rd: - Updated to match reorder.Rd which was - exetended to handle factor + * [r165] - Updated to match reorder.Rd which was exetended to + handle factor label names in addition to numeric indices. - * [r164] R/reorder.R: - Added handling of factor level names in - addition to numeric indexes. + * [r164] - Added handling of factor level names in addition to + numeric indexes. 2002-09-23 warnes - * [r118] inst, inst/doc, inst/doc/Rnews.dtx, inst/doc/Rnews.sty, - inst/doc/gregmisc.pdf, inst/doc/gregmisc.tex: Added inst/doc - directory and contents to CVS. - * [r117] R/aggregate.table.R, R/combine.R, R/interleave.R, - R/nobs.R, man/aggregate.table.Rd, man/combine.Rd, - man/interleave.Rd, man/nobs.Rd, man/rename.vars.Rd, - man/reorder.Rd: - Modified all files to include CVS Id and Log - tags. + * [r118] Added inst/doc directory and contents to CVS. + * [r117] - Modified all files to include CVS Id and Log tags. 2002-08-01 warnes - * [r112] R/reorder.R: Added reorder() function to reorder the - levels of a factor. + * [r112] Added reorder() function to reorder the levels of a + factor. 
2002-04-09 warneg - * [r109] R/rename.vars.R, man/aggregate.table.Rd, - man/interleave.Rd, man/reorder.Rd: Checkin for version 0.5.3 - * [r108] R/interleave.R: - Properly handle case when some or all - arguments are vectors. + * [r109] Checkin for version 0.5.3 + * [r108] - Properly handle case when some or all arguments are + vectors. 2002-03-26 warneg - * [r104] man/reorder.Rd: - Changed methods to include '...' to - match the generic. + * [r104] - Changed methods to include '...' to match the generic. - Updated for version 0.5.1 - * [r102] R/nobs.R: Added ... to methods. - * [r101] man/nobs.Rd: Updated to add ... parameter to function - calls. - * [r98] man/reorder.Rd: Initial checkin. - * [r95] R/nobs.R: - Added CVS tags + * [r102] Added ... to methods. + * [r101] Updated to add ... parameter to function calls. + * [r98] Initial checkin. + * [r95] - Added CVS tags 2002-02-21 warneg - * [r87] R/aggregate.table.R: - Fixed bug where row and column - labels didn't always correspond to the + * [r87] - Fixed bug where row and column labels didn't always + correspond to the contents. This only occured when a factor was used for by1 or by2 and the factors levels weren't in the default sort order. 2002-02-20 warneg - * [r86] R/aggregate.table.R: New function. - * [r85] man/aggregate.table.Rd: Initial checkin. - * [r84] R/interleave.R, man/interleave.Rd: Initial checkin. - * [r83] man/nobs.Rd: Noted that specialized methods exist. - * [r82] man/nobs.Rd: Incorrectly had contents of nobs.R here - instead of help text. Corrected. - * [r81] man/rename.vars.Rd: Minor changes, typo and formatting - fixes. - * [r79] R/nobs.R, man/nobs.Rd: - initial checkin. + * [r86] New function. + * [r85] Initial checkin. + * [r84] Initial checkin. + * [r83] Noted that specialized methods exist. + * [r82] Incorrectly had contents of nobs.R here instead of help + text. Corrected. + * [r81] Minor changes, typo and formatting fixes. + * [r79] - initial checkin. 2001-12-12 warneg - * [r53] man/rename.vars.Rd: Added omitted documentaton for 'info' - parameter. Changed example code + * [r53] Added omitted documentaton for 'info' parameter. Changed + example code not to use 'Orthodont' data set so that the nlme package is not required. 2001-12-08 warneg - * [r47] R/rename.vars.R: Changed 'T' to 'TRUE' in parameter list. + * [r47] Changed 'T' to 'TRUE' in parameter list. 2001-12-07 warneg - * [r45] man/rename.vars.Rd: - Fixed see also link. Mis-typed - 'data.frame' as 'dataframe'. - * [r44] R/rename.vars.R: Added attribution. - * [r43] man/rename.vars.Rd: Added proper attribution to Don - MacQueen. - * [r39] man/rename.vars.Rd: Initial checkin. Unfortunately, I've - lost the email of the person who + * [r45] - Fixed see also link. Mis-typed 'data.frame' as + 'dataframe'. + * [r44] Added attribution. + * [r43] Added proper attribution to Don MacQueen. + * [r39] Initial checkin. Unfortunately, I've lost the email of the + person who sent this to me. I'll credit him/her when I find out who it was! - * [r38] R/rename.vars.R: Initial checkin + * [r38] Initial checkin 2001-12-05 warneg - * [r34] R, R/combine.R: - Renamed 'concat' function to 'combine' to - avoid name conflict with + * [r34] - Renamed 'concat' function to 'combine' to avoid name + conflict with an existing S-Plus function. 
- * [r32] ., man, man/combine.Rd: - Changed function name 'concat' to - 'combine' and renamed concat.Rd to + * [r32] - Changed function name 'concat' to 'combine' and renamed + concat.Rd to combine.Rd Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/inst/doc/mapLevels.pdf and /tmp/J1mSmQsMZw/gdata-2.17.0/inst/doc/mapLevels.pdf differ Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/inst/doc/unknown.pdf and /tmp/J1mSmQsMZw/gdata-2.17.0/inst/doc/unknown.pdf differ diff -Nru gdata-2.13.3/inst/NEWS gdata-2.17.0/inst/NEWS --- gdata-2.13.3/inst/NEWS 2014-04-05 18:41:12.000000000 +0000 +++ gdata-2.17.0/inst/NEWS 2015-07-03 02:51:41.000000000 +0000 @@ -1,12 +1,123 @@ +Changes in 2.17.0 (2015-07-02) +------------------------------ + +New features: + +- Add new argument 'byrow' to upperTriangle(), lowerTriangle(), + upperTriangle<-(), and lowerTriangle<-() to specify by-row rather + by-column order. This makes it simpler to copy values between the + lower and upper triangular areas, e.g. to construct a symmetric + matrix. + +Other changes: + +- Add inline comments to tests to alert reviewers of expected diffs on + systems lacking the libraries for read.xls() to support XLSX + formatted files. + + +Changes in 2.16.1 (2015-04-28) +----------------------------- + +Bug fixes: + +- mapLevels() no longer generates warnings about conversion of lists to vectors. + +Other changes: + +- Requirement for Perl version 5.10.0 or later is specified in the + package DESCRITION. + +- first() and last() are now simply wrappers for calls to 'head(x, n=1)' and + 'tail(x, n=1)', respectively. + + +Changes in 2.16.0 (2015-04-25) +------------------------------ + +New features: + +- New functions first() and last() to return the first or last element + of an object. + +- New functions left() and right() to return the leftmost or + rightmost n (default to 6) columns of a matrix or dataframe. + +- New 'scientific' argument to write.fwf(). Set 'scientific=FALSE' to + prevent numeric columns from being displayed using scientific + notification. + +- The 'standard' argument to humanReadable() now accepts three values, + 'SI' for base 1000 ('MB'), 'IEC' for base 1024 ('MiB'), and 'Unix' + for base 1024 and single-character units ('M') + +- object.size() now returns objects with S3 class 'object_sizes' (note the + final 's') to avoid conflicts with methods in utils for class 'object_size' + provided by package 'utils' which can only handle a scalar size. + +- New 'units' argument to humanReadable()--and hence to + print.object_sizes() and format.object_sizes()--that permits + specifying the unit to use for all values. Use 'bytes' to display + all values with the unit 'bytes', use 'auto' (or leave it missing) to + automatically select the best unit, and use a unit from the selected + standard to use that unit (i.e. 'MiB'). + +- The default arguments to humanReadable() have changed. The defaults + are now 'width=NULL' and 'digits=1', so that the default behavior is + now to show digit after the decimal for all values. + +Bug fixes: + +- reorder.factor() was ignoring the argument 'X' unless 'FUN' was + supplied, making it incompatible with the behavior of + stats:::reorder.default(). This has been corrected, so that calling + reorder on a factor with arguments 'X' and/or 'FUN' should now + return the same results whether gdata is loaded or not. (Reported + by Sam Hunter.) + +- write.fwf() now properly supports matrix objects, including matrix + objects without column names. (Reported by Carl Witthoft.) 
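The 'byrow' item for 2.17.0 above is easiest to see in a small example: reading one triangle by column and writing the other by row places every value at its transposed position, which is exactly what is needed to build a symmetric matrix. A sketch assuming gdata 2.17.0 or later:

    library(gdata)                      # needs the byrow= argument added in 2.17.0
    m <- matrix(1:16, nrow = 4)
    upperTriangle(m, byrow = TRUE) <- lowerTriangle(m)
    isSymmetric(m)                      # TRUE -- m[i, j] now equals m[j, i]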
+ +Other changes: + +- Replaced deprecated PERL function POSIX::isdigit in xls2csv.pl + (which is used by read.xls() ) with an equivalent regular + expression. (Reported by both Charles Plessy, Gerrit-jan Schutten, + and Paul Johnson. Charles also provided a patch to correct the + issue.) + +- aggregate.table(), which has been defunct gdata 2.13.3 (2014-04-04) + has now been completely removed. + + +Changes in 2.14.0 (2014-08-27) +------------------------------ + +Bug Fixes: + +- read.xls() can now properly process XLSX files with up to 16385 columns (the + maximum generated by Microsoft Excel). + +- read.xls() now properly handles XLS/XLSX files that use 1904-01-01 + as the reference value for dates instead of 1900-01-01 (the + default for MS-Excel files created on the Mac). + +Other changes: + +- Updated perl libraries and code underlying read.xls() to the latest + version, including switching from Spreadsheet::XLSX to + Spreadsheet::ParseXLSX. + + Changes in 2.13.3 (2014-04-04) ------------------------------ -Bug Fixes +Bug Fixes: - Unit tests were incorrectly checking for equality of optional POSIXlt components. (Bug reported by Brian Ripley). -Other Changes +Other Changes: - 'aggregate.table' is now defunct. See '?gdata-defunct' for details. diff -Nru gdata-2.13.3/inst/perl/Crypt/RC4.pm gdata-2.17.0/inst/perl/Crypt/RC4.pm --- gdata-2.13.3/inst/perl/Crypt/RC4.pm 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/inst/perl/Crypt/RC4.pm 2014-08-28 01:16:18.000000000 +0000 @@ -0,0 +1,165 @@ +#--------------------------------------------------------------------# +# Crypt::RC4 +# Date Written: 07-Jun-2000 04:15:55 PM +# Last Modified: 13-Dec-2001 03:33:49 PM +# Author: Kurt Kincaid (sifukurt@yahoo.com) +# Copyright (c) 2001, Kurt Kincaid +# All Rights Reserved. +# +# This is free software and may be modified and/or +# redistributed under the same terms as Perl itself. +#--------------------------------------------------------------------# + +package Crypt::RC4; + +use strict; +use vars qw( $VERSION @ISA @EXPORT $MAX_CHUNK_SIZE ); + +$MAX_CHUNK_SIZE = 1024 unless $MAX_CHUNK_SIZE; + +require Exporter; + +@ISA = qw(Exporter); +@EXPORT = qw(RC4); +$VERSION = '2.02'; + +sub new { + my ( $class, $key ) = @_; + my $self = bless {}, $class; + $self->{state} = Setup( $key ); + $self->{x} = 0; + $self->{y} = 0; + $self; +} + +sub RC4 { + my $self; + my( @state, $x, $y ); + if ( ref $_[0] ) { + $self = shift; + @state = @{ $self->{state} }; + $x = $self->{x}; + $y = $self->{y}; + } else { + @state = Setup( shift ); + $x = $y = 0; + } + my $message = shift; + my $num_pieces = do { + my $num = length($message) / $MAX_CHUNK_SIZE; + my $int = int $num; + $int == $num ? $int : $int+1; + }; + for my $piece ( 0..$num_pieces - 1 ) { + my @message = unpack "C*", substr($message, $piece * $MAX_CHUNK_SIZE, $MAX_CHUNK_SIZE); + for ( @message ) { + $x = 0 if ++$x > 255; + $y -= 256 if ($y += $state[$x]) > 255; + @state[$x, $y] = @state[$y, $x]; + $_ ^= $state[( $state[$x] + $state[$y] ) % 256]; + } + substr($message, $piece * $MAX_CHUNK_SIZE, $MAX_CHUNK_SIZE) = pack "C*", @message; + } + if ($self) { + $self->{state} = \@state; + $self->{x} = $x; + $self->{y} = $y; + } + $message; +} + +sub Setup { + my @k = unpack( 'C*', shift ); + my @state = 0..255; + my $y = 0; + for my $x (0..255) { + $y = ( $k[$x % @k] + $state[$x] + $y ) % 256; + @state[$x, $y] = @state[$y, $x]; + } + wantarray ? 
@state : \@state; +} + + +1; +__END__ + +=head1 NAME + +Crypt::RC4 - Perl implementation of the RC4 encryption algorithm + +=head1 SYNOPSIS + +# Functional Style + use Crypt::RC4; + $encrypted = RC4( $passphrase, $plaintext ); + $decrypt = RC4( $passphrase, $encrypted ); + +# OO Style + use Crypt::RC4; + $ref = Crypt::RC4->new( $passphrase ); + $encrypted = $ref->RC4( $plaintext ); + + $ref2 = Crypt::RC4->new( $passphrase ); + $decrypted = $ref2->RC4( $encrypted ); + +# process an entire file, one line at a time +# (Warning: Encrypted file leaks line lengths.) + $ref3 = Crypt::RC4->new( $passphrase ); + while () { + chomp; + print $ref3->RC4($_), "\n"; + } + +=head1 DESCRIPTION + +A simple implementation of the RC4 algorithm, developed by RSA Security, Inc. Here is the description +from RSA's website: + +RC4 is a stream cipher designed by Rivest for RSA Data Security (now RSA Security). It is a variable +key-size stream cipher with byte-oriented operations. The algorithm is based on the use of a random +permutation. Analysis shows that the period of the cipher is overwhelmingly likely to be greater than +10100. Eight to sixteen machine operations are required per output byte, and the cipher can be +expected to run very quickly in software. Independent analysts have scrutinized the algorithm and it +is considered secure. + +Based substantially on the "RC4 in 3 lines of perl" found at http://www.cypherspace.org + +A major bug in v1.0 was fixed by David Hook (dgh@wumpus.com.au). Thanks, David. + +=head1 AUTHOR + +Kurt Kincaid (sifukurt@yahoo.com) +Ronald Rivest for RSA Security, Inc. + +=head1 BUGS + +Disclaimer: Strictly speaking, this module uses the "alleged" RC4 +algorithm. The Algorithm known as "RC4" is a trademark of RSA Security +Inc., and this document makes no claims one way or another that this +is the correct algorithm, and further, make no claims about the +quality of the source code nor any licensing requirements for +commercial use. + +There's nothing preventing you from using this module in an insecure +way which leaks information. For example, encrypting multilple +messages with the same passphrase may allow an attacker to decode all of +them with little effort, even though they'll appear to be secured. If +serious crypto is your goal, be careful. Be very careful. + +It's a pure-Perl implementation, so that rating of "Eight +to sixteen machine operations" is good for nothing but a good laugh. +If encryption and decryption are a bottleneck for you, please re-write +this module to use native code wherever practical. + +=head1 LICENSE + +This is free software and may be modified and/or +redistributed under the same terms as Perl itself. + +=head1 SEE ALSO + +L, L, L, +L, +L + +=cut diff -Nru gdata-2.13.3/inst/perl/Digest/Perl/MD5.pm gdata-2.17.0/inst/perl/Digest/Perl/MD5.pm --- gdata-2.13.3/inst/perl/Digest/Perl/MD5.pm 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/inst/perl/Digest/Perl/MD5.pm 2014-08-28 01:21:54.000000000 +0000 @@ -0,0 +1,476 @@ +package Digest::Perl::MD5; +use strict; +use integer; +use Exporter; +use vars qw($VERSION @ISA @EXPORTER @EXPORT_OK); + +@EXPORT_OK = qw(md5 md5_hex md5_base64); + +@ISA = 'Exporter'; +$VERSION = '1.9'; + +# I-Vektor +sub A() { 0x67_45_23_01 } +sub B() { 0xef_cd_ab_89 } +sub C() { 0x98_ba_dc_fe } +sub D() { 0x10_32_54_76 } + +# for internal use +sub MAX() { 0xFFFFFFFF } + +# pad a message to a multiple of 64 +sub padding { + my $l = length (my $msg = shift() . 
chr(128)); + $msg .= "\0" x (($l%64<=56?56:120)-$l%64); + $l = ($l-1)*8; + $msg .= pack 'VV', $l & MAX , ($l >> 16 >> 16); +} + + +sub rotate_left($$) { + #$_[0] << $_[1] | $_[0] >> (32 - $_[1]); + #my $right = $_[0] >> (32 - $_[1]); + #my $rmask = (1 << $_[1]) - 1; + ($_[0] << $_[1]) | (( $_[0] >> (32 - $_[1]) ) & ((1 << $_[1]) - 1)); + #$_[0] << $_[1] | (($_[0]>> (32 - $_[1])) & (1 << (32 - $_[1])) - 1); +} + +sub gen_code { + # Discard upper 32 bits on 64 bit archs. + my $MSK = ((1 << 16) << 16) ? ' & ' . MAX : ''; +# FF => "X0=rotate_left(((X1&X2)|(~X1&X3))+X0+X4+X6$MSK,X5)+X1$MSK;", +# GG => "X0=rotate_left(((X1&X3)|(X2&(~X3)))+X0+X4+X6$MSK,X5)+X1$MSK;", + my %f = ( + FF => "X0=rotate_left((X3^(X1&(X2^X3)))+X0+X4+X6$MSK,X5)+X1$MSK;", + GG => "X0=rotate_left((X2^(X3&(X1^X2)))+X0+X4+X6$MSK,X5)+X1$MSK;", + HH => "X0=rotate_left((X1^X2^X3)+X0+X4+X6$MSK,X5)+X1$MSK;", + II => "X0=rotate_left((X2^(X1|(~X3)))+X0+X4+X6$MSK,X5)+X1$MSK;", + ); + #unless ( (1 << 16) << 16) { %f = %{$CODES{'32bit'}} } + #else { %f = %{$CODES{'64bit'}} } + + my %s = ( # shift lengths + S11 => 7, S12 => 12, S13 => 17, S14 => 22, S21 => 5, S22 => 9, S23 => 14, + S24 => 20, S31 => 4, S32 => 11, S33 => 16, S34 => 23, S41 => 6, S42 => 10, + S43 => 15, S44 => 21 + ); + + my $insert = "\n"; + while(defined( my $data = )) { + chomp $data; + next unless $data =~ /^[FGHI]/; + my ($func,@x) = split /,/, $data; + my $c = $f{$func}; + $c =~ s/X(\d)/$x[$1]/g; + $c =~ s/(S\d{2})/$s{$1}/; + $c =~ s/^(.*)=rotate_left\((.*),(.*)\)\+(.*)$//; + + my $su = 32 - $3; + my $sh = (1 << $3) - 1; + + $c = "$1=(((\$r=$2)<<$3)|((\$r>>$su)&$sh))+$4"; + + #my $rotate = "(($2 << $3) || (($2 >> (32 - $3)) & (1 << $2) - 1)))"; + # $c = "\$r = $2; + # $1 = ((\$r << $3) | ((\$r >> (32 - $3)) & ((1 << $3) - 1))) + $4"; + $insert .= "\t$c\n"; + } + close DATA; + + my $dump = ' + sub round { + my ($a,$b,$c,$d) = @_[0 .. 3]; + my $r;' . $insert . ' + $_[0]+$a' . $MSK . ', $_[1]+$b ' . $MSK . + ', $_[2]+$c' . $MSK . ', $_[3]+$d' . $MSK . '; + }'; + eval $dump; + # print "$dump\n"; + # exit 0; +} + +gen_code(); + +######################################### +# Private output converter functions: +sub _encode_hex { unpack 'H*', $_[0] } +sub _encode_base64 { + my $res; + while ($_[0] =~ /(.{1,45})/gs) { + $res .= substr pack('u', $1), 1; + chop $res; + } + $res =~ tr|` -_|AA-Za-z0-9+/|;#` + chop $res; chop $res; + $res +} + +######################################### +# OOP interface: +sub new { + my $proto = shift; + my $class = ref $proto || $proto; + my $self = {}; + bless $self, $class; + $self->reset(); + $self +} + +sub reset { + my $self = shift; + delete $self->{_data}; + $self->{_state} = [A,B,C,D]; + $self->{_length} = 0; + $self +} + +sub add { + my $self = shift; + $self->{_data} .= join '', @_ if @_; + my ($i,$c); + for $i (0 .. 
(length $self->{_data})/64-1) { + my @X = unpack 'V16', substr $self->{_data}, $i*64, 64; + @{$self->{_state}} = round(@{$self->{_state}},@X); + ++$c; + } + if ($c) { + substr ($self->{_data}, 0, $c*64) = ''; + $self->{_length} += $c*64; + } + $self +} + +sub finalize { + my $self = shift; + $self->{_data} .= chr(128); + my $l = $self->{_length} + length $self->{_data}; + $self->{_data} .= "\0" x (($l%64<=56?56:120)-$l%64); + $l = ($l-1)*8; + $self->{_data} .= pack 'VV', $l & MAX , ($l >> 16 >> 16); + $self->add(); + $self +} + +sub addfile { + my ($self,$fh) = @_; + if (!ref($fh) && ref(\$fh) ne "GLOB") { + require Symbol; + $fh = Symbol::qualify($fh, scalar caller); + } + # $self->{_data} .= do{local$/;<$fh>}; + my $read = 0; + my $buffer = ''; + $self->add($buffer) while $read = read $fh, $buffer, 8192; + die __PACKAGE__, " read failed: $!" unless defined $read; + $self +} + +sub add_bits { + my $self = shift; + return $self->add( pack 'B*', shift ) if @_ == 1; + my ($b,$n) = @_; + die __PACKAGE__, " Invalid number of bits\n" if $n%8; + $self->add( substr $b, 0, $n/8 ) +} + +sub digest { + my $self = shift; + $self->finalize(); + my $res = pack 'V4', @{$self->{_state}}; + $self->reset(); + $res +} + +sub hexdigest { + _encode_hex($_[0]->digest) +} + +sub b64digest { + _encode_base64($_[0]->digest) +} + +sub clone { + my $self = shift; + my $clone = { + _state => [@{$self->{_state}}], + _length => $self->{_length}, + _data => $self->{_data} + }; + bless $clone, ref $self || $self; +} + +######################################### +# Procedural interface: +sub md5 { + my $message = padding(join'',@_); + my ($a,$b,$c,$d) = (A,B,C,D); + my $i; + for $i (0 .. (length $message)/64-1) { + my @X = unpack 'V16', substr $message,$i*64,64; + ($a,$b,$c,$d) = round($a,$b,$c,$d,@X); + } + pack 'V4',$a,$b,$c,$d; +} +sub md5_hex { _encode_hex &md5 } +sub md5_base64 { _encode_base64 &md5 } + + +1; + +=head1 NAME + +Digest::MD5::Perl - Perl implementation of Ron Rivests MD5 Algorithm + +=head1 DISCLAIMER + +This is B an interface (like C) but a Perl implementation of MD5. +It is written in perl only and because of this it is slow but it works without C-Code. +You should use C instead of this module if it is available. +This module is only useful for + +=over 4 + +=item + +computers where you cannot install C (e.g. lack of a C-Compiler) + +=item + +encrypting only small amounts of data (less than one million bytes). I use it to +hash passwords. + +=item + +educational purposes + +=back + +=head1 SYNOPSIS + + # Functional style + use Digest::MD5 qw(md5 md5_hex md5_base64); + + $hash = md5 $data; + $hash = md5_hex $data; + $hash = md5_base64 $data; + + + # OO style + use Digest::MD5; + + $ctx = Digest::MD5->new; + + $ctx->add($data); + $ctx->addfile(*FILE); + + $digest = $ctx->digest; + $digest = $ctx->hexdigest; + $digest = $ctx->b64digest; + +=head1 DESCRIPTION + +This modules has the same interface as the much faster C. So you can +easily exchange them, e.g. + + BEGIN { + eval { + require Digest::MD5; + import Digest::MD5 'md5_hex' + }; + if ($@) { # ups, no Digest::MD5 + require Digest::Perl::MD5; + import Digest::Perl::MD5 'md5_hex' + } + } + +If the C module is available it is used and if not you take +C. + +You can also install the Perl part of Digest::MD5 together with Digest::Perl::MD5 +and use Digest::MD5 as normal, it falls back to Digest::Perl::MD5 if it +cannot load its object files. + +For a detailed Documentation see the C module. 
+ +=head1 EXAMPLES + +The simplest way to use this library is to import the md5_hex() +function (or one of its cousins): + + use Digest::Perl::MD5 'md5_hex'; + print 'Digest is ', md5_hex('foobarbaz'), "\n"; + +The above example would print out the message + + Digest is 6df23dc03f9b54cc38a0fc1483df6e21 + +provided that the implementation is working correctly. The same +checksum can also be calculated in OO style: + + use Digest::MD5; + + $md5 = Digest::MD5->new; + $md5->add('foo', 'bar'); + $md5->add('baz'); + $digest = $md5->hexdigest; + + print "Digest is $digest\n"; + +The digest methods are destructive. That means you can only call them +once and the $md5 objects is reset after use. You can make a copy with clone: + + $md5->clone->hexdigest + +=head1 LIMITATIONS + +This implementation of the MD5 algorithm has some limitations: + +=over 4 + +=item + +It's slow, very slow. I've done my very best but Digest::MD5 is still about 100 times faster. +You can only encrypt Data up to one million bytes in an acceptable time. But it's very useful +for encrypting small amounts of data like passwords. + +=item + +You can only encrypt up to 2^32 bits = 512 MB on 32bit archs. But You should +use C for those amounts of data anyway. + +=back + +=head1 SEE ALSO + +L + +L + +RFC 1321 + +tools/md5: a small BSD compatible md5 tool written in pure perl. + +=head1 COPYRIGHT + +This library is free software; you can redistribute it and/or +modify it under the same terms as Perl itself. + + Copyright 2000 Christian Lackas, Imperia Software Solutions + Copyright 1998-1999 Gisle Aas. + Copyright 1995-1996 Neil Winton. + Copyright 1991-1992 RSA Data Security, Inc. + +The MD5 algorithm is defined in RFC 1321. The basic C code +implementing the algorithm is derived from that in the RFC and is +covered by the following copyright: + +=over 4 + +=item + +Copyright (C) 1991-1992, RSA Data Security, Inc. Created 1991. All +rights reserved. + +License to copy and use this software is granted provided that it +is identified as the "RSA Data Security, Inc. MD5 Message-Digest +Algorithm" in all material mentioning or referencing this software +or this function. + +License is also granted to make and use derivative works provided +that such works are identified as "derived from the RSA Data +Security, Inc. MD5 Message-Digest Algorithm" in all material +mentioning or referencing the derived work. + +RSA Data Security, Inc. makes no representations concerning either +the merchantability of this software or the suitability of this +software for any particular purpose. It is provided "as is" +without express or implied warranty of any kind. + +These notices must be retained in any copies of any part of this +documentation and/or software. + +=back + +This copyright does not prohibit distribution of any version of Perl +containing this extension under the terms of the GNU or Artistic +licenses. + +=head1 AUTHORS + +The original MD5 interface was written by Neil Winton +(). + +C was made by Gisle Aas (I took his Interface +and part of the documentation). + +Thanks to Guido Flohr for his 'use integer'-hint. + +This release was made by Christian Lackas . 
+ +=cut + +__DATA__ +FF,$a,$b,$c,$d,$_[4],7,0xd76aa478,/* 1 */ +FF,$d,$a,$b,$c,$_[5],12,0xe8c7b756,/* 2 */ +FF,$c,$d,$a,$b,$_[6],17,0x242070db,/* 3 */ +FF,$b,$c,$d,$a,$_[7],22,0xc1bdceee,/* 4 */ +FF,$a,$b,$c,$d,$_[8],7,0xf57c0faf,/* 5 */ +FF,$d,$a,$b,$c,$_[9],12,0x4787c62a,/* 6 */ +FF,$c,$d,$a,$b,$_[10],17,0xa8304613,/* 7 */ +FF,$b,$c,$d,$a,$_[11],22,0xfd469501,/* 8 */ +FF,$a,$b,$c,$d,$_[12],7,0x698098d8,/* 9 */ +FF,$d,$a,$b,$c,$_[13],12,0x8b44f7af,/* 10 */ +FF,$c,$d,$a,$b,$_[14],17,0xffff5bb1,/* 11 */ +FF,$b,$c,$d,$a,$_[15],22,0x895cd7be,/* 12 */ +FF,$a,$b,$c,$d,$_[16],7,0x6b901122,/* 13 */ +FF,$d,$a,$b,$c,$_[17],12,0xfd987193,/* 14 */ +FF,$c,$d,$a,$b,$_[18],17,0xa679438e,/* 15 */ +FF,$b,$c,$d,$a,$_[19],22,0x49b40821,/* 16 */ +GG,$a,$b,$c,$d,$_[5],5,0xf61e2562,/* 17 */ +GG,$d,$a,$b,$c,$_[10],9,0xc040b340,/* 18 */ +GG,$c,$d,$a,$b,$_[15],14,0x265e5a51,/* 19 */ +GG,$b,$c,$d,$a,$_[4],20,0xe9b6c7aa,/* 20 */ +GG,$a,$b,$c,$d,$_[9],5,0xd62f105d,/* 21 */ +GG,$d,$a,$b,$c,$_[14],9,0x2441453,/* 22 */ +GG,$c,$d,$a,$b,$_[19],14,0xd8a1e681,/* 23 */ +GG,$b,$c,$d,$a,$_[8],20,0xe7d3fbc8,/* 24 */ +GG,$a,$b,$c,$d,$_[13],5,0x21e1cde6,/* 25 */ +GG,$d,$a,$b,$c,$_[18],9,0xc33707d6,/* 26 */ +GG,$c,$d,$a,$b,$_[7],14,0xf4d50d87,/* 27 */ +GG,$b,$c,$d,$a,$_[12],20,0x455a14ed,/* 28 */ +GG,$a,$b,$c,$d,$_[17],5,0xa9e3e905,/* 29 */ +GG,$d,$a,$b,$c,$_[6],9,0xfcefa3f8,/* 30 */ +GG,$c,$d,$a,$b,$_[11],14,0x676f02d9,/* 31 */ +GG,$b,$c,$d,$a,$_[16],20,0x8d2a4c8a,/* 32 */ +HH,$a,$b,$c,$d,$_[9],4,0xfffa3942,/* 33 */ +HH,$d,$a,$b,$c,$_[12],11,0x8771f681,/* 34 */ +HH,$c,$d,$a,$b,$_[15],16,0x6d9d6122,/* 35 */ +HH,$b,$c,$d,$a,$_[18],23,0xfde5380c,/* 36 */ +HH,$a,$b,$c,$d,$_[5],4,0xa4beea44,/* 37 */ +HH,$d,$a,$b,$c,$_[8],11,0x4bdecfa9,/* 38 */ +HH,$c,$d,$a,$b,$_[11],16,0xf6bb4b60,/* 39 */ +HH,$b,$c,$d,$a,$_[14],23,0xbebfbc70,/* 40 */ +HH,$a,$b,$c,$d,$_[17],4,0x289b7ec6,/* 41 */ +HH,$d,$a,$b,$c,$_[4],11,0xeaa127fa,/* 42 */ +HH,$c,$d,$a,$b,$_[7],16,0xd4ef3085,/* 43 */ +HH,$b,$c,$d,$a,$_[10],23,0x4881d05,/* 44 */ +HH,$a,$b,$c,$d,$_[13],4,0xd9d4d039,/* 45 */ +HH,$d,$a,$b,$c,$_[16],11,0xe6db99e5,/* 46 */ +HH,$c,$d,$a,$b,$_[19],16,0x1fa27cf8,/* 47 */ +HH,$b,$c,$d,$a,$_[6],23,0xc4ac5665,/* 48 */ +II,$a,$b,$c,$d,$_[4],6,0xf4292244,/* 49 */ +II,$d,$a,$b,$c,$_[11],10,0x432aff97,/* 50 */ +II,$c,$d,$a,$b,$_[18],15,0xab9423a7,/* 51 */ +II,$b,$c,$d,$a,$_[9],21,0xfc93a039,/* 52 */ +II,$a,$b,$c,$d,$_[16],6,0x655b59c3,/* 53 */ +II,$d,$a,$b,$c,$_[7],10,0x8f0ccc92,/* 54 */ +II,$c,$d,$a,$b,$_[14],15,0xffeff47d,/* 55 */ +II,$b,$c,$d,$a,$_[5],21,0x85845dd1,/* 56 */ +II,$a,$b,$c,$d,$_[12],6,0x6fa87e4f,/* 57 */ +II,$d,$a,$b,$c,$_[19],10,0xfe2ce6e0,/* 58 */ +II,$c,$d,$a,$b,$_[10],15,0xa3014314,/* 59 */ +II,$b,$c,$d,$a,$_[17],21,0x4e0811a1,/* 60 */ +II,$a,$b,$c,$d,$_[8],6,0xf7537e82,/* 61 */ +II,$d,$a,$b,$c,$_[15],10,0xbd3af235,/* 62 */ +II,$c,$d,$a,$b,$_[6],15,0x2ad7d2bb,/* 63 */ +II,$b,$c,$d,$a,$_[13],21,0xeb86d391,/* 64 */ diff -Nru gdata-2.13.3/inst/perl/Graphics/ColorUtils.pm gdata-2.17.0/inst/perl/Graphics/ColorUtils.pm --- gdata-2.13.3/inst/perl/Graphics/ColorUtils.pm 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/inst/perl/Graphics/ColorUtils.pm 2014-08-28 01:12:41.000000000 +0000 @@ -0,0 +1,1393 @@ + +package Graphics::ColorUtils; + +use 5.008003; +use strict; +use warnings; + +use Carp; + +require Exporter; + +our @ISA = qw(Exporter); + +our %EXPORT_TAGS = ( 'gradients' => [ qw( gradient + grad2rgb + available_gradients + register_gradient) ], + 'names' => [ qw( name2rgb + available_names + register_name + set_default_namespace + 
get_default_namespace ) ], + 'all' => [ qw( rgb2yiq yiq2rgb + rgb2cmy cmy2rgb + rgb2hls hls2rgb + rgb2hsv hsv2rgb + gradient + grad2rgb + available_gradients + register_gradient + name2rgb + available_names + register_name + set_default_namespace + get_default_namespace ) ], + ); + +our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); + +our @EXPORT = qw( rgb2yiq yiq2rgb + rgb2cmy cmy2rgb + rgb2hls hls2rgb + rgb2hsv hsv2rgb ); + +our $VERSION = '0.17'; + +# ================================================== +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +# ================================================== + +# ================================================== +# Utility + +# Takes a (r,g,b) triple of numbers (possibly floats) and returns +# - a string like '#33FF21' in scalar context +# - a triple of corresponding integers in array context +sub _fmt { + return wantarray ? map { int } @_ : sprintf( "#%02x%02x%02x", @_ ); +} + +# ================================================== +# YIQ + +sub rgb2yiq { + # $r, $g, $b : 0..255 + my ( $r, $g, $b ) = map { $_/255.0 } @_; # Scale RGB to 0..1 + + my $y = 0.299*$r + 0.587*$g + 0.114*$b; + my $i = 0.596*$r - 0.275*$g - 0.321*$b; + my $q = 0.212*$r - 0.523*$g + 0.311*$b; + + return ( $y, $i, $q ); +} + +sub yiq2rgb { + # $y, $i, $q : 0..1 + my ( $y, $i, $q ) = @_; + + my $r = 255.0*( $y + 0.956*$i + 0.621*$q ); + my $g = 255.0*( $y - 0.272*$i - 0.647*$q ); + my $b = 255.0*( $y - 1.105*$i + 1.705*$q ); + + return _fmt( $r, $g, $b ); +} + +# ================================================== +# CMY + +sub rgb2cmy { + # $r, $g, $b : 0..255 + my ( $r, $g, $b ) = map { $_/255.0 } @_; # Scale RGB to 0..1 + + return ( 1.0 - $r, 1.0 - $g, 1.0 - $b ); +} + +sub cmy2rgb { + # $c, $m, $y : 0..1 + my ( $c, $m, $y ) = @_; + + return _fmt( 255*(1.0-$c), 255*(1.0-$m), 255*(1.0-$y) ); +} + +# ================================================== +# HLS + +# Foley, van Dam, et al: +# Computer Grapics-Principles and Practice (1990) p595f + +sub rgb2hls { + # $r, $g, $b : 0..255 + # Note special name '$bb' to avoid conflict with ($a,$b) in sort() + my ( $r, $g, $bb ) = map { $_/255.0 } @_; # Scale RGB to 0..1 + + my ( $minc, $maxc ) = ( sort { $a <=> $b } ( $r, $g, $bb ) )[0,2]; + + my $m = $minc + $maxc; # "Mean" + + if( $maxc == $minc ) { return ( 0, 0.5*$m, 0 ); } # Achromatic case + + my $d = $maxc - $minc; # "Delta" + my $s = ( $m <= 1.0 ) ? $d/$m : $d/(2.0-$m ); # Saturation + + my $h = 0; # Hue + if( $r == $maxc ) { $h = ( $g-$bb )/$d; } + elsif( $g == $maxc ) { $h = 2 + ( $bb-$r )/$d; } + elsif( $bb == $maxc ) { $h = 4 + ( $r-$g )/$d; } + else { + # Never get here! + croak "Internal Error: Unexpected value ,$maxc, in Graphics::ColorUtils::rgb2hls( $r, $g, $bb )"; + } + + $h *= 60; # Convert to degrees + if( $h < 0 ) { $h += 360; } # Ensure positive hue + + return ( $h, 0.5*$m, $s ); +} + +sub hls2rgb { + # $h: 0..360 (red=0->yellow->green=120->cyan->blue=240->magenta steps of 60) + # $l, $s : 0..1 (inclusive) + my ( $h, $l, $s ) = @_; + + if( $s == 0.0 ) { return _fmt(255*$l, 255*$l, 255*$l); } # achromatic (grey) + +# This is the INCORRECT line as it is in the book quoted above: +# my $m2 = ( $l <= 0.5 ) ? ($l*($l+$s)) : ($l - $l*$s + $s); +# This is the CORRECT line: (first alternative: 1 vs $l) + my $m2 = ( $l <= 0.5 ) ? 
($l*(1+$s)) : ($l - $l*$s + $s); + my $m1 = 2.0*$l - $m2; + + my $r = 255 * _value( $m1, $m2, $h + 120 ); + my $g = 255 * _value( $m1, $m2, $h ); + my $b = 255 * _value( $m1, $m2, $h - 120 ); + + return _fmt( $r, $g, $b ); +} + +sub _value { + my ( $n1, $n2, $hue ) = @_; + + if( $hue > 360 ) { $hue -= 360; } + elsif( $hue < 0 ) { $hue += 360; } + + if( $hue < 60 ) { return $n1 + $hue * ( $n2-$n1 )/60.0; } + elsif( $hue < 180 ) { return $n2; } + elsif( $hue < 240 ) { return $n1 + ( 240-$hue ) * ( $n2-$n1 )/60.0; } + else { return $n1; } +} + +# ================================================== +# HSV + +# Foley, van Dam, et al: +# Computer Grapics-Principles and Practice (1990) p592f + +sub rgb2hsv { + # $r, $g, $b : 0..25 + # Note special name '$bb' to avoid conflict with ($a,$b) in sort() + my ( $r, $g, $bb ) = map { $_/255.0 } @_; # Scale RGB to 0..1 + + my ( $minc, $maxc ) = ( sort { $a <=> $b } ( $r, $g, $bb ) )[0,2]; + + my $v = $maxc; # Value + my $d = $maxc - $minc; # "Delta" + my $s = ( $maxc == 0 ) ? 0 : $d/$maxc; # No saturation if R=G=B=0 + + if( $s == 0 ) { return ( 0, 0, $v ); } # Achromatic case + + my $h = 0; # Hue + if( $r == $maxc ) { $h = ( $g-$bb )/$d; } + elsif( $g == $maxc ) { $h = 2 + ( $bb-$r )/$d; } + elsif( $bb == $maxc ) { $h = 4 + ( $r-$g )/$d; } + else { + # Never get here! + croak "Internal Error: Unexpected value ,$maxc, in Graphics::ColorUtils::rgb2hsv( $r, $g, $bb )"; + } + + $h *= 60; # Convert to degrees + if( $h < 0 ) { $h += 360; } # Ensure positive hue + + return ( $h, $s, $v ); +} + +sub hsv2rgb { + # $h: 0..360 (red=0->yellow->green=120->cyan->blue=240->magenta steps of 60) + # (tolerates larger values of $h by reducing them to the standard circle) + # $s, $v : 0..1 (inclusive) + my ( $h, $s, $v ) = @_; + + $v *= 255; + if( $s == 0 ) { return _fmt( $v, $v, $v ); } # achromatic (grey) + + my $i = int( $h/60 ); # sector 0 to 5 + my $f = ($h/60) - $i; # fractional part of h/60 + + my $p = $v * ( 1 - $s ); + my $q = $v * ( 1 - $s * $f ); + my $t = $v * ( 1 - $s * ( 1 - $f ) ); + + $i %= 6; # tolerate values of $h larger than 360 + if( $i==0 ) { return _fmt( $v, $t, $p ); } + elsif( $i==1 ) { return _fmt( $q, $v, $p ); } + elsif( $i==2 ) { return _fmt( $p, $v, $t ); } + elsif( $i==3 ) { return _fmt( $p, $q, $v ); } + elsif( $i==4 ) { return _fmt( $t, $p, $v ); } + elsif( $i==5 ) { return _fmt( $v, $p, $q ); } + else { + # Never get here! + croak "Internal Error: Unexpected value ,$i, in Graphics::ColorUtils::hsv2rgb( $h, $s, $v )"; + } +} + +# ================================================== +# Gradients + +# Gradients grey, heat, map, and rainbow have been inspired by similar +# ideas in Yorick. 
+# For Yorick, cf http://yorick.sourceforge.net +# and also http://www.maumae.net/yorick/doc/index.php +# as well as http://www.mhatt.aps.anl.gov/dohn/software/yorick/ + +BEGIN { +my %_gradients = ( + 'grey' => [ + [ 0, 0, 0],[ 1, 1, 1],[ 2, 2, 2],[ 3, 3, 3],[ 4, 4, 4], + [ 5, 5, 5],[ 6, 6, 6],[ 7, 7, 7],[ 9, 9, 9],[ 10, 10, 10], + [ 11, 11, 11],[ 12, 12, 12],[ 13, 13, 13],[ 14, 14, 14],[ 15, 15, 15], + [ 16, 16, 16],[ 17, 17, 17],[ 18, 18, 18],[ 19, 19, 19],[ 20, 20, 20], + [ 21, 21, 21],[ 22, 22, 22],[ 23, 23, 23],[ 25, 25, 25],[ 26, 26, 26], + [ 27, 27, 27],[ 28, 28, 28],[ 29, 29, 29],[ 30, 30, 30],[ 31, 31, 31], + [ 32, 32, 32],[ 33, 33, 33],[ 34, 34, 34],[ 35, 35, 35],[ 36, 36, 36], + [ 37, 37, 37],[ 38, 38, 38],[ 39, 39, 39],[ 41, 41, 41],[ 42, 42, 42], + [ 43, 43, 43],[ 44, 44, 44],[ 45, 45, 45],[ 46, 46, 46],[ 47, 47, 47], + [ 48, 48, 48],[ 49, 49, 49],[ 50, 50, 50],[ 51, 51, 51],[ 52, 52, 52], + [ 53, 53, 53],[ 54, 54, 54],[ 55, 55, 55],[ 57, 57, 57],[ 58, 58, 58], + [ 59, 59, 59],[ 60, 60, 60],[ 61, 61, 61],[ 62, 62, 62],[ 63, 63, 63], + [ 64, 64, 64],[ 65, 65, 65],[ 66, 66, 66],[ 67, 67, 67],[ 68, 68, 68], + [ 69, 69, 69],[ 70, 70, 70],[ 71, 71, 71],[ 73, 73, 73],[ 74, 74, 74], + [ 75, 75, 75],[ 76, 76, 76],[ 77, 77, 77],[ 78, 78, 78],[ 79, 79, 79], + [ 80, 80, 80],[ 81, 81, 81],[ 82, 82, 82],[ 83, 83, 83],[ 84, 84, 84], + [ 85, 85, 85],[ 86, 86, 86],[ 87, 87, 87],[ 89, 89, 89],[ 90, 90, 90], + [ 91, 91, 91],[ 92, 92, 92],[ 93, 93, 93],[ 94, 94, 94],[ 95, 95, 95], + [ 96, 96, 96],[ 97, 97, 97],[ 98, 98, 98],[ 99, 99, 99],[100,100,100], + [101,101,101],[102,102,102],[103,103,103],[105,105,105],[106,106,106], + [107,107,107],[108,108,108],[109,109,109],[110,110,110],[111,111,111], + [112,112,112],[113,113,113],[114,114,114],[115,115,115],[116,116,116], + [117,117,117],[118,118,118],[119,119,119],[121,121,121],[122,122,122], + [123,123,123],[124,124,124],[125,125,125],[126,126,126],[127,127,127], + [128,128,128],[129,129,129],[130,130,130],[131,131,131],[132,132,132], + [133,133,133],[134,134,134],[135,135,135],[137,137,137],[138,138,138], + [139,139,139],[140,140,140],[141,141,141],[142,142,142],[143,143,143], + [144,144,144],[145,145,145],[146,146,146],[147,147,147],[148,148,148], + [149,149,149],[150,150,150],[151,151,151],[153,153,153],[154,154,154], + [155,155,155],[156,156,156],[157,157,157],[158,158,158],[159,159,159], + [160,160,160],[161,161,161],[162,162,162],[163,163,163],[164,164,164], + [165,165,165],[166,166,166],[167,167,167],[169,169,169],[170,170,170], + [171,171,171],[172,172,172],[173,173,173],[174,174,174],[175,175,175], + [176,176,176],[177,177,177],[178,178,178],[179,179,179],[180,180,180], + [181,181,181],[182,182,182],[183,183,183],[185,185,185],[186,186,186], + [187,187,187],[188,188,188],[189,189,189],[190,190,190],[191,191,191], + [192,192,192],[193,193,193],[194,194,194],[195,195,195],[196,196,196], + [197,197,197],[198,198,198],[199,199,199],[201,201,201],[202,202,202], + [203,203,203],[204,204,204],[205,205,205],[206,206,206],[207,207,207], + [208,208,208],[209,209,209],[210,210,210],[211,211,211],[212,212,212], + [213,213,213],[214,214,214],[215,215,215],[217,217,217],[218,218,218], + [219,219,219],[220,220,220],[221,221,221],[222,222,222],[223,223,223], + [224,224,224],[225,225,225],[226,226,226],[227,227,227],[228,228,228], + [229,229,229],[230,230,230],[231,231,231],[233,233,233],[234,234,234], + [235,235,235],[236,236,236],[237,237,237],[238,238,238],[239,239,239], + [240,240,240],[241,241,241],[242,242,242],[243,243,243],[244,244,244], + 
[245,245,245],[246,246,246],[247,247,247],[249,249,249],[250,250,250], + [251,251,251],[252,252,252],[253,253,253],[254,254,254],[255,255,255] + ], + 'heat' => [ + [ 0, 0, 0],[ 1, 0, 0],[ 2, 0, 0],[ 4, 0, 0],[ 5, 0, 0], + [ 7, 0, 0],[ 8, 0, 0],[ 10, 0, 0],[ 11, 0, 0],[ 13, 0, 0], + [ 15, 0, 0],[ 17, 0, 0],[ 18, 0, 0],[ 20, 0, 0],[ 21, 0, 0], + [ 23, 0, 0],[ 24, 0, 0],[ 26, 0, 0],[ 27, 0, 0],[ 28, 0, 0], + [ 30, 0, 0],[ 31, 0, 0],[ 33, 0, 0],[ 34, 0, 0],[ 36, 0, 0], + [ 37, 0, 0],[ 39, 0, 0],[ 40, 0, 0],[ 42, 0, 0],[ 43, 0, 0], + [ 46, 0, 0],[ 47, 0, 0],[ 49, 0, 0],[ 50, 0, 0],[ 52, 0, 0], + [ 53, 0, 0],[ 55, 0, 0],[ 56, 0, 0],[ 57, 0, 0],[ 59, 0, 0], + [ 60, 0, 0],[ 62, 0, 0],[ 63, 0, 0],[ 65, 0, 0],[ 66, 0, 0], + [ 68, 0, 0],[ 69, 0, 0],[ 70, 0, 0],[ 72, 0, 0],[ 73, 0, 0], + [ 76, 0, 0],[ 78, 0, 0],[ 79, 0, 0],[ 81, 0, 0],[ 82, 0, 0], + [ 84, 0, 0],[ 85, 0, 0],[ 86, 0, 0],[ 88, 0, 0],[ 89, 0, 0], + [ 92, 0, 0],[ 94, 0, 0],[ 95, 0, 0],[ 97, 0, 0],[ 98, 0, 0], + [ 99, 0, 0],[101, 0, 0],[102, 0, 0],[104, 0, 0],[105, 0, 0], + [108, 0, 0],[110, 0, 0],[111, 0, 0],[113, 0, 0],[114, 0, 0], + [115, 0, 0],[117, 0, 0],[118, 0, 0],[120, 0, 0],[121, 0, 0], + [123, 0, 0],[124, 0, 0],[126, 0, 0],[127, 0, 0],[128, 0, 0], + [130, 0, 0],[131, 0, 0],[133, 0, 0],[134, 0, 0],[136, 0, 0], + [139, 0, 0],[140, 0, 0],[141, 0, 0],[143, 0, 0],[144, 0, 0], + [146, 0, 0],[147, 0, 0],[149, 0, 0],[150, 0, 0],[152, 0, 0], + [153, 0, 0],[155, 0, 0],[156, 0, 0],[157, 0, 0],[159, 0, 0], + [160, 0, 0],[162, 0, 0],[163, 0, 0],[165, 0, 0],[166, 0, 0], + [169, 0, 0],[170, 0, 0],[172, 0, 0],[173, 0, 0],[175, 1, 0], + [176, 3, 0],[178, 5, 0],[179, 7, 0],[181, 9, 0],[182, 11, 0], + [185, 15, 0],[186, 17, 0],[188, 18, 0],[189, 20, 0],[191, 22, 0], + [192, 24, 0],[194, 26, 0],[195, 28, 0],[197, 30, 0],[198, 32, 0], + [201, 35, 0],[202, 37, 0],[204, 39, 0],[205, 41, 0],[207, 43, 0], + [208, 45, 0],[210, 47, 0],[211, 49, 0],[212, 51, 0],[214, 52, 0], + [215, 54, 0],[217, 56, 0],[218, 58, 0],[220, 60, 0],[221, 62, 0], + [223, 64, 0],[224, 66, 0],[226, 68, 0],[227, 69, 0],[228, 71, 0], + [231, 75, 0],[233, 77, 0],[234, 79, 0],[236, 81, 0],[237, 83, 0], + [239, 85, 0],[240, 86, 0],[241, 88, 0],[243, 90, 0],[244, 92, 0], + [246, 94, 0],[247, 96, 0],[249, 98, 0],[250,100, 0],[252,102, 0], + [253,103, 0],[255,105, 0],[255,107, 0],[255,109, 0],[255,111, 0], + [255,115, 0],[255,117, 0],[255,119, 0],[255,120, 0],[255,122, 0], + [255,124, 0],[255,126, 0],[255,128, 0],[255,130, 0],[255,132, 0], + [255,136, 7],[255,137, 11],[255,139, 15],[255,141, 19],[255,143, 23], + [255,145, 27],[255,147, 31],[255,149, 35],[255,151, 39],[255,153, 43], + [255,156, 51],[255,158, 54],[255,160, 58],[255,162, 62],[255,164, 66], + [255,166, 70],[255,168, 74],[255,170, 78],[255,171, 82],[255,173, 86], + [255,175, 90],[255,177, 94],[255,179, 98],[255,181,102],[255,183,105], + [255,185,109],[255,187,113],[255,188,117],[255,190,121],[255,192,125], + [255,196,133],[255,198,137],[255,200,141],[255,202,145],[255,204,149], + [255,205,153],[255,207,156],[255,209,160],[255,211,164],[255,213,168], + [255,215,172],[255,217,176],[255,219,180],[255,221,184],[255,222,188], + [255,224,192],[255,226,196],[255,228,200],[255,230,204],[255,232,207], + [255,236,215],[255,238,219],[255,239,223],[255,241,227],[255,243,231], + [255,245,235],[255,247,239],[255,249,243],[255,251,247],[255,253,251] + ], + 'map' => [ + [ 0, 0, 0],[ 0, 0, 46],[ 0, 0, 58],[ 0, 0, 69],[ 0, 0, 81], + [ 0, 0, 92],[ 0, 0,104],[ 0, 0,116],[ 0, 3,116],[ 1, 6,116], + [ 2, 8,116],[ 2, 11,116],[ 3, 13,117],[ 4, 
16,117],[ 5, 18,117], + [ 5, 21,117],[ 6, 23,117],[ 7, 26,118],[ 8, 28,118],[ 8, 31,118], + [ 9, 33,118],[ 10, 36,118],[ 11, 38,119],[ 11, 41,119],[ 12, 43,119], + [ 13, 45,119],[ 14, 48,119],[ 15, 50,120],[ 15, 52,120],[ 16, 55,120], + [ 17, 57,120],[ 18, 59,120],[ 18, 61,121],[ 19, 64,121],[ 20, 66,121], + [ 21, 68,121],[ 22, 70,121],[ 22, 72,122],[ 23, 74,122],[ 24, 77,122], + [ 25, 79,122],[ 26, 81,122],[ 26, 83,123],[ 27, 85,123],[ 28, 87,123], + [ 29, 89,123],[ 30, 91,123],[ 31, 93,124],[ 31, 95,124],[ 32, 97,124], + [ 33, 99,124],[ 34,100,124],[ 35,102,125],[ 36,104,125],[ 36,106,125], + [ 37,108,125],[ 38,109,125],[ 39,111,126],[ 40,113,126],[ 41,115,126], + [ 41,116,126],[ 42,118,126],[ 43,120,127],[ 44,121,127],[ 45,123,127], + [ 46,125,127],[ 47,126,127],[ 48,128,128],[ 48,128,126],[ 48,129,125], + [ 49,129,124],[ 49,130,123],[ 50,131,122],[ 50,131,120],[ 51,132,119], + [ 51,133,118],[ 52,133,117],[ 52,134,115],[ 53,134,114],[ 53,135,113], + [ 54,136,111],[ 54,136,110],[ 55,137,109],[ 55,138,108],[ 56,138,106], + [ 56,139,105],[ 57,140,104],[ 57,140,102],[ 58,141,101],[ 58,141,100], + [ 59,142, 98],[ 59,143, 97],[ 60,143, 96],[ 61,144, 94],[ 61,145, 93], + [ 62,145, 92],[ 62,146, 90],[ 63,146, 89],[ 63,147, 88],[ 64,148, 86], + [ 64,148, 85],[ 65,149, 84],[ 65,150, 82],[ 66,150, 81],[ 67,151, 80], + [ 67,151, 78],[ 68,152, 77],[ 68,153, 76],[ 69,153, 74],[ 69,154, 73], + [ 70,155, 71],[ 71,155, 70],[ 73,156, 71],[ 76,156, 72],[ 78,157, 72], + [ 81,158, 73],[ 83,158, 73],[ 86,159, 74],[ 88,160, 75],[ 91,160, 75], + [ 94,161, 76],[ 96,161, 76],[ 99,162, 77],[101,163, 77],[104,163, 78], + [106,164, 79],[109,165, 79],[111,165, 80],[114,166, 80],[117,166, 81], + [119,167, 82],[121,168, 82],[122,168, 82],[124,168, 83],[126,169, 83], + [128,169, 83],[129,170, 84],[131,170, 84],[133,171, 84],[135,171, 85], + [136,172, 85],[138,172, 85],[140,172, 86],[141,173, 86],[143,173, 86], + [145,174, 87],[147,174, 87],[149,175, 87],[150,175, 88],[152,175, 88], + [154,176, 88],[156,176, 89],[157,177, 89],[159,177, 89],[161,178, 90], + [163,178, 90],[165,179, 90],[166,179, 91],[168,179, 91],[170,180, 91], + [172,180, 92],[174,181, 92],[175,181, 92],[177,182, 93],[179,182, 93], + [181,183, 93],[183,183, 94],[183,182, 94],[184,181, 94],[184,181, 95], + [185,180, 95],[185,179, 95],[186,178, 96],[186,177, 96],[187,176, 97], + [187,175, 97],[187,174, 97],[188,173, 98],[188,172, 98],[189,171, 98], + [189,170, 99],[190,169, 99],[190,168, 99],[190,167,100],[191,166,100], + [191,165,100],[192,164,101],[192,163,101],[193,163,104],[195,164,106], + [196,164,108],[197,165,111],[198,165,113],[199,166,116],[201,167,118], + [202,167,121],[203,168,123],[204,169,126],[205,170,129],[207,171,131], + [208,172,134],[209,173,137],[210,174,139],[211,175,142],[213,176,145], + [214,177,148],[215,178,150],[216,179,153],[217,181,156],[219,182,159], + [220,184,162],[221,185,165],[222,187,168],[223,188,170],[225,190,173], + [226,192,176],[227,194,179],[228,196,182],[229,198,185],[231,200,189], + [232,202,192],[233,204,195],[234,206,198],[235,208,201],[237,211,204], + [238,213,207],[239,215,211],[240,218,214],[241,221,217],[243,223,220], + [244,226,224],[245,229,227],[246,232,230],[247,235,234],[249,238,237], + [250,241,241],[251,244,244],[252,248,248],[253,251,251],[255,255,255] + ], + 'rainbow' => [ + [255, 0, 42],[255, 0, 36],[255, 0, 31],[255, 0, 26],[255, 0, 20], + [255, 0, 15],[255, 0, 10],[255, 0, 4],[255, 5, 0],[255, 11, 0], + [255, 16, 0],[255, 22, 0],[255, 27, 0],[255, 32, 0],[255, 38, 0], + [255, 43, 0],[255, 48, 
0],[255, 54, 0],[255, 59, 0],[255, 65, 0], + [255, 70, 0],[255, 75, 0],[255, 81, 0],[255, 91, 0],[255, 97, 0], + [255,102, 0],[255,108, 0],[255,113, 0],[255,118, 0],[255,124, 0], + [255,129, 0],[255,135, 0],[255,140, 0],[255,145, 0],[255,151, 0], + [255,156, 0],[255,161, 0],[255,167, 0],[255,178, 0],[255,183, 0], + [255,188, 0],[255,194, 0],[255,199, 0],[255,204, 0],[255,210, 0], + [255,215, 0],[255,221, 0],[255,226, 0],[255,231, 0],[255,237, 0], + [255,242, 0],[255,247, 0],[255,253, 0],[245,255, 0],[240,255, 0], + [235,255, 0],[229,255, 0],[224,255, 0],[219,255, 0],[213,255, 0], + [208,255, 0],[202,255, 0],[197,255, 0],[192,255, 0],[186,255, 0], + [181,255, 0],[175,255, 0],[170,255, 0],[159,255, 0],[154,255, 0], + [149,255, 0],[143,255, 0],[138,255, 0],[132,255, 0],[127,255, 0], + [122,255, 0],[116,255, 0],[111,255, 0],[106,255, 0],[100,255, 0], + [ 95,255, 0],[ 89,255, 0],[ 84,255, 0],[ 73,255, 0],[ 68,255, 0], + [ 63,255, 0],[ 57,255, 0],[ 52,255, 0],[ 46,255, 0],[ 41,255, 0], + [ 36,255, 0],[ 30,255, 0],[ 25,255, 0],[ 19,255, 0],[ 14,255, 0], + [ 9,255, 0],[ 3,255, 0],[ 0,255, 1],[ 0,255, 12],[ 0,255, 17], + [ 0,255, 23],[ 0,255, 28],[ 0,255, 33],[ 0,255, 39],[ 0,255, 44], + [ 0,255, 49],[ 0,255, 55],[ 0,255, 60],[ 0,255, 66],[ 0,255, 71], + [ 0,255, 76],[ 0,255, 82],[ 0,255, 87],[ 0,255, 98],[ 0,255,103], + [ 0,255,109],[ 0,255,114],[ 0,255,119],[ 0,255,125],[ 0,255,130], + [ 0,255,135],[ 0,255,141],[ 0,255,146],[ 0,255,152],[ 0,255,157], + [ 0,255,162],[ 0,255,168],[ 0,255,173],[ 0,255,184],[ 0,255,189], + [ 0,255,195],[ 0,255,200],[ 0,255,205],[ 0,255,211],[ 0,255,216], + [ 0,255,222],[ 0,255,227],[ 0,255,232],[ 0,255,238],[ 0,255,243], + [ 0,255,248],[ 0,255,254],[ 0,250,255],[ 0,239,255],[ 0,234,255], + [ 0,228,255],[ 0,223,255],[ 0,218,255],[ 0,212,255],[ 0,207,255], + [ 0,201,255],[ 0,196,255],[ 0,191,255],[ 0,185,255],[ 0,180,255], + [ 0,174,255],[ 0,169,255],[ 0,164,255],[ 0,153,255],[ 0,148,255], + [ 0,142,255],[ 0,137,255],[ 0,131,255],[ 0,126,255],[ 0,121,255], + [ 0,115,255],[ 0,110,255],[ 0,105,255],[ 0, 99,255],[ 0, 94,255], + [ 0, 88,255],[ 0, 83,255],[ 0, 78,255],[ 0, 67,255],[ 0, 62,255], + [ 0, 56,255],[ 0, 51,255],[ 0, 45,255],[ 0, 40,255],[ 0, 35,255], + [ 0, 29,255],[ 0, 24,255],[ 0, 18,255],[ 0, 13,255],[ 0, 8,255], + [ 0, 2,255],[ 2, 0,255],[ 7, 0,255],[ 18, 0,255],[ 24, 0,255], + [ 29, 0,255],[ 34, 0,255],[ 40, 0,255],[ 45, 0,255],[ 50, 0,255], + [ 56, 0,255],[ 61, 0,255],[ 67, 0,255],[ 72, 0,255],[ 77, 0,255], + [ 83, 0,255],[ 88, 0,255],[ 93, 0,255],[104, 0,255],[110, 0,255], + [115, 0,255],[120, 0,255],[126, 0,255],[131, 0,255],[136, 0,255], + [142, 0,255],[147, 0,255],[153, 0,255],[158, 0,255],[163, 0,255], + [169, 0,255],[174, 0,255],[180, 0,255],[190, 0,255],[196, 0,255], + [201, 0,255],[206, 0,255],[212, 0,255],[217, 0,255],[223, 0,255], + [228, 0,255],[233, 0,255],[239, 0,255],[244, 0,255],[249, 0,255], + [255, 0,254],[255, 0,249],[255, 0,243],[255, 0,233],[255, 0,227], + [255, 0,222],[255, 0,217],[255, 0,211],[255, 0,206],[255, 0,201] + ] + ); + +# Returns a hash: gradient-name => color-count +sub available_gradients { + return map { $_, scalar( @{ $_gradients{$_} } ) } keys %_gradients; +} + +# Returns array-ref of rgb-triples, undef if gradient-name not found +sub gradient { + my ( $name ) = @_; + + unless( exists $_gradients{ $name } ) { return; } + + return $_gradients{$name}; +} + +# Returns the color corresponding to the position in the gradient given by f. +# Returns undef when gradient not found or f outside valid range. 
+sub grad2rgb { + my ( $name, $frac ) = @_; + + unless( exists $_gradients{ $name } ) { return; } + if( $frac < 0.0 || $frac >= 1.0 ) { return; } + + my $idx = int( $frac * scalar( @{$_gradients{$name}} ) ); + return _fmt( @{ $_gradients{$name}[$idx] } ); +} + +# Expects a gradient and and array-ref to an array of rgb triples. +# If the name already exists, the function returns the old array; undef otherws +sub register_gradient { + my ( $name, $array_ref ) = @_; + + if( exists $_gradients{ $name } ) { + my $old = $_gradients{ $name }; + $_gradients{ $name } = $array_ref; + return $old; + } + + $_gradients{ $name } = $array_ref; + return undef; +} + +} # end BEGIN (Gradients) + +# ================================================== +# Names + +BEGIN { + +my $_default_namespace = 'x11'; + +my %_colors = ( + 'www:aqua' => [ 0,255,255],'www:black' => [ 0, 0, 0], + 'www:blue' => [ 0, 0,255],'www:fuchsia' => [255, 0,255], + 'www:gray' => [190,190,190],'www:green' => [ 0,128, 0], + 'www:lime' => [ 0,255, 0],'www:maroon' => [128, 0, 0], + 'www:navy' => [ 0, 0,128],'www:olive' => [128,128, 0], + 'www:purple' => [128, 0,128],'www:red' => [255, 0, 0], + 'www:silver' => [192,192,192],'www:teal' => [ 0,128,128], + 'www:white' => [255,255,255],'www:yellow' => [255,255, 0], + 'www:orange' => [255,165, 0], + +'svg:palevioletred' => [219,112,147],'svg:mediumslateblue' => [123,104,238], +'svg:gold' => [255,215,0],'svg:gainsboro' => [220,220,220], +'svg:yellow' => [255,255,0],'svg:limegreen' => [50,205,50], +'svg:lightgoldenrodyellow' => [250,250,210],'svg:lavenderblush' => [255,240,245], +'svg:darkmagenta' => [139,0,139],'svg:darkgrey' => [169,169,169], +'svg:blanchedalmond' => [255,235,205],'svg:ghostwhite' => [248,248,255], +'svg:floralwhite' => [255,250,240],'svg:coral' => [255,127,80], +'svg:honeydew' => [240,255,240],'svg:mistyrose' => [255,228,225], +'svg:slateblue' => [106,90,205],'svg:goldenrod' => [218,165,32], +'svg:darkcyan' => [0,139,139],'svg:moccasin' => [255,228,181], +'svg:mediumvioletred' => [199,21,133],'svg:maroon' => [128,0,0], +'svg:lightpink' => [255,182,193],'svg:lightsalmon' => [255,160,122], +'svg:paleturquoise' => [175,238,238],'svg:darksalmon' => [233,150,122], +'svg:yellowgreen' => [154,205,50],'svg:mediumturquoise' => [72,209,204], +'svg:chartreuse' => [127,255,0],'svg:peru' => [205,133,63], +'svg:palegoldenrod' => [238,232,170],'svg:red' => [255,0,0], +'svg:lavender' => [230,230,250],'svg:lightseagreen' => [32,178,170], +'svg:powderblue' => [176,224,230],'svg:orchid' => [218,112,214], +'svg:cornsilk' => [255,248,220],'svg:seagreen' => [46,139,87], +'svg:royalblue' => [65,105,225],'svg:ivory' => [255,255,240], +'svg:tan' => [210,180,140],'svg:linen' => [250,240,230], +'svg:darkorchid' => [153,50,204],'svg:tomato' => [255,99,71], +'svg:lightcyan' => [224,255,255],'svg:darkolivegreen' => [85,107,47], +'svg:sienna' => [160,82,45],'svg:lightsteelblue' => [176,196,222], +'svg:indigo' => [75,0,130],'svg:peachpuff' => [255,218,185], +'svg:lime' => [0,255,0],'svg:mediumspringgreen' => [0,250,154], +'svg:silver' => [192,192,192],'svg:saddlebrown' => [139,69,19], +'svg:lightyellow' => [255,255,224],'svg:grey' => [128,128,128], +'svg:thistle' => [216,191,216],'svg:deepskyblue' => [0,191,255], +'svg:lightgreen' => [144,238,144],'svg:blueviolet' => [138,43,226], +'svg:aqua' => [0,255,255],'svg:cyan' => [0,255,255], +'svg:papayawhip' => [255,239,213],'svg:deeppink' => [255,20,147], +'svg:firebrick' => [178,34,34],'svg:navy' => [0,0,128], +'svg:hotpink' => [255,105,180],'svg:pink' => 
[255,192,203], +'svg:darkturquoise' => [0,206,209],'svg:navajowhite' => [255,222,173], +'svg:lightslategrey' => [119,136,153],'svg:lawngreen' => [124,252,0], +'svg:lightcoral' => [240,128,128],'svg:palegreen' => [152,251,152], +'svg:dodgerblue' => [30,144,255],'svg:greenyellow' => [173,255,47], +'svg:lightskyblue' => [135,206,250],'svg:brown' => [165,42,42], +'svg:dimgrey' => [105,105,105],'svg:aquamarine' => [127,255,212], +'svg:darkseagreen' => [143,188,143],'svg:fuchsia' => [255,0,255], +'svg:magenta' => [255,0,255],'svg:chocolate' => [210,105,30], +'svg:mediumseagreen' => [60,179,113],'svg:cadetblue' => [95,158,160], +'svg:purple' => [128,0,128],'svg:turquoise' => [64,224,208], +'svg:darkkhaki' => [189,183,107],'svg:antiquewhite' => [250,235,215], +'svg:skyblue' => [135,206,235],'svg:sandybrown' => [244,164,96], +'svg:mediumblue' => [0,0,205],'svg:steelblue' => [70,130,180], +'svg:indianred' => [205,92,92],'svg:khaki' => [240,230,140], +'svg:lightblue' => [173,216,230],'svg:green' => [0,128,0], +'svg:olive' => [128,128,0],'svg:mediumorchid' => [186,85,211], +'svg:blue' => [0,0,255],'svg:snow' => [255,250,250], +'svg:rosybrown' => [188,143,143],'svg:orange' => [255,165,0], +'svg:slategrey' => [112,128,144],'svg:darkorange' => [255,140,0], +'svg:violet' => [238,130,238],'svg:darkslategrey' => [47,79,79], +'svg:whitesmoke' => [245,245,245],'svg:burlywood' => [222,184,135], +'svg:darkgreen' => [0,100,0],'svg:lemonchiffon' => [255,250,205], +'svg:midnightblue' => [25,25,112],'svg:mintcream' => [245,255,250], +'svg:oldlace' => [253,245,230],'svg:black' => [0,0,0], +'svg:bisque' => [255,228,196],'svg:mediumaquamarine' => [102,205,170], +'svg:olivedrab' => [107,142,35],'svg:salmon' => [250,128,114], +'svg:teal' => [0,128,128],'svg:seashell' => [255,245,238], +'svg:springgreen' => [0,255,127],'svg:plum' => [221,160,221], +'svg:darkviolet' => [148,0,211],'svg:wheat' => [245,222,179], +'svg:mediumpurple' => [147,112,219],'svg:cornflowerblue' => [100,149,237], +'svg:forestgreen' => [34,139,34],'svg:darkgoldenrod' => [184,134,11], +'svg:aliceblue' => [240,248,255],'svg:white' => [255,255,255], +'svg:darkblue' => [0,0,139],'svg:azure' => [240,255,255], +'svg:darkred' => [139,0,0],'svg:orangered' => [255,69,0], +'svg:darkslateblue' => [72,61,139],'svg:crimson' => [220,20,60], +'svg:lightgrey' => [211,211,211],'svg:beige' => [245,245,220], + +'x11:deepskyblue3' => [0,154,205],'x11:gold' => [255,215,0], +'x11:gold1' => [255,215,0],'x11:mediumpurple3' => [137,104,205], +'x11:royalblue3' => [58,95,205],'x11:lightgoldenrodyellow' => [250,250,210], +'x11:lavenderblush' => [255,240,245],'x11:lavenderblush1' => [255,240,245], +'x11:pink1' => [255,181,197],'x11:green3' => [0,205,0], +'x11:lightsteelblue1' => [202,225,255],'x11:blanchedalmond' => [255,235,205], +'x11:salmon1' => [255,140,105],'x11:ghostwhite' => [248,248,255], +'x11:floralwhite' => [255,250,240],'x11:dodgerblue4' => [16,78,139], +'x11:grey43' => [110,110,110],'x11:indianred4' => [139,58,58], +'x11:mistyrose1' => [255,228,225],'x11:mistyrose' => [255,228,225], +'x11:dodgerblue2' => [28,134,238],'x11:grey37' => [94,94,94], +'x11:grey9' => [23,23,23],'x11:purple4' => [85,26,139], +'x11:orchid2' => [238,122,233],'x11:cornsilk3' => [205,200,177], +'x11:goldenrod' => [218,165,32],'x11:hotpink4' => [139,58,98], +'x11:lightpink' => [255,182,193],'x11:coral2' => [238,106,80], +'x11:cyan2' => [0,238,238],'x11:grey87' => [222,222,222], +'x11:grey91' => [232,232,232],'x11:violetred4' => [139,34,82], +'x11:violetred2' => [238,58,140],'x11:indianred2' => 
[238,99,99], +'x11:lightyellow3' => [205,205,180],'x11:darkolivegreen2' => [188,238,104], +'x11:magenta3' => [205,0,205],'x11:grey64' => [163,163,163], +'x11:honeydew3' => [193,205,193],'x11:lightsalmon3' => [205,129,98], +'x11:springgreen4' => [0,139,69],'x11:grey57' => [145,145,145], +'x11:grey50' => [127,127,127],'x11:grey66' => [168,168,168], +'x11:antiquewhite1' => [255,239,219],'x11:paleturquoise' => [175,238,238], +'x11:navajowhite2' => [238,207,161],'x11:lightpink3' => [205,140,149], +'x11:darksalmon' => [233,150,122],'x11:grey52' => [133,133,133], +'x11:slategrey3' => [159,182,205],'x11:darkseagreen4' => [105,139,105], +'x11:chartreuse' => [127,255,0],'x11:chartreuse1' => [127,255,0], +'x11:grey42' => [107,107,107],'x11:peru' => [205,133,63], +'x11:tan3' => [205,133,63],'x11:grey19' => [48,48,48], +'x11:palegreen3' => [124,205,124],'x11:lavender' => [230,230,250], +'x11:red3' => [205,0,0],'x11:orchid' => [218,112,214], +'x11:powderblue' => [176,224,230],'x11:grey35' => [89,89,89], +'x11:plum4' => [139,102,139],'x11:cornsilk' => [255,248,220], +'x11:cornsilk1' => [255,248,220],'x11:royalblue' => [65,105,225], +'x11:darkgoldenrod2' => [238,173,14],'x11:lightpink4' => [139,95,101], +'x11:springgreen2' => [0,238,118],'x11:tan' => [210,180,140], +'x11:lightslateblue' => [132,112,255],'x11:darkorchid' => [153,50,204], +'x11:orangered2' => [238,64,0],'x11:palevioletred1' => [255,130,171], +'x11:grey63' => [161,161,161],'x11:maroon2' => [238,48,167], +'x11:blue2' => [0,0,238],'x11:turquoise4' => [0,134,139], +'x11:lightcyan1' => [224,255,255],'x11:lightcyan' => [224,255,255], +'x11:springgreen3' => [0,205,102],'x11:darkorchid4' => [104,34,139], +'x11:sienna' => [160,82,45],'x11:goldenrod2' => [238,180,34], +'x11:lightgoldenrod3' => [205,190,112],'x11:green' => [0,255,0], +'x11:green1' => [0,255,0],'x11:peachpuff1' => [255,218,185], +'x11:peachpuff' => [255,218,185],'x11:yellow3' => [205,205,0], +'x11:mediumspringgreen' => [0,250,154],'x11:cadetblue3' => [122,197,205], +'x11:royalblue1' => [72,118,255],'x11:deepskyblue1' => [0,191,255], +'x11:deepskyblue' => [0,191,255],'x11:firebrick1' => [255,48,48], +'x11:grey80' => [204,204,204],'x11:grey28' => [71,71,71], +'x11:palegreen2' => [144,238,144],'x11:lightgreen' => [144,238,144], +'x11:blueviolet' => [138,43,226],'x11:deeppink1' => [255,20,147], +'x11:deeppink' => [255,20,147],'x11:deeppink2' => [238,18,137], +'x11:lightskyblue2' => [164,211,238],'x11:grey77' => [196,196,196], +'x11:grey72' => [184,184,184],'x11:tomato2' => [238,92,66], +'x11:steelblue2' => [92,172,238],'x11:hotpink' => [255,105,180], +'x11:slateblue4' => [71,60,139],'x11:pink' => [255,192,203], +'x11:darkturquoise' => [0,206,209],'x11:antiquewhite3' => [205,192,176], +'x11:grey32' => [82,82,82],'x11:lightyellow2' => [238,238,209], +'x11:olivedrab4' => [105,139,34],'x11:lightblue4' => [104,131,139], +'x11:royalblue2' => [67,110,238],'x11:navajowhite1' => [255,222,173], +'x11:navajowhite' => [255,222,173],'x11:lightgoldenrod' => [238,221,130], +'x11:grey85' => [217,217,217],'x11:maroon4' => [139,28,98], +'x11:grey90' => [229,229,229],'x11:grey17' => [43,43,43], +'x11:seashell4' => [139,134,130],'x11:greenyellow' => [173,255,47], +'x11:dodgerblue1' => [30,144,255],'x11:dodgerblue' => [30,144,255], +'x11:grey89' => [227,227,227],'x11:brown2' => [238,59,59], +'x11:paleturquoise2' => [174,238,238],'x11:lightskyblue' => [135,206,250], +'x11:salmon4' => [139,76,57],'x11:chocolate3' => [205,102,29], +'x11:grey70' => [179,179,179],'x11:grey25' => [64,64,64], +'x11:darkolivegreen4' 
=> [110,139,61],'x11:mediumorchid2' => [209,95,238], +'x11:brown' => [165,42,42],'x11:grey67' => [171,171,171], +'x11:grey41' => [105,105,105],'x11:dimgrey' => [105,105,105], +'x11:grey60' => [153,153,153],'x11:indianred3' => [205,85,85], +'x11:chocolate' => [210,105,30],'x11:darkslategrey1' => [151,255,255], +'x11:grey2' => [5,5,5],'x11:firebrick3' => [205,38,38], +'x11:snow4' => [139,137,137],'x11:mediumseagreen' => [60,179,113], +'x11:darkorchid1' => [191,62,255],'x11:pink3' => [205,145,158], +'x11:violetred1' => [255,62,150],'x11:grey83' => [212,212,212], +'x11:olivedrab1' => [192,255,62],'x11:darkkhaki' => [189,183,107], +'x11:deepskyblue4' => [0,104,139],'x11:darkorchid2' => [178,58,238], +'x11:skyblue' => [135,206,235],'x11:mediumorchid3' => [180,82,205], +'x11:rosybrown4' => [139,105,105],'x11:grey16' => [41,41,41], +'x11:yellow4' => [139,139,0],'x11:maroon' => [176,48,96], +'x11:turquoise2' => [0,229,238],'x11:mistyrose2' => [238,213,210], +'x11:blue3' => [0,0,205],'x11:mediumblue' => [0,0,205], +'x11:grey4' => [10,10,10],'x11:pink2' => [238,169,184], +'x11:chocolate2' => [238,118,33],'x11:lightyellow4' => [139,139,122], +'x11:grey99' => [252,252,252],'x11:red2' => [238,0,0], +'x11:tan4' => [139,90,43],'x11:yellow2' => [238,238,0], +'x11:grey12' => [31,31,31],'x11:deeppink4' => [139,10,80], +'x11:lightsalmon4' => [139,87,66],'x11:lightcyan4' => [122,139,139], +'x11:snow1' => [255,250,250],'x11:snow' => [255,250,250], +'x11:brown4' => [139,35,35],'x11:darkseagreen2' => [180,238,180], +'x11:lightsteelblue2' => [188,210,238],'x11:rosybrown' => [188,143,143], +'x11:maroon1' => [255,52,179],'x11:slategrey' => [112,128,144], +'x11:orange' => [255,165,0],'x11:orange1' => [255,165,0], +'x11:orangered3' => [205,55,0],'x11:plum3' => [205,150,205], +'x11:turquoise3' => [0,197,205],'x11:pink4' => [139,99,108], +'x11:violet' => [238,130,238],'x11:grey96' => [245,245,245], +'x11:whitesmoke' => [245,245,245],'x11:lightgoldenrod1' => [255,236,139], +'x11:darkorange1' => [255,127,0],'x11:seashell2' => [238,229,222], +'x11:midnightblue' => [25,25,112],'x11:grey27' => [69,69,69], +'x11:mediumpurple2' => [159,121,238],'x11:bisque4' => [139,125,107], +'x11:black' => [0,0,0],'x11:grey0' => [0,0,0], +'x11:lavenderblush4' => [139,131,134],'x11:bisque1' => [255,228,196], +'x11:bisque' => [255,228,196],'x11:mediumaquamarine' => [102,205,170], +'x11:aquamarine3' => [102,205,170],'x11:goldenrod1' => [255,193,37], +'x11:green4' => [0,139,0],'x11:bisque3' => [205,183,158], +'x11:salmon' => [250,128,114],'x11:grey1' => [3,3,3], +'x11:purple3' => [125,38,205],'x11:khaki4' => [139,134,78], +'x11:grey' => [190,190,190],'x11:cadetblue4' => [83,134,139], +'x11:cadetblue1' => [152,245,255],'x11:hotpink3' => [205,96,144], +'x11:antiquewhite2' => [238,223,204],'x11:darkorange4' => [139,69,0], +'x11:cornsilk2' => [238,232,205],'x11:grey93' => [237,237,237], +'x11:thistle3' => [205,181,205],'x11:plum2' => [238,174,238], +'x11:burlywood2' => [238,197,145],'x11:skyblue4' => [74,112,139], +'x11:peachpuff2' => [238,203,173],'x11:grey62' => [158,158,158], +'x11:paleturquoise3' => [150,205,205],'x11:lightblue1' => [191,239,255], +'x11:mediumpurple' => [147,112,219],'x11:peachpuff3' => [205,175,149], +'x11:grey49' => [125,125,125],'x11:grey3' => [8,8,8], +'x11:steelblue1' => [99,184,255],'x11:grey73' => [186,186,186], +'x11:grey44' => [112,112,112],'x11:palevioletred4' => [139,71,93], +'x11:khaki2' => [238,230,133],'x11:gold3' => [205,173,0], +'x11:grey47' => [120,120,120],'x11:aliceblue' => [240,248,255], +'x11:grey58' => 
[148,148,148],'x11:darkslategrey4' => [82,139,139], +'x11:mediumorchid4' => [122,55,139],'x11:thistle1' => [255,225,255], +'x11:mistyrose4' => [139,125,123],'x11:orchid1' => [255,131,250], +'x11:hotpink2' => [238,106,167],'x11:azure' => [240,255,255], +'x11:azure1' => [240,255,255],'x11:darkred' => [139,0,0], +'x11:red4' => [139,0,0],'x11:chartreuse2' => [118,238,0], +'x11:slateblue1' => [131,111,255],'x11:grey15' => [38,38,38], +'x11:grey71' => [181,181,181],'x11:darkslategrey2' => [141,238,238], +'x11:snow3' => [205,201,201],'x11:bisque2' => [238,213,183], +'x11:darkslateblue' => [72,61,139],'x11:coral4' => [139,62,47], +'x11:grey69' => [176,176,176],'x11:burlywood4' => [139,115,85], +'x11:coral3' => [205,91,69],'x11:purple' => [160,32,240], +'x11:grey36' => [92,92,92],'x11:grey94' => [240,240,240], +'x11:palevioletred2' => [238,121,159],'x11:grey46' => [117,117,117], +'x11:palevioletred' => [219,112,147],'x11:mediumslateblue' => [123,104,238], +'x11:seagreen1' => [84,255,159],'x11:gainsboro' => [220,220,220], +'x11:yellow1' => [255,255,0],'x11:yellow' => [255,255,0], +'x11:limegreen' => [50,205,50],'x11:darkgrey' => [169,169,169], +'x11:darkmagenta' => [139,0,139],'x11:magenta4' => [139,0,139], +'x11:grey59' => [150,150,150],'x11:firebrick2' => [238,44,44], +'x11:coral' => [255,127,80],'x11:honeydew' => [240,255,240], +'x11:honeydew1' => [240,255,240],'x11:grey86' => [219,219,219], +'x11:grey13' => [33,33,33],'x11:purple1' => [155,48,255], +'x11:grey82' => [209,209,209],'x11:grey65' => [166,166,166], +'x11:grey97' => [247,247,247],'x11:azure4' => [131,139,139], +'x11:darkslategrey3' => [121,205,205],'x11:lightcyan3' => [180,205,205], +'x11:aquamarine2' => [118,238,198],'x11:grey92' => [235,235,235], +'x11:slateblue' => [106,90,205],'x11:darkcyan' => [0,139,139], +'x11:cyan4' => [0,139,139],'x11:chartreuse3' => [102,205,0], +'x11:moccasin' => [255,228,181],'x11:mediumvioletred' => [199,21,133], +'x11:tomato3' => [205,79,57],'x11:grey31' => [79,79,79], +'x11:sienna2' => [238,121,66],'x11:grey98' => [250,250,250], +'x11:gold4' => [139,117,0],'x11:slateblue3' => [105,89,205], +'x11:grey14' => [36,36,36],'x11:honeydew4' => [131,139,131], +'x11:grey61' => [156,156,156],'x11:violetred3' => [205,50,120], +'x11:grey39' => [99,99,99],'x11:aquamarine4' => [69,139,116], +'x11:darkgoldenrod4' => [139,101,8],'x11:mediumpurple1' => [171,130,255], +'x11:lightsalmon1' => [255,160,122],'x11:lightsalmon' => [255,160,122], +'x11:darkolivegreen3' => [162,205,90],'x11:grey10' => [26,26,26], +'x11:khaki3' => [205,198,115],'x11:navajowhite3' => [205,179,139], +'x11:lightpink1' => [255,174,185],'x11:grey81' => [207,207,207], +'x11:grey45' => [115,115,115],'x11:wheat3' => [205,186,150], +'x11:steelblue4' => [54,100,139],'x11:grey48' => [122,122,122], +'x11:olivedrab3' => [154,205,50],'x11:yellowgreen' => [154,205,50], +'x11:mediumturquoise' => [72,209,204],'x11:palegoldenrod' => [238,232,170], +'x11:ivory2' => [238,238,224],'x11:darkolivegreen1' => [202,255,112], +'x11:red1' => [255,0,0],'x11:red' => [255,0,0], +'x11:lemonchiffon4' => [139,137,112],'x11:lightseagreen' => [32,178,170], +'x11:seagreen4' => [46,139,87],'x11:seagreen' => [46,139,87], +'x11:ivory' => [255,255,240],'x11:ivory1' => [255,255,240], +'x11:linen' => [250,240,230],'x11:grey34' => [87,87,87], +'x11:thistle2' => [238,210,238],'x11:tomato' => [255,99,71], +'x11:tomato1' => [255,99,71],'x11:slategrey1' => [198,226,255], +'x11:orchid3' => [205,105,201],'x11:lightcyan2' => [209,238,238], +'x11:grey54' => [138,138,138],'x11:darkolivegreen' => 
[85,107,47], +'x11:lightsteelblue' => [176,196,222],'x11:grey33' => [84,84,84], +'x11:chocolate4' => [139,69,19],'x11:saddlebrown' => [139,69,19], +'x11:orange3' => [205,133,0],'x11:lightyellow' => [255,255,224], +'x11:lightyellow1' => [255,255,224],'x11:grey75' => [191,191,191], +'x11:khaki1' => [255,246,143],'x11:thistle' => [216,191,216], +'x11:grey79' => [201,201,201],'x11:plum1' => [255,187,255], +'x11:paleturquoise4' => [102,139,139],'x11:cyan1' => [0,255,255], +'x11:cyan' => [0,255,255],'x11:maroon3' => [205,41,144], +'x11:papayawhip' => [255,239,213],'x11:seagreen3' => [67,205,128], +'x11:lightgoldenrod4' => [139,129,76],'x11:lightskyblue1' => [176,226,255], +'x11:firebrick' => [178,34,34],'x11:grey30' => [77,77,77], +'x11:grey26' => [66,66,66],'x11:antiquewhite4' => [139,131,120], +'x11:navyblue' => [0,0,128],'x11:navy' => [0,0,128], +'x11:grey7' => [18,18,18],'x11:grey5' => [13,13,13], +'x11:grey29' => [74,74,74],'x11:turquoise1' => [0,245,255], +'x11:darkgoldenrod3' => [205,149,12],'x11:goldenrod4' => [139,105,20], +'x11:palevioletred3' => [205,104,137],'x11:lightslategrey' => [119,136,153], +'x11:snow2' => [238,233,233],'x11:grey24' => [61,61,61], +'x11:slategrey4' => [108,123,139],'x11:grey55' => [140,140,140], +'x11:seashell3' => [205,197,191],'x11:deeppink3' => [205,16,118], +'x11:lawngreen' => [124,252,0],'x11:darkorchid3' => [154,50,205], +'x11:lightcoral' => [240,128,128],'x11:palegreen' => [152,251,152], +'x11:grey56' => [143,143,143],'x11:grey23' => [59,59,59], +'x11:grey74' => [189,189,189],'x11:azure2' => [224,238,238], +'x11:darkseagreen3' => [155,205,155],'x11:grey20' => [51,51,51], +'x11:cadetblue2' => [142,229,238],'x11:grey84' => [214,214,214], +'x11:cornsilk4' => [139,136,120],'x11:grey38' => [97,97,97], +'x11:magenta1' => [255,0,255],'x11:magenta' => [255,0,255], +'x11:darkseagreen' => [143,188,143],'x11:aquamarine1' => [127,255,212], +'x11:aquamarine' => [127,255,212],'x11:lightblue3' => [154,192,205], +'x11:olivedrab2' => [179,238,58],'x11:grey40' => [102,102,102], +'x11:peachpuff4' => [139,119,101],'x11:paleturquoise1' => [187,255,255], +'x11:darkseagreen1' => [193,255,193],'x11:darkorange3' => [205,102,0], +'x11:brown3' => [205,51,51],'x11:grey51' => [130,130,130], +'x11:mediumpurple4' => [93,71,139],'x11:lightpink2' => [238,162,173], +'x11:cadetblue' => [95,158,160],'x11:lemonchiffon2' => [238,233,191], +'x11:green2' => [0,238,0],'x11:azure3' => [193,205,205], +'x11:turquoise' => [64,224,208],'x11:brown1' => [255,64,64], +'x11:lightsteelblue4' => [110,123,139],'x11:orange2' => [238,154,0], +'x11:antiquewhite' => [250,235,215],'x11:wheat2' => [238,216,174], +'x11:rosybrown2' => [238,180,180],'x11:lightsteelblue3' => [162,181,205], +'x11:grey78' => [199,199,199],'x11:grey21' => [54,54,54], +'x11:sandybrown' => [244,164,96],'x11:lavenderblush2' => [238,224,229], +'x11:steelblue' => [70,130,180],'x11:grey95' => [242,242,242], +'x11:indianred' => [205,92,92],'x11:skyblue1' => [135,206,255], +'x11:khaki' => [240,230,140],'x11:orchid4' => [139,71,137], +'x11:chocolate1' => [255,127,36],'x11:goldenrod3' => [205,155,29], +'x11:sienna4' => [139,71,38],'x11:lightblue' => [173,216,230], +'x11:grey88' => [224,224,224],'x11:palegreen4' => [84,139,84], +'x11:mediumorchid' => [186,85,211],'x11:blue' => [0,0,255], +'x11:blue1' => [0,0,255],'x11:dodgerblue3' => [24,116,205], +'x11:indianred1' => [255,106,106],'x11:cyan3' => [0,205,205], +'x11:tan1' => [255,165,79],'x11:darkorange' => [255,140,0], +'x11:skyblue2' => [126,192,238],'x11:coral1' => [255,114,86], 
+'x11:darkslategrey' => [47,79,79],'x11:burlywood' => [222,184,135], +'x11:sienna3' => [205,104,57],'x11:darkgreen' => [0,100,0], +'x11:mistyrose3' => [205,183,181],'x11:grey68' => [173,173,173], +'x11:grey53' => [135,135,135],'x11:lemonchiffon' => [255,250,205], +'x11:lemonchiffon1' => [255,250,205],'x11:palegreen1' => [154,255,154], +'x11:grey76' => [194,194,194],'x11:steelblue3' => [79,148,205], +'x11:grey11' => [28,28,28],'x11:oldlace' => [253,245,230], +'x11:mintcream' => [245,255,250],'x11:firebrick4' => [139,26,26], +'x11:lemonchiffon3' => [205,201,165],'x11:olivedrab' => [107,142,35], +'x11:honeydew2' => [224,238,224],'x11:deepskyblue2' => [0,178,238], +'x11:slateblue2' => [122,103,238],'x11:slategrey2' => [185,211,238], +'x11:seagreen2' => [78,238,148],'x11:salmon2' => [238,130,98], +'x11:ivory3' => [205,205,193],'x11:mediumorchid1' => [224,102,255], +'x11:tan2' => [238,154,73],'x11:springgreen' => [0,255,127], +'x11:springgreen1' => [0,255,127],'x11:seashell1' => [255,245,238], +'x11:seashell' => [255,245,238],'x11:skyblue3' => [108,166,205], +'x11:chartreuse4' => [69,139,0],'x11:burlywood3' => [205,170,125], +'x11:plum' => [221,160,221],'x11:ivory4' => [139,139,131], +'x11:darkviolet' => [148,0,211],'x11:lightblue2' => [178,223,238], +'x11:wheat' => [245,222,179],'x11:darkgoldenrod1' => [255,185,15], +'x11:cornflowerblue' => [100,149,237],'x11:purple2' => [145,44,238], +'x11:grey6' => [15,15,15],'x11:magenta2' => [238,0,238], +'x11:sienna1' => [255,130,71],'x11:darkgoldenrod' => [184,134,11], +'x11:forestgreen' => [34,139,34],'x11:navajowhite4' => [139,121,94], +'x11:royalblue4' => [39,64,139],'x11:wheat1' => [255,231,186], +'x11:lightskyblue4' => [96,123,139],'x11:grey18' => [46,46,46], +'x11:orangered4' => [139,37,0],'x11:salmon3' => [205,112,84], +'x11:white' => [255,255,255],'x11:grey100' => [255,255,255], +'x11:orange4' => [139,90,0],'x11:wheat4' => [139,126,102], +'x11:rosybrown1' => [255,193,193],'x11:grey8' => [20,20,20], +'x11:lightgoldenrod2' => [238,220,130],'x11:lightskyblue3' => [141,182,205], +'x11:violetred' => [208,32,144],'x11:blue4' => [0,0,139], +'x11:darkblue' => [0,0,139],'x11:lavenderblush3' => [205,193,197], +'x11:thistle4' => [139,123,139],'x11:hotpink1' => [255,110,180], +'x11:darkorange2' => [238,118,0],'x11:lightsalmon2' => [238,149,114], +'x11:orangered1' => [255,69,0],'x11:orangered' => [255,69,0], +'x11:burlywood1' => [255,211,155],'x11:lightgrey' => [211,211,211], +'x11:grey22' => [56,56,56],'x11:tomato4' => [139,54,38], +'x11:rosybrown3' => [205,155,155],'x11:gold2' => [238,201,0], +'x11:beige' => [245,245,220] + ); + +# Returns a hash-ref: color-name => RGB triple +sub available_names { + return \%_colors; +} + +# Returns the RGB triple for a name, undef if name not found +# The name is normalized before lookup is attempted. Normalization consists +# of: lowercasing and elimination of whitespace. Also, "gray" is replaced +# with "grey". +# If the name is prefixed with a namespace (separated by colon ':'), +# only this namespace is searched. If no namespace is specified, then +# the lookup occurs first in the global namespace, then in the default +# namespace. 
+ +sub name2rgb { + my ( $name ) = @_; + + my ( $ns, $core, $norm ) = _normalize_name( $name ); + + # If explicit namespace: + if( $ns ne '' ) { + if( exists $_colors{ $norm } ) { + return _fmt( @{ $_colors{ $norm } } ); + } else { + return; # Do not search further if explicit namespace is given + } + } + + # No explicit namespace + if( exists $_colors{ $core } ) { + return _fmt( @{ $_colors{ $core } } ); # global namespace + } + + # No namespace, but ':' prefix: search global ONLY, but not default ns + if( $core ne $norm ) { return; } + + $norm = get_default_namespace() . ':' . $core; + if( exists $_colors{ $norm } ) { + return _fmt( @{ $_colors{ $norm } } ); # default namespace + } + + # Not found + return; +} + +# Takes a name and an RGB triple. Registers the triple for the given name. +# The name will be normalized (lowercased, whitespace eliminated, 'gray' +# replaced by 'grey') before assignment is made. +# If the name is not prefixed by a namespace, the color will be entered +# into the global namespace. +# Returns the old value for the name, if the name already exists. + +sub register_name { + my ( $name, $r, $g, $b ) = @_; + + my ( $ns, $core, $norm ) = _normalize_name( $name ); + + # If no explicit ns is given, lookup and replace for $core, which is + # guaranteed not preceeded by ':'. Otherwise, use fully qualified name. + my $crr = ( $ns eq '' ) ? $core : $norm; + + if( exists $_colors{ $crr } ) { + my $old = $_colors{ $crr }; + $_colors{ $crr } = [ $r, $g, $b ]; + + return _fmt( @$old ); + } + + $_colors{ $crr } = [ $r, $g, $b ]; + + return; +} + +sub _normalize_name { + my ( $name ) = @_; + + $name = lc( $name ); # Lowercase + $name =~ s/\s//g; # Eliminate whitespace + $name =~ s/gray/grey/; # gray -> grey + + my ( $ns, $core ) = ( '', $name ); + if( $name =~ /:/ ) { + ( $ns, $core ) = split ':', $name; + } + + return ( $ns, $core, $name ); +} + +# Sets the default namespace. Returns the previous value. +# Giving an empty string as argument makes the global namespace the default. +# Note that the global namespace is initially EMPTY! + +sub set_default_namespace { + my $old = $_default_namespace; + $_default_namespace = $_[0]; + return $old; +} + +sub get_default_namespace { + return $_default_namespace; +} + +} # end BEGIN (Names) + + + +1; +__END__ + + +# ================================================== +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +# ================================================== + +=head1 NAME + +Graphics::ColorUtils - Easy-to-use color space conversions and more. 
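As a minimal illustrative sketch (an editorial addition, not part of the bundled module; it assumes Graphics::ColorUtils is installed exactly as shipped above), the list-versus-scalar return behaviour implemented by _fmt() works like this:

  use Graphics::ColorUtils;
  my @rgb = hsv2rgb( 120, 1, 1 );   # list context: ( 0, 255, 0 )
  my $hex = hsv2rgb( 120, 1, 1 );   # scalar context: '#00ff00'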
+
+=head1 SYNOPSIS
+
+ use Graphics::ColorUtils;
+
+ ( $y, $i, $q ) = rgb2yiq( $r, $g, $b );
+ ( $r, $g, $b ) = yiq2rgb( $y, $i, $q );
+ $hex_string = yiq2rgb( $y, $i, $q );
+
+ ( $c, $m, $y ) = rgb2cmy( $r, $g, $b );
+ ( $r, $g, $b ) = cmy2rgb( $c, $m, $y );
+ $hex_string = cmy2rgb( $c, $m, $y );
+
+ ( $h, $l, $s ) = rgb2hls( $r, $g, $b );
+ ( $r, $g, $b ) = hls2rgb( $h, $l, $s );
+ $hex_string = hls2rgb( $h, $l, $s );
+
+ ( $h, $s, $v ) = rgb2hsv( $r, $g, $b );
+ ( $r, $g, $b ) = hsv2rgb( $h, $s, $v );
+ $hex_string = hsv2rgb( $h, $s, $v );
+
+ # -----
+
+ use Graphics::ColorUtils qw( :gradients );
+
+ ( $r, $g, $b ) = grad2rgb( $name, $f ); # where 0.0 <= $f < 1.0
+ $hex_string = grad2rgb( $name, $f );
+
+ %color_count_for_gradient_name = available_gradients();
+ $array_ref_of_rgb_triples = gradient( $name );
+ $array_ref_old_grad = register_gradient( $name, $array_ref_of_rgb_triples );
+
+ # -----
+
+ use Graphics::ColorUtils qw( :names );
+
+ ( $r, $g, $b ) = name2rgb( $name );
+ $hex_string = name2rgb( $name );
+
+ $hash_ref_rgb_triples_for_name = available_names();
+ ( $old_r, $old_g, $old_b ) = register_name( $name, $r, $g, $b );
+ $old_hex_string = register_name( $name, $r, $g, $b );
+ $default_ns = get_default_namespace();
+ $old_ns = set_default_namespace( $new_ns );
+
+
+=head1 DESCRIPTION
+
+This module provides some utility functions to handle colors and
+color space conversions.
+
+The interface has been kept simple, so that most functions can be called
+"inline" when making calls to graphics libraries such as GD, Tk, or
+when generating HTML/CSS. (E.g. for GD:
+C<$c = $img-E<gt>colorAllocate( hsv2rgb( 270, 0.5, 0.3 ) );>.)
+
+Features:
+
+=over 4
+
+=item Color Space Conversions
+
+Color space conversions, in particular between the "intuitive" color
+spaces HSV (Hue/Saturation/Value) and HLS (Hue/Lightness/Saturation)
+to and from RGB (Red/Green/Blue).
+
+=item Color Lookup
+
+Color lookup by name for three standard sets of colors: WWW/CSS, SVG, and X11.
+
+=item Color Gradients
+
+Management of color gradients, which can be indexed by a floating point
+number in the range 0..1. (Mostly intended for false-color data visualization.)
+
+=back
+
+
+=head1 CONVENTIONS
+
+Legal values:
+
+ Y, I, Q: 0..1
+ C, M, Y: 0..1
+
+ R, G, B: 0..255 (may be float on input, guaranteed int on output)
+
+ H: 0..360 (red=0->yellow->green=120->cyan->blue=240->magenta steps of 60)
+ S, V: 0..1
+ L, S: 0..1
+
+All C<...2rgb> functions return a three-element array in list context,
+and a string formatted according to C<"#%02x%02x%02x"> (e.g. C<'#ff3a18'>)
+in scalar context.
+
+
+=head1 METHODS
+
+=head2 Color Space Conversions
+
+=over 4
+
+=item YIQ
+
+C<rgb2yiq()> and C<yiq2rgb()>
+
+=item CMY
+
+C<rgb2cmy()> and C<cmy2rgb()>
+
+=item HSV
+
+C<rgb2hsv()> and C<hsv2rgb()>
+
+=item HLS
+
+C<rgb2hls()> and C<hls2rgb()>
+
+=back
+
+All these methods take a triple of values and return a triple of
+converted values. However, B<in scalar context> the C<...2rgb>
+methods return a string formatted according to C<"#%02x%02x%02x">
+(e.g. C<'#ff3a18'>). This format is appropriate e.g. for calls to
+Tk routines: C<$mw-E<gt>widget( -color => hls2rgb( 180, 0.2, 0.1 ) );>, etc.
+
+
+=head2 Color Names
+
+Names can be arbitrary strings. If names contain a colon (C<':'>),
+the part of the name before the colon is considered a "namespace"
+specification. Namespaces allow one to have multiple color values
+corresponding to the same name and to control the priority in
+which those values will be retrieved.
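A short sketch of the namespace priority just described (an illustrative addition, not upstream text; the colour values are taken from the x11 and svg tables earlier in this file, and the C<'x11'> startup default is noted further down in this POD):

  use Graphics::ColorUtils qw( :names );
  # Unqualified names fall through the (initially empty) global
  # namespace to the default namespace, which starts out as 'x11':
  my $x11_green = name2rgb( 'green' );      # '#00ff00' (x11:green)
  # An explicit prefix restricts the lookup to that namespace:
  my $svg_green = name2rgb( 'svg:green' );  # '#008000' (svg:green)
  # Changing the default namespace changes what unqualified names resolve to:
  set_default_namespace( 'svg' );
  my $now_green = name2rgb( 'green' );      # now '#008000'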
+
+=over 4
+
+=item C<name2rgb( $name )>
+
+Returns a triple C<( $r, $g, $b )> in list context or a hex-string
+in scalar context if the name has been found, C<undef> otherwise.
+
+The name is normalized before lookup is attempted. Normalization consists
+of: lowercasing and elimination of whitespace. Also, "gray" is replaced
+with "grey".
+
+If the name is prefixed with a namespace (separated by a colon C<':'>),
+only this namespace is searched. If no namespace is specified, then
+the lookup occurs first in the global namespace, then in the default
+namespace.
+
+=item C<available_names()>
+
+Returns a reference to a hash, the keys of which are the color names,
+and the values are references to three-element arrays of RGB values.
+
+=item C<register_name( $name, $r, $g, $b )>
+
+Takes a name and an RGB triple. Stores the triple for the given name.
+The name will be normalized (lowercased, whitespace eliminated, 'gray'
+replaced by 'grey') before assignment is made.
+
+If the name is not prefixed by a namespace, the color will be entered
+into the global namespace.
+
+Returns the old value for the name, if the name already exists,
+C<undef> otherwise.
+
+=item C<get_default_namespace()>
+
+Returns the current value of the default namespace. Note that the
+empty string C<''> corresponds to the I<global> namespace.
+
+=item C<set_default_namespace( $new_ns )>
+
+Sets the default namespace. Returns the previous value.
+
+Giving an empty string as argument makes the global namespace the default.
+Note that the global namespace is initially I<empty>.
+
+(On startup, the default namespace is C<'x11'>.)
+
+=back
+
+
+=head2 Color Gradients
+
+=over 4
+
+=item C<grad2rgb( $name, $f )>
+
+Given the name of a gradient and a floating point number between 0 and 1,
+returns the color (as RGB triple or formatted hex-string) corresponding
+to the position in the gradient given by C<$f>.
+Returns C<undef> when gradient not found or C<$f> outside valid range.
+
+=item C<available_gradients()>
+
+Returns a hash, the keys of which are the names of the known gradients
+and the values being the number of colors in the corresponding gradient.
+
+=item C<gradient( $name )>
+
+Given the name of a gradient, returns a reference to an array of RGB
+triples or C<undef> if the gradient is not found.
+
+=item C<register_gradient( $name, $array_ref_of_rgb_triples )>
+
+Takes the name of a (possibly new) gradient and a reference to an
+array of RGB triples. Stores the array as gradient for that name.
+If the gradient name already existed, returns a reference to the
+old array, C<undef> otherwise.
+
+=back
+
+An introduction, together with a large number of sample gradients
+can be found at Paul Bourke's webpage:
+http://local.wasp.uwa.edu.au/~pbourke/texture_colour/colourramp/
+
+
+=head1 EXPORT
+
+Exports by default:
+
+ rgb2yiq(), yiq2rgb()
+ rgb2cmy(), cmy2rgb()
+ rgb2hls(), hls2rgb()
+ rgb2hsv(), hsv2rgb()
+
+Using the export tag C<:names>, exports the following additional methods:
+
+ name2rgb()
+ available_names()
+ register_name()
+ set_default_namespace()
+ get_default_namespace()
+
+Using the export tag C<:gradients>, exports the following additional methods:
+
+ gradient()
+ grad2rgb()
+ available_gradients()
+ register_gradient()
+
+
+=head1 BUGS
+
+=over 4
+
+=item Input parameter validation
+
+Most methods do I<not> explicitly validate that their arguments lie
+in the valid range.
+
+=item Multiple namespaces
+
+Names containing multiple colons may not be handled correctly.
+
+=item Hue wrap-around
+
+While hue should be restricted to 0..360, both C<hls2rgb()> and
+C<hsv2rgb()> tolerate "moderate" violation of this constraint (up
+to +/- 359).
+ +=back + + +=head1 TODO + +=over 4 + +=item Perl Versions + +This module has only been explicitly tested with Perl 5.8, +but nothing (should) prevent it from running fine with other +versions of Perl. + +=item Additional color space conversions + +For instance to and from XYZ, CIE, Luv; I. + +=item Additional pre-defined gradients + +Suggestions welcome! + +=back + +=head1 SEE ALSO + +=head2 Related Modules + +=over 4 + +=item Color::Rgb + +Lookup of color values for names. Similar to the "names" methods +in this module. Requires F. + +=item Graphics::ColorNames + +Lookup of color values for names. Similar to the "names" methods +in this module. Does I require F. Comes with +several sets of predefined color names (similar to this module). + +=item Graphics::ColorObject + +Color space conversions, including conversions to and from XYZ +and Luv. Object-oriented interface requires instantiation of a +"color-object" for each color, which can then provide a +representation of itself in all color spaces. + +=item Color::Scheme + +Generates pleasant color schemes (sets of colors). + +=back + + +=head2 Standard Color Sets + +=over 4 + +=item WWW/CSS + +The 16 (or 17, including "orange") colors defined by the W3: +http://www.w3.org/TR/css3-color + +=item SVG + +The 138 unique named colors (140 normalized unique names) defined for +SVG by the W3: http://www.w3.org/TR/SVG/types.html#ColorKeywords + +=item X11 + +The 502 unique named colors (549 normalized unique names) defined by +the X11 libraries in /usr/lib/X11/rgb.txt on an X11 system + +=back + + +=head2 Websites + +=over 4 + +=item * + +Poynton's Color FAQ: http://www.poynton.com/ColorFAQ.html + +=item * + +Paper on Color Conversion Algorithms: http://www.poynton.com/PDFs/coloureq.pdf + +=item * + +Paul Bourke's Webpage with many relevant details: http://local.wasp.uwa.edu.au/~pbourke/texture_colour/ + +=back + + +=head2 Books + +=over 4 + +=item * + +B +by James D. Foley, Andries van Dam, Steven K. Feiner, John F. Hughes +(Second Edition in C, 1990, mult. print runs) + +I> + +=item * + +B +by James D. Foley, Andries van Dam, Steven K. Feiner, John F. Hughes, +Richard L. Phillips (1990, mult. print runs) + +I + +=item * + +B +by Donald Hearn and M. Pauline Baker (2nd ed, 1997) + +I + +=back + + +=begin IMPLEMENTATION_NOTE + +There were two intents that drove part of the design: +- I wanted to avoid dependency on other modules as much as possible. + (This is a small module, it should not have all kinds of requirements + on its installation environment.) +- Including the VALUES for the color names and gradients in the source + file itself is certainly a somewhat contentious decision. Here is the + rationale: By embedding the data directly, we avoid the issue of files + missing at run-time and the required error detection and recovery code. + The impact on loading the module (as compared to requiring the data + files) should be minimal - the same amount of data gets read one way + or the other. +- And obviously I did not want to rely on the file rgb.txt to be there. + That's fine for Unix, not ok elsewhere. + +=end IMPLEMENTATION_NOTE + + +=head1 AUTHOR + +Philipp K. Janert, Ejanert at ieee dot org E, http://www.beyondcode.org + +=head1 COPYRIGHT AND LICENSE + +Copyright (C) 2006 by Philipp K. Janert + +This library is free software; you can redistribute it and/or modify +it under the same terms as Perl itself, either Perl version 5.8.3 or, +at your option, any later version of Perl 5 you may have available. 
+ +=cut diff -Nru gdata-2.13.3/inst/perl/install_modules.pl gdata-2.17.0/inst/perl/install_modules.pl --- gdata-2.13.3/inst/perl/install_modules.pl 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/install_modules.pl 2015-04-25 14:26:13.000000000 +0000 @@ -8,12 +8,12 @@ require 'module_tools.pl'; -my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_XLSX); +my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX); # check if we need to do anything ($HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, - $HAS_Spreadsheet_XLSX) = check_modules(0); + $HAS_Spreadsheet_ParseXLSX) = check_modules(0); install_modules() unless $HAS_Compress_Raw_Zlib; diff -Nru gdata-2.13.3/inst/perl/module_tools.pl gdata-2.17.0/inst/perl/module_tools.pl --- gdata-2.13.3/inst/perl/module_tools.pl 2014-01-03 19:32:44.000000000 +0000 +++ gdata-2.17.0/inst/perl/module_tools.pl 2015-06-29 22:36:45.000000000 +0000 @@ -17,7 +17,7 @@ $VERBOSE, $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, - $HAS_Spreadsheet_XLSX + $HAS_Spreadsheet_ParseXLSX ); $VERBOSE=$_[0]; @@ -25,9 +25,9 @@ eval { require Spreadsheet::ParseExcel; + use Spreadsheet::ParseExcel::Utility qw(ExcelFmt); $HAS_Spreadsheet_ParseExcel=1; print "Loaded Spreadsheet::ParseExcel\n" if $VERBOSE; - }; eval { @@ -37,9 +37,9 @@ }; eval { - require Spreadsheet::XLSX; - $HAS_Spreadsheet_XLSX=1; - print "Loaded Spreadsheet::XLSX\n" if $VERBOSE; + require Spreadsheet::ParseXLSX; + $HAS_Spreadsheet_ParseXLSX=1; + print "Loaded Spreadsheet::ParseXLSX\n" if $VERBOSE; }; if($VERBOSE) @@ -48,11 +48,11 @@ if !$HAS_Spreadsheet_ParseExcel; print "ERROR: Unable to load Compress::Raw::Zlib perl module! \n" if ! $HAS_Compress_Raw_Zlib; - print "ERROR: Unable to load Spreadsheet::XLSX perl module! \n" - if ! $HAS_Spreadsheet_XLSX; + print "ERROR: Unable to load Spreadsheet::ParseXLSX perl module! \n" + if ! $HAS_Spreadsheet_ParseXLSX; } - return $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_XLSX; + return $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX; } sub check_modules_and_notify() @@ -60,7 +60,7 @@ my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, - $HAS_Spreadsheet_XLSX) = check_modules(0); + $HAS_Spreadsheet_ParseXLSX) = check_modules(0); $HAS_Spreadsheet_ParseExcel or die("ERROR: Perl module Spreadsheet::ParseExcel cannot be loaded. 
Exiting.\n"); @@ -68,12 +68,12 @@ $HAS_Compress_Raw_Zlib or warn("WARNING: Perl module Compress::Raw::Zlib cannot be loaded.\n"); - $HAS_Spreadsheet_XLSX or - warn("WARNING: Perl module Spreadsheet::XLSX cannot be loaded.\n"); + $HAS_Spreadsheet_ParseXLSX or + warn("WARNING: Perl module Spreadsheet::ParseXLSX cannot be loaded.\n"); - ($HAS_Compress_Raw_Zlib && $HAS_Spreadsheet_XLSX ) or + ($HAS_Compress_Raw_Zlib && $HAS_Spreadsheet_ParseXLSX ) or warn("WARNING: Microsoft Excel 2007 'XLSX' formatted files will not be processed.\n"); - return $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_XLSX; + return $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, $HAS_Spreadsheet_ParseXLSX; } sub install_modules() @@ -91,6 +91,8 @@ CPAN::Index->reload(); # set the target install path + CPAN::Shell->o("conf", "mbuildpl_arg", + "PREFIX=$here LIB=$here --prefix $here --install-base $here"); CPAN::Shell->o("conf", "makepl_arg", "PREFIX=$here LIB=$here --prefix $here --install-base $here"); CPAN::Shell->install("Compress::Raw::Zlib"); @@ -98,7 +100,7 @@ #return 0; # install the libraries we want - for $mod (qw( Compress::Raw::Zlib Spreadsheet::XLSX )){ + for $mod (qw( Compress::Raw::Zlib Spreadsheet::ParseXLSX )){ my $obj = CPAN::Shell->expand('Module',$mod); $obj->install; } diff -Nru gdata-2.13.3/inst/perl/sheetCount.pl gdata-2.17.0/inst/perl/sheetCount.pl --- gdata-2.13.3/inst/perl/sheetCount.pl 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/sheetCount.pl 2014-08-28 04:05:07.000000000 +0000 @@ -15,7 +15,7 @@ my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, - $HAS_Spreadsheet_XLSX + $HAS_Spreadsheet_ParseXLSX ) = check_modules_and_notify(); use File::Spec::Functions; @@ -24,7 +24,7 @@ my($row, $col, $sheet, $cell, $usage, $filename, $volume, $directories, $whoami, $basename, $sheetnumber, $filename, - $text); + $text, $parser); ## @@ -78,15 +78,18 @@ my $oBook; ## First try as a Excel 2007+ 'xml' file +## First try as a Excel 2007+ 'xml' file eval { local $SIG{__WARN__} = sub {}; - $oBook = Spreadsheet::XLSX -> new ($ARGV[0]); + $parser = Spreadsheet::ParseXLSX -> new(); + $oBook = $parser->parse ($ARGV[0]); }; ## Then Excel 97-2004 Format -if($@) +if ( !defined $oBook ) { - $oBook = new Spreadsheet::ParseExcel->Parse($ARGV[0]) or \ + $parser = Spreadsheet::ParseExcel -> new(); + $oBook = $parser->parse($ARGV[0]) or \ die "Error parsing file '$ARGV[0]'.\n"; } diff -Nru gdata-2.13.3/inst/perl/sheetNames.pl gdata-2.17.0/inst/perl/sheetNames.pl --- gdata-2.13.3/inst/perl/sheetNames.pl 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/sheetNames.pl 2014-08-28 04:05:07.000000000 +0000 @@ -15,7 +15,7 @@ my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, - $HAS_Spreadsheet_XLSX + $HAS_Spreadsheet_ParseXLSX ) = check_modules_and_notify(); use File::Spec::Functions; @@ -24,7 +24,7 @@ my($row, $col, $sheet, $cell, $usage, $filename, $volume, $directories, $whoami, $basename, $sheetnumber, $filename, - $text); + $text, $parser); ## @@ -78,15 +78,18 @@ my $oBook; ## First try as a Excel 2007+ 'xml' file +## First try as a Excel 2007+ 'xml' file eval { local $SIG{__WARN__} = sub {}; - $oBook = Spreadsheet::XLSX -> new ($ARGV[0]); + $parser = Spreadsheet::ParseXLSX -> new(); + $oBook = $parser->parse ($ARGV[0]); }; ## Then Excel 97-2004 Format -if($@) +if ( !defined $oBook ) { - $oBook = new Spreadsheet::ParseExcel->Parse($ARGV[0]) or \ + $parser = Spreadsheet::ParseExcel -> new(); + $oBook = $parser->parse($ARGV[0]) or \ die "Error parsing file 
'$ARGV[0]'.\n"; } diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Cell.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Cell.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Cell.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Cell.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. # -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -18,7 +19,7 @@ use strict; use warnings; -our $VERSION = '0.56'; +our $VERSION = '0.65'; ############################################################################### # @@ -140,11 +141,33 @@ ############################################################################### # +# get_hyperlink { +# +# Returns an array ref of hyperlink information if the cell contains a hyperlink. +# Returns undef otherwise +# +# [0] : Description of link (You may want $cell->value, as it will have rich text) +# [1] : URL - the link expressed as a URL. N.B. relative URLs will be defaulted to +# the directory of the input file, if the input file name is known. Otherwise +# %REL% will be inserted as a place-holder. Depending on your application, +# you should either remove %REL% or replace it with the appropriate path. +# [2] : Target frame (or undef if none) + +sub get_hyperlink { + my $self = shift; + + return $self->{Hyperlink} if exists $self->{Hyperlink}; + return undef; +} + +# +############################################################################### +# # Mapping between legacy method names and new names. # { no warnings; # Ignore warnings about variables used only once. - *Value = *value; + *Value = \&value; } 1; @@ -176,6 +199,7 @@ $cell->encoding() $cell->is_merged() $cell->get_rich_text() + $cell->get_hyperlink() =head2 value() @@ -267,6 +291,25 @@ Returns undef if the property isn't set. +=head2 get_hyperlink() + +If a cell contains a hyperlink, the C method returns an array ref of information about it. + +A cell can contain at most one hyperlink. If it does, it contains no other value. + +Otherwise, it returns undef; + +The array contains: + +=over + +=item * 0: Description (what's displayed); undef if not present + +=item * 1: Link, converted to an appropriate URL - Note: Relative links are based on the input file. %REL% is used if the input file is unknown (e.g. a file handle or scalar) + +=item * 2: Target - target frame (or undef if none) + +=back =head1 Dates and Time in Excel @@ -293,7 +336,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -301,7 +346,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Dump.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Dump.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Dump.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Dump.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. 
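[Editorial note] The sheetCount.pl and sheetNames.pl hunks above replace Spreadsheet::XLSX with Spreadsheet::ParseXLSX and key the fallback on an undefined workbook object rather than on $@. A minimal standalone sketch of that open-then-fall-back pattern is shown below; the file name is hypothetical and both parser modules are assumed to be installed:

    use strict;
    use warnings;
    use Spreadsheet::ParseXLSX;
    use Spreadsheet::ParseExcel;

    sub open_workbook {
        my ($path) = @_;
        my $book;

        # First try the Excel 2007+ 'xlsx' parser, silencing its warnings.
        eval {
            local $SIG{__WARN__} = sub { };
            $book = Spreadsheet::ParseXLSX->new()->parse($path);
        };

        # Fall back to the Excel 97-2004 binary parser when that fails.
        if ( !defined $book ) {
            $book = Spreadsheet::ParseExcel->new()->parse($path)
                or die "Error parsing file '$path'.\n";
        }
        return $book;
    }

    my $workbook = open_workbook('report.xls');    # hypothetical file name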
# -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -18,7 +19,7 @@ use strict; use warnings; -our $VERSION = '0.56'; +our $VERSION = '0.65'; my %NameTbl = ( @@ -333,7 +334,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -341,7 +344,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm 2014-08-28 05:21:48.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. # -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -19,18 +20,18 @@ use warnings; use Spreadsheet::ParseExcel::Utility qw(ExcelFmt); -our $VERSION = '0.56'; +our $VERSION = '0.65'; my %hFmtDefault = ( - 0x00 => '@', + 0x00 => 'General', 0x01 => '0', 0x02 => '0.00', 0x03 => '#,##0', 0x04 => '#,##0.00', 0x05 => '($#,##0_);($#,##0)', - 0x06 => '($#,##0_);[RED]($#,##0)', + 0x06 => '($#,##0_);[Red]($#,##0)', 0x07 => '($#,##0.00_);($#,##0.00_)', - 0x08 => '($#,##0.00_);[RED]($#,##0.00_)', + 0x08 => '($#,##0.00_);[Red]($#,##0.00_)', 0x09 => '0%', 0x0A => '0.00%', 0x0B => '0.00E+00', @@ -48,9 +49,9 @@ #0x17-0x24 -- Differs in Natinal 0x25 => '(#,##0_);(#,##0)', - 0x26 => '(#,##0_);[RED](#,##0)', + 0x26 => '(#,##0_);[Red](#,##0)', 0x27 => '(#,##0.00);(#,##0.00)', - 0x28 => '(#,##0.00);[RED](#,##0.00)', + 0x28 => '(#,##0.00);[Red](#,##0.00)', 0x29 => '_(*#,##0_);_(*(#,##0);_(*"-"_);_(@_)', 0x2A => '_($*#,##0_);_($*(#,##0);_(*"-"_);_(@_)', 0x2B => '_(*#,##0.00_);_(*(#,##0.00);_(*"-"??_);_(@_)', @@ -101,9 +102,19 @@ sub FmtString { my ( $oThis, $oCell, $oBook ) = @_; + no warnings; my $sFmtStr = $oThis->FmtStringDef( $oBook->{Format}[ $oCell->{FormatNo} ]->{FmtIdx}, $oBook ); + use warnings; + + # Special case for cells that use Lotus123 style leading + # apostrophe to designate text formatting. + no warnings; + if ( $oBook->{Format}[ $oCell->{FormatNo} ]->{Key123} ) { + $sFmtStr = '@'; + } + use warnings; unless ( defined($sFmtStr) ) { if ( $oCell->{Type} eq 'Numeric' ) { @@ -142,13 +153,16 @@ ( ( defined $oCell->{Val} ) && ( $oCell->{Val} ne '' ) ) ? 
$oThis->TextFmt( $oCell->{Val}, $oCell->{Code} ) : ''; + + return $Dt; } else { - $Dt = $oCell->{Val}; + $Dt = $oCell->{Val}; + $Flg1904 = $oBook->{Flg1904}; + my $sFmtStr = $oThis->FmtString( $oCell, $oBook ); + + return ExcelFmt( $sFmtStr, $Dt, $Flg1904, $oCell->{Type} ); } - $Flg1904 = $oBook->{Flg1904}; - my $sFmtStr = $oThis->FmtString( $oCell, $oBook ); - return ExcelFmt( $sFmtStr, $Dt, $Flg1904, $oCell->{Type} ); } #------------------------------------------------------------------------------ @@ -191,7 +205,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -199,7 +215,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. # -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -21,7 +22,7 @@ use Jcode; use Unicode::Map; use base 'Spreadsheet::ParseExcel::FmtJapan'; -our $VERSION = '0.56'; +our $VERSION = '0.65'; #------------------------------------------------------------------------------ # new (for Spreadsheet::ParseExcel::FmtJapan2) @@ -82,7 +83,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -90,7 +93,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm 2014-08-28 01:14:18.000000000 +0000 @@ -7,7 +7,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. 
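[Editorial note] The FmtDefault::ValFmt() change above routes every non-text cell through ExcelFmt() from Spreadsheet::ParseExcel::Utility, passing along the workbook's 1904 flag. A hedged sketch of calling ExcelFmt() directly on a raw cell value (the serial number and format strings are made up for illustration):

    use strict;
    use warnings;
    use Spreadsheet::ParseExcel::Utility qw(ExcelFmt);

    # Render an unformatted numeric cell value with a number-format
    # string, the same call ValFmt()/FmtString() end up making.
    my $raw_value = 39114;          # hypothetical date serial
    my $flg_1904  = 0;              # 1 for workbooks using the 1904 system
    print ExcelFmt( 'yyyy-mm-dd', $raw_value, $flg_1904 ), "\n";

    # Percentage and fixed-point formats work the same way.
    print ExcelFmt( '0.00%', 0.125 ), "\n";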
# -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -21,18 +22,18 @@ use Encode qw(find_encoding decode); use base 'Spreadsheet::ParseExcel::FmtDefault'; -our $VERSION = '0.56'; +our $VERSION = '0.65'; my %FormatTable = ( - 0x00 => '@', + 0x00 => 'General', 0x01 => '0', 0x02 => '0.00', 0x03 => '#,##0', 0x04 => '#,##0.00', 0x05 => '(\\#,##0_);(\\#,##0)', - 0x06 => '(\\#,##0_);[RED](\\#,##0)', + 0x06 => '(\\#,##0_);[Red](\\#,##0)', 0x07 => '(\\#,##0.00_);(\\#,##0.00_)', - 0x08 => '(\\#,##0.00_);[RED](\\#,##0.00_)', + 0x08 => '(\\#,##0.00_);[Red](\\#,##0.00_)', 0x09 => '0%', 0x0A => '0.00%', 0x0B => '0.00E+00', @@ -60,9 +61,9 @@ #0x17-0x24 -- Differs in Natinal 0x25 => '(#,##0_);(#,##0)', - 0x26 => '(#,##0_);[RED](#,##0)', + 0x26 => '(#,##0_);[Red](#,##0)', 0x27 => '(#,##0.00);(#,##0.00)', - 0x28 => '(#,##0.00);[RED](#,##0.00)', + 0x28 => '(#,##0.00);[Red](#,##0.00)', 0x29 => '_(*#,##0_);_(*(#,##0);_(*"-"_);_(@_)', 0x2A => '_(\\*#,##0_);_(\\*(#,##0);_(*"-"_);_(@_)', 0x2B => '_(*#,##0.00_);_(*(#,##0.00);_(*"-"??_);_(@_)', @@ -189,7 +190,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -197,7 +200,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. # -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -21,7 +22,7 @@ use Unicode::Map; use base 'Spreadsheet::ParseExcel::FmtDefault'; -our $VERSION = '0.56'; +our $VERSION = '0.65'; #------------------------------------------------------------------------------ # new (for Spreadsheet::ParseExcel::FmtUnicode) @@ -83,7 +84,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -91,7 +94,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Font.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Font.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Font.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Font.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. 
# -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -18,7 +19,7 @@ use strict; use warnings; -our $VERSION = '0.56'; +our $VERSION = '0.65'; sub new { my ( $class, %rhIni ) = @_; @@ -47,7 +48,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -55,7 +58,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Format.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Format.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Format.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Format.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. # -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -18,7 +19,7 @@ use strict; use warnings; -our $VERSION = '0.56'; +our $VERSION = '0.65'; sub new { my ( $class, %rhIni ) = @_; @@ -47,7 +48,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -55,7 +58,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/SaveParser/Workbook.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/SaveParser/Workbook.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/SaveParser/Workbook.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/SaveParser/Workbook.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. 
# -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -19,7 +20,7 @@ use warnings; use base 'Spreadsheet::ParseExcel::Workbook'; -our $VERSION = '0.56'; +our $VERSION = '0.65'; #============================================================================== # Spreadsheet::ParseExcel::SaveParser::Workbook @@ -156,12 +157,12 @@ $oWrS->protect(); } } - if ( ( $oWkS->{FitWidth} == 1 ) and ( $oWkS->{FitHeight} == 1 ) ) { + if ( $oWkS->{Scale} != 100 ) { # Pages on fit with width and Heigt $oWrS->fit_to_pages( $oWkS->{FitWidth}, $oWkS->{FitHeight} ); - #Print Scale + #Print Scale and reset FitWidth/FitHeight $oWrS->set_print_scale( $oWkS->{Scale} ); } else { @@ -232,7 +233,7 @@ # PageStart => $oWkS->{PageStart}, # Page number for start # UsePage => $oWkS->{UsePage}, # Use own start page number -# NoColor => $oWkS->{NoColor}, # Print in blcak-white +# NoColor => $oWkS->{NoColor}, # Print in black-white # Draft => $oWkS->{Draft}, # Print in draft mode # Notes => $oWkS->{Notes}, # Print notes # LeftToRight => $oWkS->{LeftToRight}, # Left to Right @@ -254,7 +255,16 @@ } } } - for ( + + my $merged_areas = $oWkS->get_merged_areas(); + my $merged_areas_h = {}; + if ($merged_areas) { + foreach my $range (@$merged_areas) { + $merged_areas_h->{$range->[0]}{$range->[1]} = $range; + } + } + + for ( my $iR = $oWkS->{MinRow} ; defined $oWkS->{MaxRow} && $iR <= $oWkS->{MaxRow} ; $iR++ @@ -270,13 +280,11 @@ my $oWkC = $oWkS->{Cells}[$iR][$iC]; if ($oWkC) { - if ( $oWkC->{Merged} ) { + if ( $oWkC->{Merged} and exists $merged_areas_h->{$iR}{$iC} ) { my $oFmtN = $oWrEx->addformat(); $oFmtN->copy( $hFmt{ $oWkC->{FormatNo} } ); - $oFmtN->set_merge(1); - $oWrS->write( - $iR, - $iC, + $oWrS->merge_range ( + @{$merged_areas_h->{$iR}{$iC}}, $oBook->{FmtClass} ->TextFmt( $oWkC->{Val}, $oWkC->{Code} ), $oFmtN @@ -378,7 +386,8 @@ sub AddCell { my ( $oBook, $iSheet, $iR, $iC, $sVal, $oCell, $sCode ) = @_; my %rhKey; - $oCell ||= 0; + $oCell ||= $oBook->{Worksheet}[$iSheet] + ->{Cells}[$iR][$iC]->{FormatNo} || 0; my $iFmt = ( UNIVERSAL::isa( $oCell, 'Spreadsheet::ParseExcel::Cell' ) ) ? 
$oCell->{FormatNo} @@ -389,9 +398,22 @@ $rhKey{Val} = $sVal; $rhKey{Code} = $sCode || '_native_'; $oBook->{_CurSheet} = $iSheet; + + my $merged_areas = $oBook->{Worksheet}[$iSheet]->get_merged_areas(); + my $merged_areas_h = {}; + if ($merged_areas) { + foreach my $range (@$merged_areas) { + $merged_areas_h->{$range->[0]}{$range->[1]} = $range; + } + } + my $oNewCell = Spreadsheet::ParseExcel::_NewCell( $oBook, $iR, $iC, %rhKey ); Spreadsheet::ParseExcel::_SetDimension( $oBook, $iR, $iC, $iC ); + + $oNewCell->{Merged} = 1 + if exists $merged_areas_h->{$iR}{$iC}; + return $oNewCell; } @@ -415,7 +437,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -423,7 +447,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/SaveParser/Worksheet.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/SaveParser/Worksheet.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/SaveParser/Worksheet.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/SaveParser/Worksheet.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. # -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -23,7 +24,7 @@ #============================================================================== use base 'Spreadsheet::ParseExcel::Worksheet'; -our $VERSION = '0.56'; +our $VERSION = '0.65'; sub new { my ( $sClass, %rhIni ) = @_; @@ -35,7 +36,8 @@ #------------------------------------------------------------------------------ sub AddCell { my ( $oSelf, $iR, $iC, $sVal, $oCell, $sCode ) = @_; - $oSelf->{_Book} + + $oSelf->{_Book} ->AddCell( $oSelf->{_SheetNo}, $iR, $iC, $sVal, $oCell, $sCode ); } @@ -70,7 +72,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -78,7 +82,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/SaveParser.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/SaveParser.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/SaveParser.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/SaveParser.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. # -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -24,7 +25,7 @@ use Spreadsheet::WriteExcel; use base 'Spreadsheet::ParseExcel'; -our $VERSION = '0.56'; +our $VERSION = '0.65'; ############################################################################### # @@ -48,7 +49,7 @@ my $workbook = Spreadsheet::ParseExcel::Workbook->new(); $workbook->{SheetCount} = 0; - # User specified formater class. 
+ # User specified formatter class. if ($formatter) { $workbook->{FmtClass} = $formatter; } @@ -288,7 +289,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -296,7 +299,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Utility.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Utility.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Utility.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Utility.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. # -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -24,7 +25,7 @@ @EXPORT_OK = qw(ExcelFmt LocaltimeExcel ExcelLocaltime col2int int2col sheetRef xls2csv); -our $VERSION = '0.56'; +our $VERSION = '0.65'; my $qrNUMBER = qr/(^[+-]?\d+(\.\d+)?$)|(^[+-]?\d+\.?(\d*)[eE][+-](\d+))$/; @@ -180,16 +181,23 @@ # We don't use the colour but we return it to the caller. # my $color = ''; - if ( $format =~ s/^(\[[A-Z][a-z]{2,}(\d{1,2})?\])// ) { + if ( $format =~ s/^(\[[A-Za-z]{3,}(\d{1,2})?\])// ) { $color = $1; } # Remove the locale, such as [$-409], from the format string. my $locale = ''; - if ( $format =~ s/^(\[\$-\d+\])// ) { + if ( $format =~ s/^(\[\$?-F?\d+\])// ) { $locale = $1; } + # Replace currency locale, such as [$$-409], with $ in the format string. + # See the RT#60547 test cases in 21_number_format_user.t. + if ( $format =~ s/(\[\$([^-]+)(-\d+)?\])/$2/s ) { + $locale = $1; + } + + # Remove leading # from '# ?/?', '# ??/??' fraction formats. $format =~ s{# \?}{?}g; @@ -577,8 +585,7 @@ # Replace the placeholders in the template such as yyyy mm dd with # actual numbers or strings. my $replacement; - for ( my $i = @placeholders - 1 ; $i >= 0 ; $i-- ) { - my $placeholder = $placeholders[$i]; + for my $placeholder ( reverse @placeholders ) { if ( $placeholder->[-1] eq 'minutes' ) { @@ -729,6 +736,9 @@ # Text format. $replacement = $number; } + elsif ( $placeholder->[0] eq ',' ) { + next; + } # Substitute the replacement string back into the template. substr( $result, $placeholder->[1], $placeholder->[2], @@ -1113,6 +1123,10 @@ return $iTime; } +my @month_days = qw( + 0 31 28 31 30 31 30 31 31 30 31 30 31 +); + #------------------------------------------------------------------------------ # ExcelLocaltime (for Spreadsheet::ParseExcel::Utility) #------------------------------------------------------------------------------ @@ -1120,7 +1134,7 @@ my ( $dObj, $flg1904 ) = @_; my ( $iSec, $iMin, $iHour, $iDay, $iMon, $iYear, $iwDay, $iMSec ); - my ( $iDt, $iTime, $iYDays ); + my ( $iDt, $iTime, $iYDays, $iMD ); $iDt = int($dObj); $iTime = $dObj - $iDt; @@ -1146,33 +1160,42 @@ } $iYear -= 1900; # Localtime year is relative to 1900. 
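[Editorial note] The Utility.pm hunks around this point rework ExcelLocaltime(), which converts an Excel date/time serial into localtime()-style components (year relative to 1900, month zero-based, as the surrounding code shows). A hedged usage sketch with a made-up serial value:

    use strict;
    use warnings;
    use Spreadsheet::ParseExcel::Utility qw(ExcelLocaltime);

    # Split an Excel date/time serial into localtime()-like parts;
    # $wday is the day of week and $msec the sub-second remainder.
    my $serial = 39114.75;          # hypothetical cell value
    my ( $sec, $min, $hour, $day, $month, $year, $wday, $msec ) =
        ExcelLocaltime( $serial, 0 );    # 0 = 1900 date system

    printf "%04d-%02d-%02d %02d:%02d:%02d\n",
        $year + 1900, $month + 1, $day, $hour, $min, $sec;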
- for ( $iMon = 1 ; $iMon < 12 ; $iMon++ ) { - my $iMD; - if ( $iMon == 1 - || $iMon == 3 - || $iMon == 5 - || $iMon == 7 - || $iMon == 8 - || $iMon == 10 - || $iMon == 12 ) - { - $iMD = 31; - } - elsif ( $iMon == 4 || $iMon == 6 || $iMon == 9 || $iMon == 11 ) { - $iMD = 30; - } - elsif ( $iMon == 2 ) { - $iMD = ( ( $iYear % 4 ) == 0 ) ? 29 : 28; - } + for ( $iMon = 1 ; $iMon <= 12 ; $iMon++ ) { + $iMD = $month_days[$iMon]; + $iMD++ if $iMon == 2 and $iYear % 4 == 0; + last if ( $iDt <= $iMD ); $iDt -= $iMD; } - $iMon -= 1; # Localtime month is 0 based. - #2. Calc Time $iDay = $iDt; $iTime += ( 0.0005 / 86400.0 ); + if ($iTime >= 1.0) + { + $iTime -= int($iTime); + $iwDay = ($iwDay == 6) ? 0 : $iwDay + 1; + if ($iDay == $iMD) + { + if ($iMon == 12) + { + $iMon = 1; + $iYear++; + } + else + { + $iMon++; + } + $iDay = 1; + } + else + { + $iDay++; + } + } + + # Localtime month is 0 based. + $iMon -= 1; $iTime *= 24.0; $iHour = int($iTime); $iTime -= $iHour; @@ -1587,7 +1610,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -1595,7 +1620,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Workbook.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Workbook.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Workbook.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Workbook.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. # -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -18,7 +19,7 @@ use strict; use warnings; -our $VERSION = '0.56'; +our $VERSION = '0.65'; ############################################################################### # @@ -33,6 +34,14 @@ } ############################################################################### +sub color_idx_to_rgb { + my( $workbook, $iidx ) = @_; + + my $palette = $workbook->{aColor}; + return ( ( defined $palette->[$iidx] ) ? $palette->[$iidx] : $palette->[0] ); +} + +############################################################################### # # worksheet() # @@ -54,7 +63,7 @@ # # worksheets() # -# Returns an array ofWorksheet objects. +# Returns an array of Worksheet objects. # sub worksheets { my $self = shift; @@ -142,12 +151,25 @@ $self->{_ParseAbort} = $val; } +=head2 get_active_sheet() + +Return the number of the active (open) worksheet (at the time the workbook +was saved. May return undef. + +=cut + +sub get_active_sheet { + my $workbook = shift; + + return $workbook->{ActiveSheet}; +} + ############################################################################### # # Parse(). Deprecated. # # Syntactic wrapper around Spreadsheet::ParseExcel::Parse(). -# This method is *deprecated* since it doesn't conform to the the current +# This method is *deprecated* since it doesn't conform to the current # error handling in the S::PE Parse() method. 
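[Editorial note] Workbook.pm gains two small accessors above: color_idx_to_rgb(), which resolves a BIFF palette index against the workbook's colour table, and get_active_sheet(), which reports the sheet that was open when the file was saved. A minimal sketch of using both, assuming $workbook comes from Spreadsheet::ParseExcel->new()->parse(...):

    # Report the sheet that was active when the workbook was last saved.
    my $active = $workbook->get_active_sheet();
    if ( defined $active ) {
        printf "Active sheet: %s\n",
            $workbook->worksheet($active)->get_name();
    }

    # Resolve a palette colour index (for example a font or tab colour
    # index) to a six-digit hex RGB string such as 'FF0000'.
    my $rgb = $workbook->color_idx_to_rgb(10);
    print "Palette entry 10 is #$rgb\n";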
# sub Parse { @@ -276,7 +298,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -284,7 +308,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Worksheet.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Worksheet.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel/Worksheet.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel/Worksheet.pm 2014-08-28 01:14:18.000000000 +0000 @@ -6,7 +6,8 @@ # # Used in conjunction with Spreadsheet::ParseExcel. # -# Copyright (c) 2009 John McNamara +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara # Copyright (c) 2006-2008 Gabor Szabo # Copyright (c) 2000-2006 Kawai Takanori # @@ -19,7 +20,7 @@ use warnings; use Scalar::Util qw(weaken); -our $VERSION = '0.56'; +our $VERSION = '0.65'; ############################################################################### # @@ -179,26 +180,34 @@ # # get_row_heights() # -# Returns an array_ref of row heights. +# Returns an array of row heights. # sub get_row_heights { my $self = shift; - return @{ $self->{RowHeight} }; + if ( wantarray() ) { + return unless $self->{RowHeight}; + return @{ $self->{RowHeight} }; + } + return $self->{RowHeight}; } ############################################################################### # # get_col_widths() # -# Returns an array_ref of column widths. +# Returns an array of column widths. # sub get_col_widths { my $self = shift; - return @{ $self->{ColWidth} }; + if ( wantarray() ) { + return unless $self->{ColWidth}; + return @{ $self->{ColWidth} }; + } + return $self->{ColWidth}; } ############################################################################### @@ -540,6 +549,74 @@ return $self->{Notes}; } +=head2 get_tab_color() + +Return color index of tab, or undef if not set. + +=cut + +sub get_tab_color { + my $worksheet = shift; + + return $worksheet->{TabColor}; +} + +=head2 is_sheet_hidden() + +Return true if sheet is hidden + +=cut + +sub is_sheet_hidden { + my $worksheet = shift; + + return $worksheet->{SheetHidden}; +} + +=head2 is_row_hidden($row) + +In scalar context, return true if $row is hidden +In array context, return an array whose elements are true +if the corresponding row is hidden. + +=cut + +sub is_row_hidden { + my $worksheet = shift; + + my ($row) = @_; + + unless ( $worksheet->{RowHidden} ) { + return () if (wantarray); + return 0; + } + + return @{ $worksheet->{RowHidden} } if (wantarray); + return $worksheet->{RowHidden}[$row]; +} + +=head2 is_col_hidden($col) + +In scalar context, return true if $col is hidden +In array context, return an array whose elements are true +if the corresponding column is hidden. + +=cut + +sub is_col_hidden { + my $worksheet = shift; + + my ($col) = @_; + + unless ( $worksheet->{ColHidden} ) { + return () if (wantarray); + return 0; + } + + return @{ $worksheet->{ColHidden} } if (wantarray); + return $worksheet->{ColHidden}[$col]; +} + ############################################################################### # # Mapping between legacy method names and new names. @@ -627,15 +704,6 @@ my ( $col_min, $col_max ) = $worksheet->col_range(); -=head2 col_range() - -The C method returns TODO. 
- - my $col_range = $worksheet->col_range(); - -Returns 0 if the property isn't set. - - =head2 get_name() The C method returns the name of the worksheet. @@ -676,7 +744,8 @@ =head2 get_row_heights() -The C method returns an array_ref of row heights. +The C method returns an array_ref of row heights in scalar context, +and an array in list context. my $row_heights = $worksheet->get_row_heights(); @@ -685,7 +754,8 @@ =head2 get_col_widths() -The C method returns an array_ref of column widths. +The C method returns an array_ref of column widths in scalar context, +and an array in list context. my $col_widths = $worksheet->get_col_widths(); @@ -853,7 +923,7 @@ =head2 get_print_scale() -The C method returns the workbook scale for printing. The print scale fctor can be in the range 10 .. 400. +The C method returns the workbook scale for printing. The print scale factor can be in the range 10 .. 400. my $print_scale = $worksheet->get_print_scale(); @@ -943,7 +1013,9 @@ =head1 AUTHOR -Maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -951,7 +1023,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseExcel.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseExcel.pm 2015-04-25 02:50:05.000000000 +0000 @@ -4,7 +4,10 @@ # # Spreadsheet::ParseExcel - Extract information from an Excel file. # -# Copyright 2000-2008, Takanori Kawai +# Copyright (c) 2014 Douglas Wilson +# Copyright (c) 2009-2013 John McNamara +# Copyright (c) 2006-2008 Gabor Szabo +# Copyright (c) 2000-2008 Takanori Kawai # # perltidy with standard settings. # @@ -16,9 +19,14 @@ use 5.008; use OLE::Storage_Lite; +use File::Basename qw(fileparse); use IO::File; use Config; -our $VERSION = '0.56'; + +use Crypt::RC4; +use Digest::Perl::MD5; + +our $VERSION = '0.65'; use Spreadsheet::ParseExcel::Workbook; use Spreadsheet::ParseExcel::Worksheet; @@ -27,10 +35,11 @@ use Spreadsheet::ParseExcel::Cell; use Spreadsheet::ParseExcel::FmtDefault; +my $currentbook; my @aColor = ( '000000', # 0x00 'FFFFFF', 'FFFFFF', 'FFFFFF', 'FFFFFF', - 'FFFFFF', 'FFFFFF', 'FFFFFF', 'FFFFFF', # 0x08 + 'FFFFFF', 'FFFFFF', 'FFFFFF', '000000', # 0x08 'FFFFFF', 'FF0000', '00FF00', '0000FF', 'FFFF00', 'FF00FF', '00FFFF', '800000', # 0x10 '008000', '000080', '808000', '800080', @@ -44,7 +53,7 @@ '33CCCC', '99CC00', 'FFCC00', 'FF9900', 'FF6600', '666699', '969696', '003366', # 0x38 '339966', '003300', '333300', '993300', - '993366', '333399', '333333', 'FFFFFF' # 0x40 + '993366', '333399', '333333', '000000' # 0x40 ); use constant verExcel95 => 0x500; use constant verExcel97 => 0x600; @@ -54,6 +63,13 @@ use constant verBIFF5 => 0x08; use constant verBIFF8 => 0x18; +use constant MS_BIFF_CRYPTO_NONE => 0; +use constant MS_BIFF_CRYPTO_XOR => 1; +use constant MS_BIFF_CRYPTO_RC4 => 2; + +use constant sizeof_BIFF_8_FILEPASS => ( 6 + 3 * 16 ); + +use constant REKEY_BLOCK => 0x400; # Error code for some of the common parsing errors. 
use constant ErrorNone => 0; @@ -61,11 +77,14 @@ use constant ErrorNoExcelData => 2; use constant ErrorFileEncrypted => 3; +# Color index for the 'auto' color +use constant AutoColor => 64; + our %error_strings = ( - ErrorNone, '', # 0 - ErrorNoFile, 'File not found', # 1 - ErrorNoExcelData, 'No Excel data found in file', # 2 - ErrorFileEncrypted, 'File is encrypted', # 3 + ErrorNone, '', # 0 + ErrorNoFile, 'File not found', # 1 + ErrorNoExcelData, 'No Excel data found in file', # 2 + ErrorFileEncrypted, 'File is encrypted', # 3 ); @@ -73,79 +92,80 @@ our %ProcTbl = ( #Develpers' Kit P291 - 0x14 => \&_subHeader, # Header - 0x15 => \&_subFooter, # Footer - 0x18 => \&_subName, # NAME(?) - 0x1A => \&_subVPageBreak, # Vertical Page Break - 0x1B => \&_subHPageBreak, # Horizontal Page Break - 0x22 => \&_subFlg1904, # 1904 Flag - 0x26 => \&_subMargin, # Left Margin - 0x27 => \&_subMargin, # Right Margin - 0x28 => \&_subMargin, # Top Margin - 0x29 => \&_subMargin, # Bottom Margin - 0x2A => \&_subPrintHeaders, # Print Headers - 0x2B => \&_subPrintGridlines, # Print Gridlines - 0x3C => \&_subContinue, # Continue - 0x43 => \&_subXF, # Extended Format(?) + 0x14 => \&_subHeader, # Header + 0x15 => \&_subFooter, # Footer + 0x18 => \&_subName, # NAME(?) + 0x1A => \&_subVPageBreak, # Vertical Page Break + 0x1B => \&_subHPageBreak, # Horizontal Page Break + 0x22 => \&_subFlg1904, # 1904 Flag + 0x26 => \&_subMargin, # Left Margin + 0x27 => \&_subMargin, # Right Margin + 0x28 => \&_subMargin, # Top Margin + 0x29 => \&_subMargin, # Bottom Margin + 0x2A => \&_subPrintHeaders, # Print Headers + 0x2B => \&_subPrintGridlines, # Print Gridlines + 0x3C => \&_subContinue, # Continue + 0x3D => \&_subWindow1, # Window1 + 0x43 => \&_subXF, # XF for Excel < 4. + 0x0443 => \&_subXF, # XF for Excel = 4. 
+ 0x862 => \&_subSheetLayout, # Sheet Layout + 0x1B8 => \&_subHyperlink, # HYPERLINK #Develpers' Kit P292 - 0x55 => \&_subDefColWidth, # Consider - 0x5C => \&_subWriteAccess, # WRITEACCESS - 0x7D => \&_subColInfo, # Colinfo - 0x7E => \&_subRK, # RK - 0x81 => \&_subWSBOOL, # WSBOOL - 0x83 => \&_subHcenter, # HCENTER - 0x84 => \&_subVcenter, # VCENTER - 0x85 => \&_subBoundSheet, # BoundSheet + 0x55 => \&_subDefColWidth, # Consider + 0x5C => \&_subWriteAccess, # WRITEACCESS + 0x7D => \&_subColInfo, # Colinfo + 0x7E => \&_subRK, # RK + 0x81 => \&_subWSBOOL, # WSBOOL + 0x83 => \&_subHcenter, # HCENTER + 0x84 => \&_subVcenter, # VCENTER + 0x85 => \&_subBoundSheet, # BoundSheet - 0x92 => \&_subPalette, # Palette, fgp + 0x92 => \&_subPalette, # Palette, fgp - 0x99 => \&_subStandardWidth, # Standard Col + 0x99 => \&_subStandardWidth, # Standard Col #Develpers' Kit P293 - 0xA1 => \&_subSETUP, # SETUP - 0xBD => \&_subMulRK, # MULRK - 0xBE => \&_subMulBlank, # MULBLANK - 0xD6 => \&_subRString, # RString + 0xA1 => \&_subSETUP, # SETUP + 0xBD => \&_subMulRK, # MULRK + 0xBE => \&_subMulBlank, # MULBLANK + 0xD6 => \&_subRString, # RString #Develpers' Kit P294 - 0xE0 => \&_subXF, # ExTended Format - 0xE5 => \&_subMergeArea, # MergeArea (Not Documented) - 0xFC => \&_subSST, # Shared String Table - 0xFD => \&_subLabelSST, # Label SST + 0xE0 => \&_subXF, # ExTended Format + 0xE5 => \&_subMergeArea, # MergeArea (Not Documented) + 0xFC => \&_subSST, # Shared String Table + 0xFD => \&_subLabelSST, # Label SST #Develpers' Kit P295 - 0x201 => \&_subBlank, # Blank + 0x201 => \&_subBlank, # Blank - 0x202 => \&_subInteger, # Integer(Not Documented) - 0x203 => \&_subNumber, # Number - 0x204 => \&_subLabel, # Label - 0x205 => \&_subBoolErr, # BoolErr - 0x207 => \&_subString, # STRING - 0x208 => \&_subRow, # RowData - 0x221 => \&_subArray, # Array (Consider) - 0x225 => \&_subDefaultRowHeight, # Consider - - 0x31 => \&_subFont, # Font - 0x231 => \&_subFont, # Font - - 0x27E => \&_subRK, # RK - 0x41E => \&_subFormat, # Format - - 0x06 => \&_subFormula, # Formula - 0x406 => \&_subFormula, # Formula - - 0x009 => \&_subBOF, # BOF(BIFF2) - 0x209 => \&_subBOF, # BOF(BIFF3) - 0x409 => \&_subBOF, # BOF(BIFF4) - 0x809 => \&_subBOF, # BOF(BIFF5-8) + 0x202 => \&_subInteger, # Integer(Not Documented) + 0x203 => \&_subNumber, # Number + 0x204 => \&_subLabel, # Label + 0x205 => \&_subBoolErr, # BoolErr + 0x207 => \&_subString, # STRING + 0x208 => \&_subRow, # RowData + 0x221 => \&_subArray, # Array (Consider) + 0x225 => \&_subDefaultRowHeight, # Consider + + 0x31 => \&_subFont, # Font + 0x231 => \&_subFont, # Font + + 0x27E => \&_subRK, # RK + 0x41E => \&_subFormat, # Format + + 0x06 => \&_subFormula, # Formula + 0x406 => \&_subFormula, # Formula + + 0x009 => \&_subBOF, # BOF(BIFF2) + 0x209 => \&_subBOF, # BOF(BIFF3) + 0x409 => \&_subBOF, # BOF(BIFF4) + 0x809 => \&_subBOF, # BOF(BIFF5-8) ); our $BIGENDIAN; our $PREFUNC; -our $_CellHandler; -our $_NotSetCell; -our $_Object; our $_use_perlio; #------------------------------------------------------------------------------ @@ -168,7 +188,7 @@ } } - # Check ENDIAN(Little: Interl etc. BIG: Sparc etc) + # Check ENDIAN(Little: Intel etc. BIG: Sparc etc) $BIGENDIAN = ( defined $hParam{Endian} ) ? $hParam{Endian} : ( unpack( "H08", pack( "L", 2 ) ) eq '02000000' ) ? 
0 @@ -189,9 +209,17 @@ $self->SetEventHandler( $sKey, $hParam{AddHandlers}->{$sKey} ); } } - $_CellHandler = $hParam{CellHandler} if ( $hParam{CellHandler} ); - $_NotSetCell = $hParam{NotSetCell}; - $_Object = $hParam{Object}; + $self->{CellHandler} = $hParam{CellHandler}; + $self->{NotSetCell} = $hParam{NotSetCell}; + $self->{Object} = $hParam{Object}; + + + if ( defined $hParam{Password} ) { + $self->{Password} = $hParam{Password}; + } + else { + $self->{Password} = 'VelvetSweatshop'; + } $self->{_error_status} = ErrorNone; return $self; @@ -216,6 +244,274 @@ } } +#------------------------------------------------------------------------------ +# Decryption routines +# based on sources of gnumeric (ms-biff.c ms-excel-read.c) +#------------------------------------------------------------------------------ +sub md5state { + my ( $md5 ) = @_; + my $s = ''; + for ( my $i = 0 ; $i < 4 ; $i++ ) { + my $v = $md5->{_state}[$i]; + $s .= chr( $v & 0xff ); + $s .= chr( ( $v >> 8 ) & 0xff ); + $s .= chr( ( $v >> 16 ) & 0xff ); + $s .= chr( ( $v >> 24 ) & 0xff ); + } + + return $s; +} + +sub MakeKey { + my ( $block, $key, $valContext ) = @_; + + my $pwarray = "\0" x 64; + + substr( $pwarray, 0, 5 ) = substr( $valContext, 0, 5 ); + + substr( $pwarray, 5, 1 ) = chr( $block & 0xff ); + substr( $pwarray, 6, 1 ) = chr( ( $block >> 8 ) & 0xff ); + substr( $pwarray, 7, 1 ) = chr( ( $block >> 16 ) & 0xff ); + substr( $pwarray, 8, 1 ) = chr( ( $block >> 24 ) & 0xff ); + + substr( $pwarray, 9, 1 ) = "\x80"; + substr( $pwarray, 56, 1 ) = "\x48"; + + my $md5 = Digest::Perl::MD5->new(); + $md5->add( $pwarray ); + + my $s = md5state( $md5 ); + + ${$key} = Crypt::RC4->new( $s ); +} + +sub VerifyPassword { + my ( $password, $docid, $salt_data, $hashedsalt_data, $valContext ) = @_; + + my $pwarray = "\0" x 64; + my $i; + my $md5 = Digest::Perl::MD5->new(); + + for ( $i = 0 ; $i < length( $password ) ; $i++ ) { + my $o = ord( substr( $password, $i, 1 ) ); + substr( $pwarray, 2 * $i, 1 ) = chr( $o & 0xff ); + substr( $pwarray, 2 * $i + 1, 1 ) = chr( ( $o >> 8 ) & 0xff ); + } + substr( $pwarray, 2 * $i, 1 ) = chr( 0x80 ); + substr( $pwarray, 56, 1 ) = chr( ( $i << 4 ) & 0xff ); + + $md5->add( $pwarray ); + + my $mdContext1 = md5state( $md5 ); + + my $offset = 0; + my $keyoffset = 0; + my $tocopy = 5; + + $md5->reset; + + while ( $offset != 16 ) { + if ( ( 64 - $offset ) < 5 ) { + $tocopy = 64 - $offset; + } + + substr( $pwarray, $offset, $tocopy ) = + substr( $mdContext1, $keyoffset, $tocopy ); + + $offset += $tocopy; + + if ( $offset == 64 ) { + $md5->add( $pwarray ); + $keyoffset = $tocopy; + $tocopy = 5 - $tocopy; + $offset = 0; + next; + } + + $keyoffset = 0; + $tocopy = 5; + substr( $pwarray, $offset, 16 ) = $docid; + $offset += 16; + } + + substr( $pwarray, 16, 1 ) = "\x80"; + substr( $pwarray, 17, 47 ) = "\0" x 47; + substr( $pwarray, 56, 1 ) = "\x80"; + substr( $pwarray, 57, 1 ) = "\x0a"; + + $md5->add( $pwarray ); + ${$valContext} = md5state( $md5 ); + + my $key; + + MakeKey( 0, \$key, ${$valContext} ); + + my $salt = $key->RC4( $salt_data ); + my $hashedsalt = $key->RC4( $hashedsalt_data ); + + $salt .= "\x80" . 
"\0" x 47; + + substr( $salt, 56, 1 ) = "\x80"; + + $md5->reset; + $md5->add( $salt ); + my $mdContext2 = md5state( $md5 ); + + return ( $mdContext2 eq $hashedsalt ); +} + +sub SkipBytes { + my ( $q, $start, $count ) = @_; + + my $scratch = "\0" x REKEY_BLOCK; + my $block; + + $block = int( ( $start + $count ) / REKEY_BLOCK ); + + if ( $block != $q->{block} ) { + MakeKey( $q->{block} = $block, \$q->{rc4_key}, $q->{md5_ctxt} ); + $count = ( $start + $count ) % REKEY_BLOCK; + } + + $q->{rc4_key}->RC4( substr( $scratch, 0, $count ) ); + + return 1; +} + +sub SetDecrypt { + my ( $q, $version, $password ) = @_; + + if ( $q->{opcode} != 0x2f ) { + return 0; + } + + if ( $password eq '' ) { + return 0; + } + + # TODO old versions decryption + #if (version < MS_BIFF_V8 || q->data[0] == 0) + # return ms_biff_pre_biff8_query_set_decrypt (q, password); + + if ( $q->{length} != sizeof_BIFF_8_FILEPASS ) { + return 0; + } + + unless ( + VerifyPassword( + $password, + substr( $q->{data}, 6, 16 ), + substr( $q->{data}, 22, 16 ), + substr( $q->{data}, 38, 16 ), + \$q->{md5_ctxt} + ) + ) + { + return 0; + } + + $q->{encryption} = MS_BIFF_CRYPTO_RC4; + $q->{block} = -1; + + # The first record after FILEPASS seems to be unencrypted + $q->{dont_decrypt_next_record} = 1; + + # Pretend to decrypt the entire stream up till this point, it was + # encrypted, but do it anyway to keep the rc4 state in sync + + SkipBytes( $q, 0, $q->{streamPos} ); + + return 1; +} + +sub InitStream { + my ( $stream_data ) = @_; + my %q; + + $q{opcode} = 0; + $q{length} = 0; + $q{data} = ''; + + $q{stream} = $stream_data; # data stream + $q{streamLen} = length( $stream_data ); # stream length + $q{streamPos} = 0; # stream position + + $q{encryption} = 0; + $q{xor_key} = ''; + $q{rc4_key} = ''; + $q{md5_ctxt} = ''; + $q{block} = 0; + $q{dont_decrypt_next_record} = 0; + + return \%q; +} + +sub QueryNext { + my ( $q ) = @_; + + if ( $q->{streamPos} + 4 >= $q->{streamLen} ) { + return 0; + } + + my $data = substr( $q->{stream}, $q->{streamPos}, 4 ); + + ( $q->{opcode}, $q->{length} ) = unpack( 'v2', $data ); + + # No biff record should be larger than around 20,000. + if ( $q->{length} >= 20000 ) { + return 0; + } + + if ( $q->{length} > 0 ) { + $q->{data} = substr( $q->{stream}, $q->{streamPos} + 4, $q->{length} ); + } + else { + $q->{data} = undef; + $q->{dont_decrypt_next_record} = 1; + } + + if ( $q->{encryption} == MS_BIFF_CRYPTO_RC4 ) { + if ( $q->{dont_decrypt_next_record} ) { + SkipBytes( $q, $q->{streamPos}, 4 + $q->{length} ); + $q->{dont_decrypt_next_record} = 0; + } + else { + my $pos = $q->{streamPos}; + my $data = $q->{data}; + my $len = $q->{length}; + my $res = ''; + + # Pretend to decrypt header. 
+ SkipBytes( $q, $pos, 4 ); + $pos += 4; + + while ( $q->{block} != int( ( $pos + $len ) / REKEY_BLOCK ) ) { + my $step = REKEY_BLOCK - ( $pos % REKEY_BLOCK ); + $res .= $q->{rc4_key}->RC4( substr( $data, 0, $step ) ); + $data = substr( $data, $step ); + $pos += $step; + $len -= $step; + MakeKey( ++$q->{block}, \$q->{rc4_key}, $q->{md5_ctxt} ); + } + + $res .= $q->{rc4_key}->RC4( substr( $data, 0, $len ) ); + $q->{data} = $res; + } + } + elsif ( $q->{encryption} == MS_BIFF_CRYPTO_XOR ) { + + # not implemented + return 0; + } + elsif ( $q->{encryption} == MS_BIFF_CRYPTO_NONE ) { + + } + + $q->{streamPos} += 4 + $q->{length}; + + return 1; +} + ############################################################################### # # Parse() @@ -227,12 +523,17 @@ my ( $self, $source, $formatter ) = @_; my $workbook = Spreadsheet::ParseExcel::Workbook->new(); + $currentbook = $workbook; $workbook->{SheetCount} = 0; + $workbook->{CellHandler} = $self->{CellHandler}; + $workbook->{NotSetCell} = $self->{NotSetCell}; + $workbook->{Object} = $self->{Object}; + $workbook->{aColor} = [ @aColor ]; my ( $biff_data, $data_length ) = $self->_get_content( $source, $workbook ); return undef if not $biff_data; - if ($formatter) { + if ( $formatter ) { $workbook->{FmtClass} = $formatter; } else { @@ -240,27 +541,24 @@ } # Parse the BIFF data. - my $pos = 0; - my $record_header = substr( $biff_data, $pos, 4 ); - $pos += 4; - - while ( $pos <= $data_length ) { - my ( $record, $record_length ) = unpack( "v2", $record_header ); - - if ($record_length) { - $record_header = substr( $biff_data, $pos, $record_length ); - $pos += $record_length; - } + my $stream = InitStream( $biff_data ); + + while ( QueryNext( $stream ) ) { + + my $record = $stream->{opcode}; + my $record_length = $stream->{length}; + my $record_header = $stream->{data}; # If the file contains a FILEPASS record we assume that it is encrypted # and cannot be parsed. if ( $record == 0x002F ) { - $self->{_error_status} = ErrorFileEncrypted; - return undef; + unless ( SetDecrypt( $stream, '', $self->{Password} ) ) { + $self->{_error_status} = ErrorFileEncrypted; + return undef; + } } - # Special case of a formula String with no string. if ( $workbook->{_PrevPos} && ( defined $self->{FuncTbl}->{$record} ) @@ -288,20 +586,31 @@ $workbook->{_skip_chart} = 0; } - if ( defined $self->{FuncTbl}->{$record} && !$workbook->{_skip_chart} ) { - $self->{FuncTbl}->{$record}->( $workbook, $record, $record_length, $record_header ); + if ( defined $self->{FuncTbl}->{$record} && !$workbook->{_skip_chart} ) + { + $self->{FuncTbl}->{$record} + ->( $workbook, $record, $record_length, $record_header ); } $PREFUNC = $record if ( $record != 0x3C ); #Not Continue - if ( ( $pos + 4 ) <= $data_length ) { - $record_header = substr( $biff_data, $pos, 4 ); - } - - $pos += 4; - return $workbook if defined $workbook->{_ParseAbort}; + last if defined $workbook->{_ParseAbort}; } + foreach my $worksheet (@{$workbook->{Worksheet}} ) { + # Install hyperlinks into each cell + # Range is undocumented for user; allows reuse of data + + if ($worksheet->{HyperLinks}) { + foreach my $link (@{$worksheet->{HyperLinks}}) { + for( my $row = $link->[3]; $row <= $link->[4]; $row++ ) { + for( my $col = $link->[5]; $col <= $link->[6]; $col++ ) { + $worksheet->{Cells}[$row][$col]{Hyperlink} = $link; + } + } + } + } + } return $workbook; } @@ -318,50 +627,69 @@ # Reset the error status in case method is called more than once. 
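[Editorial note] Parse() now finishes by copying each HYPERLINK record onto every cell in its row/column range, which is what makes the get_hyperlink() accessor documented in Cell.pm above usable. A minimal sketch of reading those links back from a parsed worksheet ($worksheet is assumed to come from $workbook->worksheet(...)):

    my ( $row_min, $row_max ) = $worksheet->row_range();
    my ( $col_min, $col_max ) = $worksheet->col_range();

    for my $row ( $row_min .. $row_max ) {
        for my $col ( $col_min .. $col_max ) {
            my $cell = $worksheet->get_cell( $row, $col ) or next;
            my $link = $cell->get_hyperlink() or next;

            # $link is [ description, URL, target frame ]; relative
            # links may contain the %REL% placeholder described above.
            my ( $desc, $url, $frame ) = @$link;
            printf "(%d,%d) %s -> %s\n", $row, $col,
                defined $desc ? $desc : '', $url;
        }
    }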
$self->{_error_status} = ErrorNone; + + my $ref = ref($source); - if ( ref($source) eq "SCALAR" ) { + if ( $ref ) { + if ( $ref eq 'SCALAR' ) { - # Specified by a scalar buffer. - ( $biff_data, $data_length ) = $self->{GetContent}->($source); + # Specified by a scalar buffer. + ( $biff_data, $data_length ) = $self->{GetContent}->( $source ); - } - elsif (( ref($source) =~ /GLOB/ ) || ( ref($source) eq 'Fh' ) ) { + } + elsif ( $ref eq 'ARRAY' ) { + + # Specified by file content + $workbook->{File} = undef; + my $sData = join( '', @$source ); + ( $biff_data, $data_length ) = $self->{GetContent}->( \$sData ); + } + else { - # For CGI.pm (Light FileHandle) - binmode($source); - my $sWk; - my $sBuff = ''; + # Assume filehandle - while ( read( $source, $sWk, 4096 ) ) { - $sBuff .= $sWk; - } + # For CGI.pm (Light FileHandle) + my $sBuff = ''; + if ( eval { binmode( $source ) } ) { + my $sWk; - ( $biff_data, $data_length ) = $self->{GetContent}->( \$sBuff ); + while ( read( $source, $sWk, 4096 ) ) { + $sBuff .= $sWk; + } + } + else { - } - elsif ( ref($source) eq 'ARRAY' ) { + # Assume IO::Wrap or some other filehandle-like OO-only object + my $sWk; + + # IO::Wrap does not implement binmode + eval { $source->binmode() }; + + while ( $source->read( $sWk, 4096 ) ) { + $sBuff .= $sWk; + } + } + + ( $biff_data, $data_length ) = $self->{GetContent}->( \$sBuff ); - # Specified by file content - $workbook->{File} = undef; - my $sData = join( '', @$source ); - ( $biff_data, $data_length ) = $self->{GetContent}->( \$sData ); + } } else { # Specified by filename . $workbook->{File} = $source; - if ( ! -e $source ) { + if ( !-e $source ) { $self->{_error_status} = ErrorNoFile; return undef; - } + } - ( $biff_data, $data_length ) = $self->{GetContent}->($source); + ( $biff_data, $data_length ) = $self->{GetContent}->( $source ); } # If the read was successful return the data. - if ($data_length) { - return ($biff_data, $data_length ); + if ( $data_length ) { + return ( $biff_data, $data_length ); } else { $self->{_error_status} = ErrorNoExcelData; @@ -374,14 +702,14 @@ # _subGetContent (for Spreadsheet::ParseExcel) #------------------------------------------------------------------------------ sub _subGetContent { - my ($sFile) = @_; + my ( $sFile ) = @_; - my $oOl = OLE::Storage_Lite->new($sFile); - return ( undef, undef ) unless ($oOl); + my $oOl = OLE::Storage_Lite->new( $sFile ); + return ( undef, undef ) unless ( $oOl ); my @aRes = $oOl->getPpsSearch( [ - OLE::Storage_Lite::Asc2Ucs('Book'), - OLE::Storage_Lite::Asc2Ucs('Workbook') + OLE::Storage_Lite::Asc2Ucs( 'Book' ), + OLE::Storage_Lite::Asc2Ucs( 'Workbook' ) ], 1, 1 ); @@ -396,27 +724,27 @@ my $oIo; #1. $sFile is Ref of scalar - if ( ref($sFile) eq 'SCALAR' ) { - if ($_use_perlio) { + if ( ref( $sFile ) eq 'SCALAR' ) { + if ( $_use_perlio ) { open $oIo, "<", \$sFile; } else { $oIo = IO::Scalar->new; - $oIo->open($sFile); + $oIo->open( $sFile ); } } #2. $sFile is a IO::Handle object elsif ( UNIVERSAL::isa( $sFile, 'IO::Handle' ) ) { $oIo = $sFile; - binmode($oIo); + binmode( $oIo ); } #3. 
$sFile is a simple filename string - elsif ( !ref($sFile) ) { + elsif ( !ref( $sFile ) ) { $oIo = IO::File->new; - $oIo->open("<$sFile") || return undef; - binmode($oIo); + $oIo->open( "<$sFile" ) || return undef; + binmode( $oIo ); } my $sWk; my $sBuff = ''; @@ -428,7 +756,7 @@ #Not Excel file (simple method) return ( undef, undef ) if ( substr( $sBuff, 0, 1 ) ne "\x09" ); - return ( $sBuff, length($sBuff) ); + return ( $sBuff, length( $sBuff ) ); } #------------------------------------------------------------------------------ @@ -458,7 +786,7 @@ $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{SheetType}, ) = unpack( "v2", $sWk ) - if ( length($sWk) > 4 ); + if ( length( $sWk ) > 4 ); } else { $oBook->{BIFFVersion} = int( $bOp / 0x100 ); @@ -557,10 +885,10 @@ # _convDval (for Spreadsheet::ParseExcel) #------------------------------------------------------------------------------ sub _convDval { - my ($sWk) = @_; + my ( $sWk ) = @_; return unpack( "d", - ($BIGENDIAN) ? pack( "c8", reverse( unpack( "c8", $sWk ) ) ) : $sWk ); + ( $BIGENDIAN ) ? pack( "c8", reverse( unpack( "c8", $sWk ) ) ) : $sWk ); } #------------------------------------------------------------------------------ @@ -573,7 +901,7 @@ $sTxt = substr( $sWk, 8, $iL ); #Has STRUN - if ( length($sWk) > ( 8 + $iL ) ) { + if ( length( $sWk ) > ( 8 + $iL ) ) { _NewCell( $oBook, $iR, $iC, Kind => 'RString', @@ -639,7 +967,7 @@ my ( $row, $col, $format_index, $rk_number ) = unpack( 'vvvV', $data ); - my $number = _decode_rk_number($rk_number); + my $number = _decode_rk_number( $rk_number ); _NewCell( $workbook, $row, $col, @@ -672,10 +1000,10 @@ my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iC, $iF ) = unpack( "v3", $sWk ); - my ($iFlg) = unpack( "v", substr( $sWk, 12, 2 ) ); + my ( $iFlg ) = unpack( "v", substr( $sWk, 12, 2 ) ); if ( $iFlg == 0xFFFF ) { - my ($iKind) = unpack( "c", substr( $sWk, 6, 1 ) ); - my ($iVal) = unpack( "c", substr( $sWk, 8, 1 ) ); + my ( $iKind ) = unpack( "c", substr( $sWk, 6, 1 ) ); + my ( $iVal ) = unpack( "c", substr( $sWk, 8, 1 ) ); if ( ( $iKind == 1 ) or ( $iKind == 2 ) ) { my $sTxt = @@ -724,7 +1052,7 @@ #Position (not enough for ARRAY) my $iPos = $oBook->{_PrevPos}; - return undef unless ($iPos); + return undef unless ( $iPos ); $oBook->{_PrevPos} = undef; my ( $iR, $iC, $iF ) = @$iPos; @@ -809,7 +1137,7 @@ return if $workbook->{SheetCount} <= 0; my ( $row, $first_col ) = unpack( "v2", $data ); - my $last_col = unpack( "v", substr( $data, length($data) - 2, 2 ) ); + my $last_col = unpack( "v", substr( $data, length( $data ) - 2, 2 ) ); # Iterate over the RK array and decode the data. my $pos = 4; @@ -817,7 +1145,7 @@ my $data = substr( $data, $pos, 6 ); my ( $format_index, $rk_number ) = unpack 'vV', $data; - my $number = _decode_rk_number($rk_number); + my $number = _decode_rk_number( $rk_number ); _NewCell( $workbook, $row, $col, @@ -842,7 +1170,7 @@ sub _subMulBlank { my ( $oBook, $bOp, $bLen, $sWk ) = @_; my ( $iR, $iSc ) = unpack( "v2", $sWk ); - my $iEc = unpack( "v", substr( $sWk, length($sWk) - 2, 2 ) ); + my $iEc = unpack( "v", substr( $sWk, length( $sWk ) - 2, 2 ) ); my $iPos = 4; for ( my $iC = $iSc ; $iC <= $iEc ; $iC++ ) { my $iF = unpack( 'v', substr( $sWk, $iPos, 2 ) ); @@ -906,8 +1234,10 @@ unpack( "v8", $sWk ); $iEc--; - # TODO. 
we need to handle hidden rows: - # $iGr & 0x20 + if ( $iGr & 0x20 ) { + $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{RowHidden}[$iR] = 1; + } + $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{RowHeight}[$iR] = $iHght / 20; #2.MaxRow, MaxCol, MinRow, MinCol @@ -1036,11 +1366,172 @@ $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{ColFmtNo}[$i] = $iXF; - # TODO. we need to handle hidden cols: $iGr & 0x01. + if ( $iGr & 0x01 ) { + $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{ColHidden}[$i] = 1; + } } } #------------------------------------------------------------------------------ +# _subWindow1 Window information P 273 +#------------------------------------------------------------------------------ +sub _subWindow1 { + my ( $workbook, $op, $len, $wk ) = @_; + + return if ( $workbook->{BIFFVersion} <= verBIFF4() ); + + my ( + $hpos, $vpos, $width, + $height, $options, $active, + $firsttab, $numselected, $tabbarwidth + ) = unpack( "v9", $wk ); + + $workbook->{ActiveSheet} = $active; +} + +#------------------------------------------------------------------------------ +# _subSheetLayout OpenOffice 5.96 (P207) +#------------------------------------------------------------------------------ +sub _subSheetLayout { + my ( $workbook, $op, $len, $wk ) = @_; + + my @unused; + ( + my $rc, + @unused[ 1 .. 10 ], + @unused[ 11 .. 14 ], + my $color, @unused[ 15, 16 ] + ) = unpack( "vC10C4vC2", $wk ); + + return unless ( $rc == 0x0862 ); + + $workbook->{Worksheet}[ $workbook->{_CurSheet} ]->{TabColor} = $color; +} + +#------------------------------------------------------------------------------ +# _subHyperlink OpenOffice 5.96 (P182) +# +# Also see: http://msdn.microsoft.com/en-us/library/gg615407(v=office.14).aspx +#------------------------------------------------------------------------------ + +# Helper: Extract a GID, returns as text string + +sub _getguid { + my( $wk ) = @_; + my( $text, $guidl, $guids1, $guids2, @guidb ); + + ( $guidl, $guids1, $guids2, @guidb[0..7] ) = unpack( 'Vv2C8', $wk ); + + $text = sprintf( '%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X', $guidl, $guids1, $guids2, @guidb); + return $text; +} + +# Helper: Extract a counted (16-bit) unicode string, returns string, +# updates $offset +# $zterm == 1 if string is null-terminated. +# $bc if length is in bytes (not chars) + +sub _getustr { + my( $wk, $offset, $zterm, $bc ) = @_; + + my $len = unpack( 'V', substr( $wk, $offset ) ); + $offset += 4; + + if( $bc ) { + $len /= 2; + } + $len -= $zterm; + my $text = join( '', map { chr $_ } unpack( "v$len", substr( $wk, $offset ) ) ); + $text =~ s/\0.*\z// if( $zterm ); + $_[1] = ( $offset += ($len + $zterm) *2 ); + return $text; +} + +# HYPERLINK record + +sub _subHyperlink { + my ( $workbook, $op, $len, $wk ) = @_; + + # REF + my( $srow, $erow, $scol, $ecol ) = unpack( 'v4', $wk ); + + my $guid = _getguid( substr( $wk, 8 ) ); + return unless( $guid eq '79EAC9D0-BAF9-11CE-8C82-00AA004BA90B' ); + + my( $stmvers, $flags ) = unpack( 'VV', substr( $wk, 24 ) ); + return if( $flags & 0x60 || $stmvers != 2 ); + + my $offset = 32; + my( $desc,$frame, $link, $mark ); + + if( ($flags & 0x14) == 0x14 ) { + $desc = _getustr( $wk, $offset, 1, 0 ); + } + + if( $flags & 0x80 ) { + $frame = _getustr( $wk, $offset, 1, 0 ); + } + + $link = ''; + if( $flags & 0x100 ) { + # UNC path + $link = 'file:///' . 
_getustr( $wk, $offset, 1, 0 ); + } elsif( $flags & 0x1 ) { + # Has link (URI) + $guid = _getguid( substr( $wk, $offset ) ); + $offset += 16; + if( $guid eq '79EAC9E0-BAF9-11CE-8C82-00AA004BA90B' ) { + # URI + $link = _getustr( $wk, $offset, 1, 1 ); + } elsif( $guid eq '00000303-0000-0000-C000-000000000046' ) { + # Local file + $link = 'file:///'; + # !($flags & 2) = 'relative path' + if( !($flags & 0x2) ) { + my $file = $workbook->{File}; + if( defined $file && length $file ) { + $link .= (fileparse($file))[1]; + } + else { + $link .= '%REL%' + } + } + my $dirn = unpack( 'v', substr( $wk, $offset ) ); + $offset += 2; + $link .= '..\\' x $dirn; + my $namelen = unpack( 'V', substr( $wk, $offset ) ); + $offset += 4; + my $name = unpack( 'Z*', substr( $wk, $offset ) ); + $offset += $namelen; + $offset += 24; + my $size = unpack( 'V', substr( $wk, $offset ) ); + $offset += 4; + if( $size ) { + my $xlen = unpack( 'V', substr( $wk, $offset ) ) / 2; + $name = join( '', map { chr $_} unpack( "v$xlen", substr( $wk, $offset+4+2) ) ); + $offset += $size; + } + $link .= $name; + } else { + return; + } + } + + # Text mark (Fragment identifier) + if( $flags & 0x8 ) { + # Cellrefs contain reserved characters, so url-encode + my $fragment = _getustr( $wk, $offset, 1 ); + $fragment =~ s/([^\w.~-])/sprintf( '%%%02X', ord( $1 ) )/gems; + $link .= '#' . $fragment; + } + + # Update loop at end of parse() if this changes + + push @{ $workbook->{Worksheet}[ $workbook->{_CurSheet} ]->{HyperLinks} }, [ + $desc, $link, $frame, $srow, $erow, $scol, $ecol ]; +} + +#------------------------------------------------------------------------------ # _subSST (for Spreadsheet::ParseExcel) DK:P413 #------------------------------------------------------------------------------ sub _subSST { @@ -1075,7 +1566,7 @@ #Before BIFF8 else { - my ($iLen) = unpack( "c", $sWk ); + my ( $iLen ) = unpack( "c", $sWk ); $oBook->{Author} = $oBook->{FmtClass}->TextFmt( substr( $sWk, 1, $iLen ), '_native_' ); } @@ -1095,39 +1586,40 @@ $iStPos = 9; ( $iRichCnt, $iExtCnt ) = unpack( 'vV', substr( $sWk, 3, 6 ) ); } - elsif ($iRich) { #Only Rich + elsif ( $iRich ) { #Only Rich $iStPos = 5; $iRichCnt = unpack( 'v', substr( $sWk, 3, 2 ) ); $iExtCnt = 0; } - elsif ($iExt) { #Only Ext + elsif ( $iExt ) { #Only Ext $iStPos = 7; $iRichCnt = 0; $iExtCnt = unpack( 'V', substr( $sWk, 3, 4 ) ); } - else { #Nothing Special + else { #Nothing Special $iStPos = 3; $iExtCnt = 0; $iRichCnt = 0; } #3.Get String - if ($iHigh) { #Compressed + if ( $iHigh ) { #Compressed $iLen *= 2; $sStr = substr( $sWk, $iStPos, $iLen ); _SwapForUnicode( \$sStr ); - $sStr = $oBook->{FmtClass}->TextFmt( $sStr, 'ucs2' ) unless ($iCnvFlg); + $sStr = $oBook->{FmtClass}->TextFmt( $sStr, 'ucs2' ) + unless ( $iCnvFlg ); } - else { #Not Compressed + else { #Not Compressed $sStr = substr( $sWk, $iStPos, $iLen ); - $sStr = $oBook->{FmtClass}->TextFmt( $sStr, undef ) unless ($iCnvFlg); + $sStr = $oBook->{FmtClass}->TextFmt( $sStr, undef ) unless ( $iCnvFlg ); } #4. 
return - if (wantarray) { + if ( wantarray ) { #4.1 Get Rich and Ext - if ( length($sWk) < $iStPos + $iLen + $iRichCnt * 4 + $iExtCnt ) { + if ( length( $sWk ) < $iStPos + $iLen + $iRichCnt * 4 + $iExtCnt ) { return ( [ undef, $iHigh, undef, undef ], $iStPos + $iLen + $iRichCnt * 4 + $iExtCnt, @@ -1167,7 +1659,19 @@ $iFillP, $iFillCF, $iFillCB ); - if ( $oBook->{BIFFVersion} == verBIFF8 ) { + if ( $oBook->{BIFFVersion} == verBIFF2 ) { + die "Unsupported file format: Excel Version 2.0 (4.0 or later required)"; + } + elsif ( $oBook->{BIFFVersion} == verBIFF3 ) { + die "Unsupported file format: Excel Version 3.0 (4.0 or later required)"; + } + elsif ( $oBook->{BIFFVersion} == verBIFF4 ) { + + # Minimal support for Excel 4. We just get the font and format indices + # so that the cell data value can be formatted. + ( $iFnt, $iIdx, ) = unpack( "CC", $sWk ); + } + elsif ( $oBook->{BIFFVersion} == verBIFF8 ) { my ( $iGen, $iAlign, $iGen2, $iBdr1, $iBdr2, $iBdr3, $iPtn ); ( $iFnt, $iIdx, $iGen, $iAlign, $iGen2, $iBdr1, $iBdr2, $iBdr3, $iPtn ) @@ -1271,20 +1775,27 @@ # _subFormat (for Spreadsheet::ParseExcel) DK: P336 #------------------------------------------------------------------------------ sub _subFormat { + my ( $oBook, $bOp, $bLen, $sWk ) = @_; my $sFmt; - if ( ( $oBook->{BIFFVersion} == verBIFF2 ) - || ( $oBook->{BIFFVersion} == verBIFF3 ) - || ( $oBook->{BIFFVersion} == verBIFF4 ) - || ( $oBook->{BIFFVersion} == verBIFF5 ) ) - { + + if ( $oBook->{BIFFVersion} <= verBIFF5 ) { $sFmt = substr( $sWk, 3, unpack( 'c', substr( $sWk, 2, 1 ) ) ); $sFmt = $oBook->{FmtClass}->TextFmt( $sFmt, '_native_' ); } else { $sFmt = _convBIFF8String( $oBook, substr( $sWk, 2 ) ); } - $oBook->{FormatStr}->{ unpack( 'v', substr( $sWk, 0, 2 ) ) } = $sFmt; + + my $format_index = unpack( 'v', substr( $sWk, 0, 2 ) ); + + # Excel 4 and earlier used an index of 0 to indicate that a built-in format + # that was stored implicitly. + if ( $oBook->{BIFFVersion} <= verBIFF4 && $format_index == 0 ) { + $format_index = keys %{ $oBook->{FormatStr} }; + } + + $oBook->{FormatStr}->{$format_index} = $sFmt; } #------------------------------------------------------------------------------ @@ -1295,7 +1806,7 @@ for ( my $i = 0 ; $i < unpack( 'v', $sWk ) ; $i++ ) { # push @aColor, unpack('H6', substr($sWk, $i*4+2)); - $aColor[ $i + 8 ] = unpack( 'H6', substr( $sWk, $i * 4 + 2 ) ); + $oBook->{aColor}[ $i + 8 ] = unpack( 'H6', substr( $sWk, $i * 4 + 2 ) ); } } @@ -1311,7 +1822,7 @@ ( $iHeight, $iAttr, $iCIdx, $iBold, $iSuper, $iUnderline ) = unpack( "v5c", $sWk ); my ( $iSize, $iHigh ) = unpack( 'cc', substr( $sWk, 14, 2 ) ); - if ($iHigh) { + if ( $iHigh ) { $sFntName = substr( $sWk, 16, $iSize * 2 ); _SwapForUnicode( \$sFntName ); $sFntName = $oBook->{FmtClass}->TextFmt( $sFntName, 'ucs2' ); @@ -1323,7 +1834,7 @@ $bBold = ( $iBold >= 0x2BC ) ? 1 : 0; $bItalic = ( $iAttr & 0x02 ) ? 1 : 0; $bStrikeout = ( $iAttr & 0x08 ) ? 1 : 0; - $bUnderline = ($iUnderline) ? 1 : 0; + $bUnderline = ( $iUnderline ) ? 1 : 0; } elsif ( $oBook->{BIFFVersion} == verBIFF5 ) { ( $iHeight, $iAttr, $iCIdx, $iBold, $iSuper, $iUnderline ) = @@ -1335,7 +1846,7 @@ $bBold = ( $iBold >= 0x2BC ) ? 1 : 0; $bItalic = ( $iAttr & 0x02 ) ? 1 : 0; $bStrikeout = ( $iAttr & 0x08 ) ? 1 : 0; - $bUnderline = ($iUnderline) ? 1 : 0; + $bUnderline = ( $iUnderline ) ? 
1 : 0; } else { ( $iHeight, $iAttr ) = unpack( "v2", $sWk ); @@ -1386,11 +1897,12 @@ } $oBook->{Worksheet}[ $oBook->{SheetCount} ] = Spreadsheet::ParseExcel::Worksheet->new( - Name => $sWsName, - Kind => $iKind, - _Pos => $iPos, - _Book => $oBook, - _SheetNo => $oBook->{SheetCount}, + Name => $sWsName, + Kind => $iKind, + _Pos => $iPos, + _Book => $oBook, + _SheetNo => $oBook->{SheetCount}, + SheetHidden => $iGr & 0x03 ); } else { @@ -1398,10 +1910,11 @@ Spreadsheet::ParseExcel::Worksheet->new( Name => $oBook->{FmtClass}->TextFmt( substr( $sWk, 7 ), '_native_' ), - Kind => $iKind, - _Pos => $iPos, - _Book => $oBook, - _SheetNo => $oBook->{SheetCount}, + Kind => $iKind, + _Pos => $iPos, + _Book => $oBook, + _SheetNo => $oBook->{SheetCount}, + SheetHidden => $iGr & 0x03 ); } $oBook->{SheetCount}++; @@ -1415,6 +1928,11 @@ return undef unless ( defined $oBook->{_CurSheet} ); my $sW; + if ( !defined $sWk ) { + $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Header} = undef; + return; + } + #BIFF8 if ( $oBook->{BIFFVersion} >= verBIFF8 ) { $sW = _convBIFF8String( $oBook, $sWk ); @@ -1424,7 +1942,7 @@ #Before BIFF8 else { - my ($iLen) = unpack( "c", $sWk ); + my ( $iLen ) = unpack( "c", $sWk ); $sW = $oBook->{FmtClass}->TextFmt( substr( $sWk, 1, $iLen ), '_native_' ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Header} = @@ -1440,6 +1958,11 @@ return undef unless ( defined $oBook->{_CurSheet} ); my $sW; + if ( !defined $sWk ) { + $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Footer} = undef; + return; + } + #BIFF8 if ( $oBook->{BIFFVersion} >= verBIFF8 ) { $sW = _convBIFF8String( $oBook, $sWk ); @@ -1449,7 +1972,7 @@ #Before BIFF8 else { - my ($iLen) = unpack( "c", $sWk ); + my ( $iLen ) = unpack( "c", $sWk ); $sW = $oBook->{FmtClass}->TextFmt( substr( $sWk, 1, $iLen ), '_native_' ); $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Footer} = @@ -1481,7 +2004,7 @@ #Before BIFF8 else { for ( my $i = 0 ; $i < $iCnt ; $i++ ) { - my ($iRow) = unpack( 'v', substr( $sWk, 2 + $i * 2, 2 ) ); + my ( $iRow ) = unpack( 'v', substr( $sWk, 2 + $i * 2, 2 ) ); push @aBreak, $iRow; # push @aBreak, [$iRow, 0, 255]; @@ -1515,7 +2038,7 @@ #Before BIFF8 else { for ( my $i = 0 ; $i < $iCnt ; $i++ ) { - my ($iCol) = unpack( 'v', substr( $sWk, 2 + $i * 2, 2 ) ); + my ( $iCol ) = unpack( 'v', substr( $sWk, 2 + $i * 2, 2 ) ); push @aBreak, $iCol; # push @aBreak, [$iCol, 0, 65535]; @@ -1606,6 +2129,10 @@ my ( $oBook, $bOp, $bLen, $sWk ) = @_; return undef unless ( defined $oBook->{_CurSheet} ); + # Workaround for some apps and older Excels that don't write a + # complete SETUP record. 
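+    # (A complete SETUP record body is 34 bytes of data; records of any
+    #  other length are skipped.)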
+ return undef if $bLen != 34; + my $oWkS = $oBook->{Worksheet}[ $oBook->{_CurSheet} ]; my $iGrBit; @@ -1670,7 +2197,7 @@ my ( $iSheetW, $raArea ) = _ParseNameArea( substr( $sWk, 16 ) ); my @aTtlR = (); my @aTtlC = (); - foreach my $raI (@$raArea) { + foreach my $raI ( @$raArea ) { if ( $raI->[3] == 0xFF ) { #Row Title push @aTtlR, [ $raI->[0], $raI->[2] ]; } @@ -1694,7 +2221,7 @@ _ParseNameArea95( substr( $sWk, 15 ) ); my @aTtlR = (); my @aTtlC = (); - foreach my $raI (@$raArea) { + foreach my $raI ( @$raArea ) { if ( $raI->[3] == 0xFF ) { #Row Title push @aTtlR, [ $raI->[0], $raI->[2] ]; } @@ -1713,8 +2240,8 @@ # ParseNameArea (for Spreadsheet::ParseExcel) DK: 494 (ptgAread3d) #------------------------------------------------------------------------------ sub _ParseNameArea { - my ($sObj) = @_; - my ($iOp); + my ( $sObj ) = @_; + my ( $iOp ); my @aRes = (); $iOp = unpack( 'C', $sObj ); my $iSheet; @@ -1751,8 +2278,8 @@ # ParseNameArea95 (for Spreadsheet::ParseExcel) DK: 494 (ptgAread3d) #------------------------------------------------------------------------------ sub _ParseNameArea95 { - my ($sObj) = @_; - my ($iOp); + my ( $sObj ) = @_; + my ( $iOp ); my @aRes = (); $iOp = unpack( 'C', $sObj ); my $iSheet; @@ -1822,7 +2349,7 @@ #------------------------------------------------------------------------------ sub DecodeBoolErr { my ( $iVal, $iFlg ) = @_; - if ($iFlg) { # ERROR + if ( $iFlg ) { # ERROR if ( $iVal == 0x00 ) { return "#NULL!"; } @@ -1849,7 +2376,7 @@ } } else { - return ($iVal) ? "TRUE" : "FALSE"; + return ( $iVal ) ? "TRUE" : "FALSE"; } } @@ -1917,7 +2444,7 @@ my ( $self, $biff_data, $is_continue ) = @_; - if ($is_continue) { + if ( $is_continue ) { # We are reading a CONTINUE record. @@ -2048,10 +2575,10 @@ # _SwapForUnicode (for Spreadsheet::ParseExcel) #------------------------------------------------------------------------------ sub _SwapForUnicode { - my ($sObj) = @_; + my ( $sObj ) = @_; # for(my $i = 0; $i{Font}[$iFnt] ]; } $oCell->{Rich} = \@aRich; } - if ( defined $_CellHandler ) { - if ( defined $_Object ) { + if ( defined $oBook->{CellHandler} ) { + if ( defined $oBook->{Object} ) { no strict; - ref($_CellHandler) eq "CODE" - ? $_CellHandler->( + ref( $oBook->{CellHandler} ) eq "CODE" + ? $oBook->{CellHandler}->( $_Object, $oBook, $oBook->{_CurSheet}, $iR, $iC, $oCell ) - : $_CellHandler->callback( $_Object, $oBook, $oBook->{_CurSheet}, + : $oBook->{CellHandler}->callback( $_Object, $oBook, $oBook->{_CurSheet}, $iR, $iC, $oCell ); } else { - $_CellHandler->( $oBook, $oBook->{_CurSheet}, $iR, $iC, $oCell ); + $oBook->{CellHandler}->( $oBook, $oBook->{_CurSheet}, $iR, $iC, $oCell ); } } - unless ($_NotSetCell) { + unless ( $oBook->{NotSetCell} ) { $oBook->{Worksheet}[ $oBook->{_CurSheet} ]->{Cells}[$iR][$iC] = $oCell; } return $oCell; @@ -2124,12 +2651,19 @@ #------------------------------------------------------------------------------ # ColorIdxToRGB (for Spreadsheet::ParseExcel) # -# TODO JMN Make this a Workbook method and re-document. +# Returns for most recently opened book for compatibility, use +# Workbook::color_idx_to_rgb instead # #------------------------------------------------------------------------------ sub ColorIdxToRGB { my ( $sPkg, $iIdx ) = @_; - return ( ( defined $aColor[$iIdx] ) ? $aColor[$iIdx] : $aColor[0] ); + + + unless( defined $currentbook ) { + return ( ( defined $aColor[$iIdx] ) ? 
$aColor[$iIdx] : $aColor[0] ); + } + + return $currentbook->color_idx_to_rgb( $iIdx ); } @@ -2145,7 +2679,7 @@ my $parse_error = $self->{_error_status}; - if (exists $error_strings{$parse_error}) { + if ( exists $error_strings{$parse_error} ) { return $error_strings{$parse_error}; } else { @@ -2178,6 +2712,7 @@ } 1; + __END__ =head1 NAME @@ -2232,26 +2767,30 @@ my $parser = Spreadsheet::ParseExcel->new(); +It is possible to pass a password to decrypt an encrypted file: + + $parser = Spreadsheet::ParseExcel->new( Password => 'secret' ); + +Only the default Excel encryption scheme is currently supported. See L. + As an advanced feature it is also possible to pass a call-back handler to the parser to control the parsing of the spreadsheet. $parser = Spreadsheet::ParseExcel->new( - [ - CellHandler => \&cell_handler, - NotSetCell => 1, - ]); - + CellHandler => \&cell_handler, + NotSetCell => 1, + ); The call-back can be used to ignore certain cells or to reduce memory usage. See the section L for more information. -=head2 parse($filename, [$formatter]) +=head2 parse($filename, $formatter) -The Parser C method return a L object. +The Parser C method returns a L object. my $parser = Spreadsheet::ParseExcel->new(); my $workbook = $parser->parse('Book1.xls'); -If an error occurs C returns C. In general programs should contain a test for failed parsing as follows: +If an error occurs C returns C. In general, programs should contain a test for failed parsing as follows: my $parser = Spreadsheet::ParseExcel->new(); my $workbook = $parser->parse('Book1.xls'); @@ -2262,9 +2801,13 @@ The C<$filename> parameter is generally the file to be parsed. However, it can also be a filehandle or a scalar reference. -The optional C<$formatter> array ref can be an reference to a L to format the value of cells. +The optional C<$formatter> parameter can be an reference to a L to format the value of cells. This is useful for parsing workbooks with Unicode or Asian characters: -Note: Versions of Spreadsheet::ParseExcel prior to 0.50 also documented a Workbook C method as a syntactic shortcut for the above C and C combination. This is now deprecated since it breaks error handling. + my $parser = Spreadsheet::ParseExcel->new(); + my $formatter = Spreadsheet::ParseExcel::FmtJapan->new(); + my $workbook = $parser->parse( 'Book1.xls', $formatter ); + +The L formatter also supports Unicode. If you encounter any encoding problems with the default formatter try that instead. =head2 error() @@ -2287,10 +2830,11 @@ 'No Excel data found in file' 2 'File is encrypted' 3 -Spreadsheet::ParseExcel doesn't try to decrypt an encrypted Excel file. That is beyond the current scope of the module. The C method is explained below. +Spreadsheet::ParseExcel will try to decrypt an encrypted Excel file using the default password or a user supplied password passed to C, see above. If these fail the module will return the C<'File is encrypted'> error. Only the default Excel encryption scheme is currently supported, see L. + =head2 error_code() @@ -2631,7 +3175,12 @@ =head2 $font->{Color} -Returns the color index for the font. The index can be converted to a RGB string using the C Parser method. +Returns the color index for the font. The mapping to an RGB color is defined by each workbook. + +The index can be converted to a RGB string using the C<$workbook->ColorIdxToRGB()> Parser method. + +(Older versions of C provided the C class method, which is deprecated.) 
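+For illustration only (this sketch is not part of the distribution's POD), the lookup might be combined as follows, assuming a workbook parsed from a hypothetical F<Book1.xls> with a formatted cell at row 0, column 0, and assuming the workbook object provides the C<color_idx_to_rgb()> method referred to in the parser code above:
+
+    use Spreadsheet::ParseExcel;
+
+    my $parser   = Spreadsheet::ParseExcel->new();
+    my $workbook = $parser->parse('Book1.xls');
+    my $cell     = $workbook->worksheet(0)->get_cell(0, 0);
+
+    # The font colour is stored as a palette index; resolve it through
+    # the workbook's own palette rather than the deprecated class method.
+    my $idx = $cell->get_format()->{Font}->{Color};
+    my $rgb = $workbook->color_idx_to_rgb($idx);   # e.g. 'ff0000'
+    print "font colour: #$rgb\n";
+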
+ =head2 $font->{Strikeout} @@ -2645,11 +3194,9 @@ 1 => Superscript 2 => Subscript -=head1 Formatter class +=head1 Formatter Class -I - -Formatter class will convert cell data. +Formatters can be passed to the C method to deal with Unicode or Asian formatting. Spreadsheet::ParseExcel includes 2 formatter classes. C and C. It is also possible to create a user defined formatting class. @@ -2845,11 +3392,33 @@ } +=head1 Decryption + +If a workbook is "protected" then Excel will encrypt the file whether a password is supplied or not. As of version 0.59 Spreadsheet::ParseExcel supports decrypting Excel workbooks using a default or user supplied password. However, only the following encryption scheme is supported: + + Office 97/2000 Compatible encryption + +The following encryption methods are not supported: + + Weak Encryption (XOR) + RC4, Microsoft Base Cryptographic Provider v1.0 + RC4, Microsoft Base DSS and Diffie-Hellman Cryptographic Provider + RC4, Microsoft DH SChannel Cryptographic Provider + RC4, Microsoft Enhanced Cryptographic Provider v1.0 + RC4, Microsoft Enhanced DSS and Diffie-Hellman Cryptographic Provider + RC4, Microsoft Enhanced RSA and AES Cryptographic Provider + RC4, Microsoft RSA SChannel Cryptographic Provider + RC4, Microsoft Strong Cryptographic Provider + +See the following for more information on Excel encryption: L. + + + =head1 KNOWN PROBLEMS =over -=item * Issues reported by users: http://rt.cpan.org/Public/Dist/Display.html?Name=Spreadsheet-ParseExcel +=item * Issues reported by users: L =item * This module cannot read the values of formulas from files created with Spreadsheet::WriteExcel unless the user specified the values when creating the file (which is generally not the case). The reason for this is that Spreadsheet::WriteExcel writes the formula but not the formula result since it isn't in a position to calculate arbitrary Excel formulas without access to Excel's formula engine. @@ -2864,7 +3433,7 @@ Bugs can be reported via rt.cpan.org. See the following for instructions on bug reporting for Spreadsheet::ParseExcel -http://rt.cpan.org/Public/Dist/Display.html?Name=Spreadsheet-ParseExcel +L @@ -2873,23 +3442,23 @@ =over -=item * xls2csv by Ken Prows (http://search.cpan.org/~ken/xls2csv-1.06/script/xls2csv). +=item * xls2csv by Ken Prows L. =item * xls2csv and xlscat by H.Merijn Brand (these utilities are part of Spreadsheet::Read, see below). -=item * excel2txt by Ken Youens-Clark, (http://search.cpan.org/~kclark/excel2txt/excel2txt). This is an excellent example of an Excel filter using Spreadsheet::ParseExcel. It can produce CSV, Tab delimited, Html, XML and Yaml. +=item * excel2txt by Ken Youens-Clark, L. This is an excellent example of an Excel filter using Spreadsheet::ParseExcel. It can produce CSV, Tab delimited, Html, XML and Yaml. -=item * XLSperl by Jon Allen (http://search.cpan.org/~jonallen/XLSperl/bin/XLSperl). This application allows you to use Perl "one-liners" with Microsoft Excel files. +=item * XLSperl by Jon Allen L. This application allows you to use Perl "one-liners" with Microsoft Excel files. -=item * Spreadsheet::XLSX (http://search.cpan.org/~dmow/Spreadsheet-XLSX/lib/Spreadsheet/XLSX.pm) by Dmitry Ovsyanko. A module with a similar interface to Spreadsheet::ParseExcel for parsing Excel 2007 XLSX OpenXML files. +=item * Spreadsheet::XLSX L by Dmitry Ovsyanko. A module with a similar interface to Spreadsheet::ParseExcel for parsing Excel 2007 XLSX OpenXML files. 
-=item * Spreadsheet::Read (http://search.cpan.org/~hmbrand/Spreadsheet-Read/Read.pm) by H.Merijn Brand. A single interface for reading several different spreadsheet formats. +=item * Spreadsheet::Read L by H.Merijn Brand. A single interface for reading several different spreadsheet formats. -=item * Spreadsheet::WriteExcel (http://search.cpan.org/~jmcnamara/Spreadsheet-WriteExcel/lib/Spreadsheet/WriteExcel.pm). A perl module for creating new Excel files. +=item * Spreadsheet::WriteExcel L. A perl module for creating new Excel files. -=item * Spreadsheet::ParseExcel::SaveParser (http://search.cpan.org/~jmcnamara/Spreadsheet-ParseExcel/lib/Spreadsheet/ParseExcel/SaveParser.pm). This is a combination of Spreadsheet::ParseExcel and Spreadsheet::WriteExcel and it allows you to "rewrite" an Excel file. See the following example (http://search.cpan.org/~jmcnamara/Spreadsheet-WriteExcel/lib/Spreadsheet/WriteExcel.pm#MODIFYING_AND_REWRITING_EXCEL_FILES). It is part of the Spreadsheet::ParseExcel distro. +=item * Spreadsheet::ParseExcel::SaveParser L. This is a combination of Spreadsheet::ParseExcel and Spreadsheet::WriteExcel and it allows you to "rewrite" an Excel file. See the following example L. It is part of the Spreadsheet::ParseExcel distro. -=item * Text::CSV_XS (http://search.cpan.org/~hmbrand/Text-CSV_XS/CSV_XS.pm) by H.Merijn Brand. A fast and rigorous module for reading and writing CSV data. Don't consider rolling your own CSV handling, use this module instead. +=item * Text::CSV_XS L by H.Merijn Brand. A fast and rigorous module for reading and writing CSV data. Don't consider rolling your own CSV handling, use this module instead. =back @@ -2898,14 +3467,14 @@ =head1 MAILING LIST -There is a Google group for discussing and asking questions about Spreadsheet::ParseExcel. This is a good place to search to see if your question has been asked before: http://groups-beta.google.com/group/spreadsheet-parseexcel/ +There is a Google group for discussing and asking questions about Spreadsheet::ParseExcel. This is a good place to search to see if your question has been asked before: L =head1 DONATIONS -If you'd care to donate to the Spreadsheet::ParseExcel project, you can do so via PayPal: http://tinyurl.com/7ayes +If you'd care to donate to the Spreadsheet::ParseExcel project, you can do so via PayPal: L @@ -2937,6 +3506,8 @@ In no particular order: Yamaji Haruna, Simamoto Takesi, Noguchi Harumi, Ikezawa Kazuhiro, Suwazono Shugo, Hirofumi Morisada, Michael Edwards, Kim Namusk, Slaven Rezic, Grant Stevens, H.Merijn Brand and many many people + Kawai Mikako. +Alexey Mazurin added the decryption facility. 
+ =head1 DISCLAIMER OF WARRANTY @@ -2950,14 +3521,16 @@ =head1 LICENSE -Either the Perl Artistic Licence http://dev.perl.org/licenses/artistic.html or the GPL http://www.opensource.org/licenses/gpl-license.php +Either the Perl Artistic Licence L or the GPL L =head1 AUTHOR -Current maintainer 0.40+: John McNamara jmcnamara@cpan.org +Current maintainer 0.60+: Douglas Wilson dougw@cpan.org + +Maintainer 0.40-0.59: John McNamara jmcnamara@cpan.org Maintainer 0.27-0.33: Gabor Szabo szabgab@cpan.org @@ -2968,7 +3541,9 @@ =head1 COPYRIGHT -Copyright (c) 2009 John McNamara +Copyright (c) 2014 Douglas Wilson + +Copyright (c) 2009-2013 John McNamara Copyright (c) 2006-2008 Gabor Szabo diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/ParseXLSX.pm gdata-2.17.0/inst/perl/Spreadsheet/ParseXLSX.pm --- gdata-2.13.3/inst/perl/Spreadsheet/ParseXLSX.pm 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/ParseXLSX.pm 2014-08-28 03:41:38.000000000 +0000 @@ -0,0 +1,937 @@ +package Spreadsheet::ParseXLSX; +BEGIN { + $Spreadsheet::ParseXLSX::AUTHORITY = 'cpan:DOY'; +} +$Spreadsheet::ParseXLSX::VERSION = '0.16'; +use strict; +use warnings; +# ABSTRACT: parse XLSX files + +use Archive::Zip; +use Graphics::ColorUtils 'rgb2hls', 'hls2rgb'; +use Scalar::Util 'openhandle'; +use Spreadsheet::ParseExcel 0.61; +use XML::Twig; + + + +sub new { + bless {}, shift; +} + + +sub parse { + my $self = shift; + my ($file, $formatter) = @_; + + my $zip = Archive::Zip->new; + my $workbook = Spreadsheet::ParseExcel::Workbook->new; + if (openhandle($file)) { + bless $file, 'IO::File' if ref($file) eq 'GLOB'; # sigh + $zip->readFromFileHandle($file) == Archive::Zip::AZ_OK + or die "Can't open filehandle as a zip file"; + $workbook->{File} = undef; + } + elsif (!ref($file)) { + $zip->read($file) == Archive::Zip::AZ_OK + or die "Can't open file '$file' as a zip file"; + $workbook->{File} = $file; + } + else { + die "Argument to 'new' must be a filename or open filehandle"; + } + + return $self->_parse_workbook($zip, $workbook, $formatter); +} + +sub _parse_workbook { + my $self = shift; + my ($zip, $workbook, $formatter) = @_; + + my $files = $self->_extract_files($zip); + + my ($version) = $files->{workbook}->find_nodes('//fileVersion'); + my ($properties) = $files->{workbook}->find_nodes('//workbookPr'); + + if ($version) { + $workbook->{Version} = $version->att('appName') + . ($version->att('lowestEdited') + ? ('-' . $version->att('lowestEdited')) + : ("")); + } + + $workbook->{Flag1904} = $properties && $properties->att('date1904') ? 
1 : 0; + + $workbook->{FmtClass} = $formatter || Spreadsheet::ParseExcel::FmtDefault->new; + + my $themes = $self->_parse_themes((values %{ $files->{themes} })[0]); # XXX + + $workbook->{Color} = $themes->{Color}; + + my $styles = $self->_parse_styles($workbook, $files->{styles}); + + $workbook->{Format} = $styles->{Format}; + $workbook->{FormatStr} = $styles->{FormatStr}; + $workbook->{Font} = $styles->{Font}; + + $workbook->{PkgStr} = $self->_parse_shared_strings($files->{strings}) + if $files->{strings}; + + # $workbook->{StandardWidth} = ...; + + # $workbook->{Author} = ...; + + # $workbook->{PrintArea} = ...; + # $workbook->{PrintTitle} = ...; + + my @sheets = map { + my $idx = $_->att('r:id'); + my $sheet = Spreadsheet::ParseExcel::Worksheet->new( + Name => $_->att('name'), + _Book => $workbook, + _SheetNo => $idx, + ); + $self->_parse_sheet($sheet, $files->{sheets}{$idx}); + $sheet + } $files->{workbook}->find_nodes('//sheets/sheet'); + + $workbook->{Worksheet} = \@sheets; + $workbook->{SheetCount} = scalar(@sheets); + + my ($node) = $files->{workbook}->find_nodes('//workbookView'); + my $selected = $node->att('activeTab'); + $workbook->{SelectedSheet} = defined($selected) ? 0+$selected : 0; + + return $workbook; +} + +sub _parse_sheet { + my $self = shift; + my ($sheet, $sheet_file) = @_; + + $sheet->{MinRow} = 0; + $sheet->{MinCol} = 0; + $sheet->{MaxRow} = -1; + $sheet->{MaxCol} = -1; + $sheet->{Selection} = [ 0, 0 ]; + + my @merged_cells; + + my @column_formats; + my @column_widths; + my @row_heights; + + my $default_row_height = 15; + my $default_column_width = 10; + + my $sheet_xml = XML::Twig->new( + twig_roots => { + #XXX need a fallback here, the dimension tag is optional + 'dimension' => sub { + my ($twig, $dimension) = @_; + + my ($rmin, $cmin, $rmax, $cmax) = $self->_dimensions( + $dimension->att('ref') + ); + + $sheet->{MinRow} = $rmin; + $sheet->{MinCol} = $cmin; + $sheet->{MaxRow} = $rmax; + $sheet->{MaxCol} = $cmax; + + $twig->purge; + }, + + 'mergeCells/mergeCell' => sub { + my ( $twig, $merge_area ) = @_; + + if (my $ref = $merge_area->att('ref')) { + my ($topleft, $bottomright) = $ref =~ /([^:]+):([^:]+)/; + + my ($toprow, $leftcol) = $self->_cell_to_row_col($topleft); + my ($bottomrow, $rightcol) = $self->_cell_to_row_col($bottomright); + + push @{ $sheet->{MergedArea} }, [ + $toprow, $leftcol, + $bottomrow, $rightcol, + ]; + for my $row ($toprow .. $bottomrow) { + for my $col ($leftcol .. 
$rightcol) { + push(@merged_cells, [$row, $col]); + } + } + } + + $twig->purge; + }, + + 'sheetFormatPr' => sub { + my ( $twig, $format ) = @_; + + $default_row_height //= $format->att('defaultRowHeight'); + $default_column_width //= $format->att('baseColWidth'); + + $twig->purge; + }, + + 'col' => sub { + my ( $twig, $col ) = @_; + + for my $colnum ($col->att('min')..$col->att('max')) { + $column_widths[$colnum - 1] = $col->att('width'); + $column_formats[$colnum - 1] = $col->att('style'); + } + + $twig->purge; + }, + + 'row' => sub { + my ( $twig, $row ) = @_; + + $row_heights[ $row->att('r') - 1 ] = $row->att('ht'); + + $twig->purge; + }, + + 'selection' => sub { + my ( $twig, $selection ) = @_; + + if (my $cell = $selection->att('activeCell')) { + $sheet->{Selection} = [ $self->_cell_to_row_col($cell) ]; + } + elsif (my $range = $selection->att('sqref')) { + my ($topleft, $bottomright) = $range =~ /([^:]+):([^:]+)/; + $sheet->{Selection} = [ + $self->_cell_to_row_col($topleft), + $self->_cell_to_row_col($bottomright), + ]; + } + + $twig->purge; + }, + + 'sheetPr/tabColor' => sub { + my ( $twig, $tab_color ) = @_; + + $sheet->{TabColor} = $self->_color($sheet->{_Book}{Color}, $tab_color); + + $twig->purge; + }, + + } + ); + + $sheet_xml->parse( $sheet_file ); + + # 2nd pass: cell/row building is dependent on having parsed the merge definitions + # beforehand. + + $sheet_xml = XML::Twig->new( + twig_roots => { + 'sheetData/row' => sub { + my ( $twig, $row_elt ) = @_; + + for my $cell ( $row_elt->children('c') ){ + my ($row, $col) = $self->_cell_to_row_col($cell->att('r')); + my $type = $cell->att('t') || 'n'; + my $val_xml = $type eq 'inlineStr' + ? $cell->first_child('is')->first_child('t') + : $cell->first_child('v'); + my $val = $val_xml ? $val_xml->text : undef; + + my $long_type; + if (!defined($val)) { + $long_type = 'Text'; + $val = ''; + } + elsif ($type eq 's') { + $long_type = 'Text'; + $val = $sheet->{_Book}{PkgStr}[$val]; + } + elsif ($type eq 'n') { + $long_type = 'Numeric'; + $val = defined($val) ? 0+$val : undef; + } + elsif ($type eq 'd') { + $long_type = 'Date'; + } + elsif ($type eq 'b') { + $long_type = 'Text'; + $val = $val ? "TRUE" : "FALSE"; + } + elsif ($type eq 'e') { + $long_type = 'Text'; + } + elsif ($type eq 'str' || $type eq 'inlineStr') { + $long_type = 'Text'; + } + else { + die "unimplemented type $type"; # XXX + } + + my $format_idx = $cell->att('s') || 0; + my $format = $sheet->{_Book}{Format}[$format_idx]; + $format->{Merged} = !!grep { + $row == $_->[0] && $col == $_->[1] + } @merged_cells; + + # see the list of built-in formats below in _parse_styles + # XXX probably should figure this out from the actual format string, + # but that's not entirely trivial + if (grep { $format->{FmtIdx} == $_ } 14..22, 45..47) { + $long_type = 'Date'; + } + + my $cell = Spreadsheet::ParseExcel::Cell->new( + Val => $val, + Type => $long_type, + Merged => $format->{Merged}, + Format => $format, + FormatNo => $format_idx, + ($cell->first_child('f') + ? (Formula => $cell->first_child('f')->text) + : ()), + ); + $cell->{_Value} = $sheet->{_Book}{FmtClass}->ValFmt( + $cell, $sheet->{_Book} + ); + $sheet->{Cells}[$row][$col] = $cell; + } + + $twig->purge; + }, + + } + ); + + $sheet_xml->parse( $sheet_file ); + + if ( ! $sheet->{Cells} ){ + $sheet->{MaxRow} = $sheet->{MaxCol} = -1; + } + + $sheet->{DefRowHeight} = 0+$default_row_height; + $sheet->{DefColWidth} = 0+$default_column_width; + $sheet->{RowHeight} = [ + map { defined $_ ? 
0+$_ : 0+$default_row_height } @row_heights + ]; + $sheet->{ColWidth} = [ + map { defined $_ ? 0+$_ : 0+$default_column_width } @column_widths + ]; + $sheet->{ColFmtNo} = \@column_formats; + +} + +sub _parse_shared_strings { + my $self = shift; + my ($strings) = @_; + + my $PkgStr = []; + + if ($strings) { + my $xml = XML::Twig->new( + twig_handlers => { + 'si' => sub { + my ( $twig, $si ) = @_; + + # XXX this discards information about formatting within cells + # not sure how to represent that + push @$PkgStr, + join( '', map { $_->text } $si->find_nodes('.//t') ); + $twig->purge; + }, + } + ); + $xml->parse( $strings ); + } + return $PkgStr; +} + +sub _parse_themes { + my $self = shift; + my ($themes) = @_; + + return {} unless $themes; + + my @color = map { + $_->name eq 'a:sysClr' ? $_->att('lastClr') : $_->att('val') + } $themes->find_nodes('//a:clrScheme/*/*'); + + # this shouldn't be necessary, but the documentation is wrong here + # see http://stackoverflow.com/questions/2760976/theme-confusion-in-spreadsheetml + ($color[0], $color[1]) = ($color[1], $color[0]); + ($color[2], $color[3]) = ($color[3], $color[2]); + + return { + Color => \@color, + } +} + +sub _parse_styles { + my $self = shift; + my ($workbook, $styles) = @_; + + my %halign = ( + center => 2, + centerContinuous => 6, + distributed => 7, + fill => 4, + general => 0, + justify => 5, + left => 1, + right => 3, + ); + + my %valign = ( + bottom => 2, + center => 1, + distributed => 4, + justify => 3, + top => 0, + ); + + my %border = ( + dashDot => 9, + dashDotDot => 11, + dashed => 3, + dotted => 4, + double => 6, + hair => 7, + medium => 2, + mediumDashDot => 10, + mediumDashDotDot => 12, + mediumDashed => 8, + none => 0, + slantDashDot => 13, + thick => 5, + thin => 1, + ); + + my %fill = ( + darkDown => 7, + darkGray => 3, + darkGrid => 9, + darkHorizontal => 5, + darkTrellis => 10, + darkUp => 8, + darkVertical => 6, + gray0625 => 18, + gray125 => 17, + lightDown => 13, + lightGray => 4, + lightGrid => 15, + lightHorizontal => 11, + lightTrellis => 16, + lightUp => 14, + lightVertical => 12, + mediumGray => 2, + none => 0, + solid => 1, + ); + + my @fills = map { + [ + $fill{$_->att('patternType')}, + $self->_color($workbook->{Color}, $_->first_child('fgColor'), 1), + $self->_color($workbook->{Color}, $_->first_child('bgColor'), 1), + ] + } $styles->find_nodes('//fills/fill/patternFill'); + + my @borders = map { + my $border = $_; + # XXX specs say "begin" and "end" rather than "left" and "right", + # but... 
that's not what seems to be in the file itself (sigh) + { + colors => [ + map { + $self->_color( + $workbook->{Color}, + $border->first_child($_)->first_child('color') + ) + } qw(left right top bottom) + ], + styles => [ + map { + $border{$border->first_child($_)->att('style') || 'none'} + } qw(left right top bottom) + ], + diagonal => [ + 0, # XXX ->att('diagonalDown') and ->att('diagonalUp') + 0, # XXX ->att('style') + $self->_color( + $workbook->{Color}, + $border->first_child('diagonal')->first_child('color') + ), + ], + } + } $styles->find_nodes('//borders/border'); + + # these defaults are from + # http://social.msdn.microsoft.com/Forums/en-US/oxmlsdk/thread/e27aaf16-b900-4654-8210-83c5774a179c + my %format_str = ( + 0 => 'GENERAL', + 1 => '0', + 2 => '0.00', + 3 => '#,##0', + 4 => '#,##0.00', + 5 => '$#,##0_);($#,##0)', + 6 => '$#,##0_);[Red]($#,##0)', + 7 => '$#,##0.00_);($#,##0.00)', + 8 => '$#,##0.00_);[Red]($#,##0.00)', + 9 => '0%', + 10 => '0.00%', + 11 => '0.00E+00', + 12 => '# ?/?', + 13 => '# ??/??', + 14 => 'm/d/yyyy', + 15 => 'd-mmm-yy', + 16 => 'd-mmm', + 17 => 'mmm-yy', + 18 => 'h:mm AM/PM', + 19 => 'h:mm:ss AM/PM', + 20 => 'h:mm', + 21 => 'h:mm:ss', + 22 => 'm/d/yyyy h:mm', + 37 => '#,##0_);(#,##0)', + 38 => '#,##0_);[Red](#,##0)', + 39 => '#,##0.00_);(#,##0.00)', + 40 => '#,##0.00_);[Red](#,##0.00)', + 45 => 'mm:ss', + 46 => '[h]:mm:ss', + 47 => 'mm:ss.0', + 48 => '##0.0E+0', + 49 => '@', + (map { + $_->att('numFmtId') => $_->att('formatCode') + } $styles->find_nodes('//numFmts/numFmt')), + ); + + my @font = map { + my $vert = $_->first_child('vertAlign'); + my $under = $_->first_child('u'); + Spreadsheet::ParseExcel::Font->new( + Height => 0+$_->first_child('sz')->att('val'), + # Attr => $iAttr, + # XXX not sure if there's a better way to keep the indexing stuff + # intact rather than just going straight to #xxxxxx + # XXX also not sure what it means for the color tag to be missing, + # just assuming black for now + Color => ($_->first_child('color') + ? $self->_color( + $workbook->{Color}, + $_->first_child('color') + ) + : '#000000' + ), + Super => ($vert + ? ($vert->att('val') eq 'superscript' ? 1 + : $vert->att('val') eq 'subscript' ? 2 + : 0) + : 0 + ), + # XXX not sure what the single accounting and double accounting + # underline styles map to in xlsx. also need to map the new + # underline styles + UnderlineStyle => ($under + # XXX sometimes style xml files can contain just with no + # val attribute. i think this means single underline, but not + # sure + ? (!$under->att('val') ? 1 + : $under->att('val') eq 'single' ? 1 + : $under->att('val') eq 'double' ? 2 + : 0) + : 0 + ), + Name => $_->first_child('name')->att('val'), + + Bold => $_->has_child('b') ? 1 : 0, + Italic => $_->has_child('i') ? 1 : 0, + Underline => $_->has_child('u') ? 1 : 0, + Strikeout => $_->has_child('strike') ? 1 : 0, + ) + } $styles->find_nodes('//fonts/font'); + + my @format = map { + my $alignment = $_->first_child('alignment'); + my $protection = $_->first_child('protection'); + Spreadsheet::ParseExcel::Format->new( + IgnoreFont => !$_->att('applyFont'), + IgnoreFill => !$_->att('applyFill'), + IgnoreBorder => !$_->att('applyBorder'), + IgnoreAlignment => !$_->att('applyAlignment'), + IgnoreNumberFormat => !$_->att('applyNumberFormat'), + IgnoreProtection => !$_->att('applyProtection'), + + FontNo => 0+$_->att('fontId'), + Font => $font[$_->att('fontId')], + FmtIdx => 0+$_->att('numFmtId'), + + Lock => $protection && defined $protection->att('locked') + ? 
$protection->att('locked') + : 1, + Hidden => $protection + ? $protection->att('hidden') + : 0, + # Style => $iStyle, + # Key123 => $i123, + AlignH => $alignment + ? $halign{$alignment->att('horizontal') || 'general'} + : 0, + Wrap => $alignment + ? $alignment->att('wrapText') + : 0, + AlignV => $alignment + ? $valign{$alignment->att('vertical') || 'bottom'} + : 2, + # JustLast => $iJustL, + # Rotate => $iRotate, + + # Indent => $iInd, + # Shrink => $iShrink, + # Merge => $iMerge, + # ReadDir => $iReadDir, + + BdrStyle => $borders[$_->att('borderId')]{styles}, + BdrColor => $borders[$_->att('borderId')]{colors}, + BdrDiag => $borders[$_->att('borderId')]{diagonal}, + Fill => $fills[$_->att('fillId')], + ) + } $styles->find_nodes('//cellXfs/xf'); + + return { + FormatStr => \%format_str, + Font => \@font, + Format => \@format, + } +} + +sub _extract_files { + my $self = shift; + my ($zip) = @_; + + my $type_base = + 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'; + + my $rels = $self->_parse_xml( + $zip, + $self->_rels_for('') + ); + my $wb_name = ($rels->find_nodes( + qq + ))[0]->att('Target'); + my $wb_xml = $self->_parse_xml($zip, $wb_name); + + my $path_base = $self->_base_path_for($wb_name); + my $wb_rels = $self->_parse_xml( + $zip, + $self->_rels_for($wb_name) + ); + + my ($strings_xml) = map { + $zip->memberNamed($path_base . $_->att('Target'))->contents + } $wb_rels->find_nodes(qq); + + my $styles_xml = $self->_parse_xml( + $zip, + $path_base . ($wb_rels->find_nodes( + qq + ))[0]->att('Target') + ); + + my %worksheet_xml = map { + if ( my $sheetfile = $zip->memberNamed($path_base . $_->att('Target'))->contents ) { + ( $_->att('Id') => $sheetfile ); + } + } $wb_rels->find_nodes(qq); + + my %themes_xml = map { + $_->att('Id') => $self->_parse_xml($zip, $path_base . $_->att('Target')) + } $wb_rels->find_nodes(qq); + + return { + workbook => $wb_xml, + styles => $styles_xml, + sheets => \%worksheet_xml, + themes => \%themes_xml, + ($strings_xml + ? (strings => $strings_xml) + : ()), + }; +} + +sub _parse_xml { + my $self = shift; + my ($zip, $subfile) = @_; + + my $member = $zip->memberNamed($subfile); + die "no subfile named $subfile" unless $member; + + my $xml = XML::Twig->new; + $xml->parse(scalar $member->contents); + + return $xml; +} + +sub _rels_for { + my $self = shift; + my ($file) = @_; + + my @path = split '/', $file; + my $name = pop @path; + $name = '' unless defined $name; + push @path, '_rels'; + push @path, "$name.rels"; + + return join '/', @path; +} + +sub _base_path_for { + my $self = shift; + my ($file) = @_; + + my @path = split '/', $file; + pop @path; + + return join('/', @path) . 
'/'; +} + +sub _dimensions { + my $self = shift; + my ($dim) = @_; + + my ($topleft, $bottomright) = split ':', $dim; + $bottomright = $topleft unless defined $bottomright; + + my ($rmin, $cmin) = $self->_cell_to_row_col($topleft); + my ($rmax, $cmax) = $self->_cell_to_row_col($bottomright); + + return ($rmin, $cmin, $rmax, $cmax); +} + +sub _cell_to_row_col { + my $self = shift; + my ($cell) = @_; + + my ($col, $row) = $cell =~ /([A-Z]+)([0-9]+)/; + + my $ncol = 0; + for my $char (split //, $col) { + $ncol *= 26; + $ncol += ord($char) - ord('A') + 1; + } + $ncol = $ncol - 1; + + my $nrow = $row - 1; + + return ($nrow, $ncol); +} + +sub _color { + my $self = shift; + my ($colors, $color_node, $fill) = @_; + + my $color; + if ($color_node && !$color_node->att('auto')) { + if (defined $color_node->att('indexed')) { + # see https://rt.cpan.org/Public/Bug/Display.html?id=93065 + if ($fill && $color_node->att('indexed') == 64) { + return '#FFFFFF'; + } + else { + $color = '#' . Spreadsheet::ParseExcel->ColorIdxToRGB( + $color_node->att('indexed') + ); + } + } + elsif (defined $color_node->att('rgb')) { + $color = '#' . substr($color_node->att('rgb'), 2, 6); + } + elsif (defined $color_node->att('theme')) { + $color = '#' . $colors->[$color_node->att('theme')]; + } + + $color = $self->_apply_tint($color, $color_node->att('tint')) + if $color_node->att('tint'); + } + + return $color; +} + +sub _apply_tint { + my $self = shift; + my ($color, $tint) = @_; + + my ($r, $g, $b) = map { oct("0x$_") } $color =~ /#(..)(..)(..)/; + my ($h, $l, $s) = rgb2hls($r, $g, $b); + + if ($tint < 0) { + $l = $l * (1.0 + $tint); + } + else { + $l = $l * (1.0 - $tint) + (1.0 - 1.0 * (1.0 - $tint)); + } + + return scalar hls2rgb($h, $l, $s); +} + + +1; + +__END__ + +=pod + +=encoding UTF-8 + +=head1 NAME + +Spreadsheet::ParseXLSX - parse XLSX files + +=head1 VERSION + +version 0.16 + +=head1 SYNOPSIS + + use Spreadsheet::ParseXLSX; + + my $parser = Spreadsheet::ParseXLSX->new; + my $workbook = $parser->parse("file.xlsx"); + # see Spreadsheet::ParseExcel for further documentation + +=head1 DESCRIPTION + +This module is an adaptor for L that reads XLSX files. + +=head1 METHODS + +=head2 new + +Returns a new parser instance. Takes no parameters. + +=head2 parse($file, $formatter) + +Parses an XLSX file. Parsing errors throw an exception. C<$file> can be either +a filename or an open filehandle. Returns a +L instance containing the parsed data. +The C<$formatter> argument is an optional formatter class as described in L. + +=head1 INCOMPATIBILITIES + +This module returns data using classes from L, so for +the most part, it should just be a drop-in replacement. That said, there are a +couple areas where the data returned is intentionally different: + +=over 4 + +=item Colors + +In Spreadsheet::ParseExcel, colors are represented by integers which index into +the color table, and you have to use +C<< Spreadsheet::ParseExcel->ColorIdxToRGB >> in order to get the actual value +out. In Spreadsheet::ParseXLSX, while the color table still exists, cells are +also allowed to specify their color directly rather than going through the +color table. In order to avoid confusion, I normalize all color values in +Spreadsheet::ParseXLSX to their string RGB format (C<"#0088ff">). This affects +the C, C, and C properties of formats, and the +C property of fonts. Note that the default color is represented by +C (the same thing that C would return). + +=item Formulas + +Spreadsheet::ParseExcel doesn't support formulas. 
Spreadsheet::ParseXLSX +provides basic formula support by returning the text of the formula as part of +the cell data. You can access it via C<< $cell->{Formula} >>. Note that the +restriction still holds that formula cell values aren't available unless they +were explicitly provided when the spreadsheet was written. + +=back + +=head1 BUGS + +=over 4 + +=item Large spreadsheets may cause segfaults on perl 5.14 and earlier + +This module internally uses XML::Twig, which makes it potentially subject to +L +on perl versions 5.14 and below (the underlying bug with perl weak references +was fixed in perl 5.15.5). The larger and more complex the spreadsheet, the +more likely to be affected, but the actual size at which it segfaults is +platform dependent. On a 64-bit perl with 7.6gb memory, it was seen on +spreadsheets about 300mb and above. You can work around this adding +C to your code before parsing the spreadsheet, +although this may have other consequences such as memory leaks. + +=item Worksheets without the C tag are not supported + +=item Intra-cell formatting is discarded + +=item Diagonal border styles are ignored + +=back + +In addition, there are still a few areas which are not yet implemented (the +XLSX spec is quite large). If you run into any of those, bug reports are quite +welcome. + +Please report any bugs to GitHub Issues at +L. + +=head1 SEE ALSO + +L: The equivalent, for XLS files. + +L: An older, less robust and featureful implementation. + +=head1 SUPPORT + +You can find this documentation for this module with the perldoc command. + + perldoc Spreadsheet::ParseXLSX + +You can also look for information at: + +=over 4 + +=item * MetaCPAN + +L + +=item * RT: CPAN's request tracker + +L + +=item * Github + +L + +=item * CPAN Ratings + +L + +=back + +=head1 SPONSORS + +Parts of this code were paid for by + +=over 4 + +=item Socialflow L + +=back + +=head1 AUTHOR + +Jesse Luehrs + +=head1 COPYRIGHT AND LICENSE + +This software is Copyright (c) 2014 by Jesse Luehrs. + +This is free software, licensed under: + + The MIT (X11) License + +=cut diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/XLSX/Fmt2007.pm gdata-2.17.0/inst/perl/Spreadsheet/XLSX/Fmt2007.pm --- gdata-2.13.3/inst/perl/Spreadsheet/XLSX/Fmt2007.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/XLSX/Fmt2007.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,198 +0,0 @@ -# This code is adapted for Excel 2007 from: -# Spreadsheet::XLSX::FmtDefault -# by Kawai, Takanori (Hippo2000) 2001.2.2 -# This Program is ALPHA version. 
-#============================================================================== -package Spreadsheet::XLSX::Fmt2007; -use strict; -use warnings; -use POSIX; - -use Spreadsheet::XLSX::Utility2007 qw(ExcelFmt); -our $VERSION = '0.12'; # - -my %hFmtDefault = ( - 0x00 => '@', - 0x01 => '0', - 0x02 => '0.00', - 0x03 => '#,##0', - 0x04 => '#,##0.00', - 0x05 => '($#,##0_);($#,##0)', - 0x06 => '($#,##0_);[RED]($#,##0)', - 0x07 => '($#,##0.00_);($#,##0.00_)', - 0x08 => '($#,##0.00_);[RED]($#,##0.00_)', - 0x09 => '0%', - 0x0A => '0.00%', - 0x0B => '0.00E+00', - 0x0C => '# ?/?', - 0x0D => '# ??/??', - 0x0E => 'm-d-yy', - 0x0F => 'd-mmm-yy', - 0x10 => 'd-mmm', - 0x11 => 'mmm-yy', - 0x12 => 'h:mm AM/PM', - 0x13 => 'h:mm:ss AM/PM', - 0x14 => 'h:mm', - 0x15 => 'h:mm:ss', - 0x16 => 'm-d-yy h:mm', -#0x17-0x24 -- Differs in National - 0x25 => '(#,##0_);(#,##0)', - 0x26 => '(#,##0_);[RED](#,##0)', - 0x27 => '(#,##0.00);(#,##0.00)', - 0x28 => '(#,##0.00);[RED](#,##0.00)', - 0x29 => '_(*#,##0_);_(*(#,##0);_(*"-"_);_(@_)', - 0x2A => '_($*#,##0_);_($*(#,##0);_(*"-"_);_(@_)', - 0x2B => '_(*#,##0.00_);_(*(#,##0.00);_(*"-"??_);_(@_)', - 0x2C => '_($*#,##0.00_);_($*(#,##0.00);_(*"-"??_);_(@_)', - 0x2D => 'mm:ss', - 0x2E => '[h]:mm:ss', - 0x2F => 'mm:ss.0', - 0x30 => '##0.0E+0', - 0x31 => '@', -); -#------------------------------------------------------------------------------ -# new (for Spreadsheet::XLSX::FmtDefault) -#------------------------------------------------------------------------------ -sub new { - my($sPkg, %hKey) = @_; - my $oThis={ - }; - bless $oThis; - return $oThis; -} -#------------------------------------------------------------------------------ -# TextFmt (for Spreadsheet::XLSX::FmtDefault) -#------------------------------------------------------------------------------ -sub TextFmt { - my($oThis, $sTxt, $sCode) =@_; - return $sTxt if((! 
defined($sCode)) || ($sCode eq '_native_')); - return pack('U*', unpack('n*', $sTxt)); -} -#------------------------------------------------------------------------------ -# FmtStringDef (for Spreadsheet::XLSX::FmtDefault) -#------------------------------------------------------------------------------ -sub FmtStringDef { - my($oThis, $iFmtIdx, $oBook, $rhFmt) =@_; - my $sFmtStr = $oBook->{FormatStr}->{$iFmtIdx}; - - if(!(defined($sFmtStr)) && defined($rhFmt)) { - $sFmtStr = $rhFmt->{$iFmtIdx}; - } - $sFmtStr = $hFmtDefault{$iFmtIdx} unless($sFmtStr); - return $sFmtStr; -} -#------------------------------------------------------------------------------ -# FmtString (for Spreadsheet::XLSX::FmtDefault) -#------------------------------------------------------------------------------ -sub FmtString { - my($oThis, $oCell, $oBook) =@_; - - my $sFmtStr;# = $oThis->FmtStringDef( -# $oBook->{Format}[$oCell->{FormatNo}]->{FmtIdx}, $oBook); - - unless(defined($sFmtStr)) { - if ($oCell->{Type} eq 'Numeric') { - if($oCell->{Format}){ - $sFmtStr=$oCell->{Format}; - } - # Integer - elsif( isdigit($oCell->{Val}) ){ - $sFmtStr = '0'; - } - # Floating Point - else{ - $sFmtStr = '0.000000000000000'; - } - } - elsif($oCell->{Type} eq 'Date') { - if($oCell->{Format}){ - $sFmtStr=$oCell->{Format}; - } - # Fraction < 1 --> Time - elsif(int($oCell->{Val}) <= 0){ - $sFmtStr = 'hh:mm:ss'; - } - # Whole number --> Date - elsif(int($oCell->{Val}) != $oCell->{Val}){ - $sFmtStr = 'hh:mm:ss'; - } - # Otherwise both Date and Time - else { - $sFmtStr = 'mm-dd-yyyy hh:mm:ss'; - } - } - elsif($oCell->{Type} eq 'Time') - { - if($oCell->{Format}) - { - $sFmtStr=$oCell->{Format}; - } - elsif(int($oCell->{Val}) <= 0) - { - $sFmtStr = 'hh:mm:ss'; - } - } - else - { - $sFmtStr = '@'; - } - } - return $sFmtStr; -} -#------------------------------------------------------------------------------ -# ValFmt (for Spreadsheet::XLSX::FmtDefault) -#------------------------------------------------------------------------------ -sub ValFmt { - my($oThis, $oCell, $oBook) =@_; - - my($Dt, $iFmtIdx, $iNumeric, $Flg1904); - - if ($oCell->{Type} eq 'Text') { - $Dt = ((defined $oCell->{Val}) && ($oCell->{Val} ne ''))? 
- $oThis->TextFmt($oCell->{Val}, $oCell->{Code}):''; - } - else { - $Dt = $oCell->{Val}; - } - $Flg1904 = $oBook->{Flg1904}; - my $sFmtStr = $oThis->FmtString($oCell, $oBook); - return ExcelFmt($sFmtStr, $Dt, $Flg1904, $oCell->{Type}); -} -#------------------------------------------------------------------------------ -# ChkType (for Spreadsheet::XLSX::FmtDefault) -#------------------------------------------------------------------------------ -sub ChkType { - my($oPkg, $iNumeric, $iFmtIdx) =@_; - if ($iNumeric) { - if((($iFmtIdx >= 0x0E) && ($iFmtIdx <= 0x16)) || - (($iFmtIdx >= 0x2D) && ($iFmtIdx <= 0x2F))) { - return "Date"; - } - else { - return "Numeric"; - } - } - else { - return "Text"; - } -} -1; - -__END__ - -=head1 SYNOPSIS - - $cell = $myworkbook->worksheet->{Cells}[$row][$col] - my $type = $cell->{Type}; # Date, Text, or Numeric - my $disp_value = $cell->Value; # displayed (formatted) value set in XLSX by $myFmt2007->ValFmt($cell, $workbook) - my $fund_value = $cell->{Val}; # fundemental (underlying) value - my $formatter; - if ($myworkbook->excel07) { - $formatter=Spreadsheet::XLSX::Fmt2007->new(); - } else { - $formatter=Spreadsheet::ParseExcel::FmtDefault->new(); - } - my $format_string = $formatter->FmtString($cell,$self->workbook); - - -=cut diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/XLSX/Utility2007.pm gdata-2.17.0/inst/perl/Spreadsheet/XLSX/Utility2007.pm --- gdata-2.13.3/inst/perl/Spreadsheet/XLSX/Utility2007.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/XLSX/Utility2007.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,1153 +0,0 @@ -# This code is adapted for Excel 2007 from: -# Spreadsheet::XLSX::Utility -# by Kawai, Takanori (Hippo2000) 2001.2.2 -# This Program is ALPHA version. -#============================================================================== -# Spreadsheet::XLSX::Utility; -#============================================================================== -package Spreadsheet::XLSX::Utility2007; -use strict; -use warnings; - -require Exporter; -use vars qw(@ISA @EXPORT_OK); -@ISA = qw(Exporter); -@EXPORT_OK = qw(ExcelFmt LocaltimeExcel ExcelLocaltime col2int int2col sheetRef xls2csv); -our $VERSION = '0.12'; - -my $sNUMEXP = '(^[+-]?\d+(\.\d+)?$)|(^[+-]?\d+\.?(\d*)[eE][+-](\d+))$'; - -#------------------------------------------------------------------------------ -# ExcelFmt (for Spreadsheet::XLSX::Utility2007) -#------------------------------------------------------------------------------ -sub ExcelFmt { - my($sFmt, $iData, $i1904, $sType) =@_; - my $sCond; - my $sWkF =''; - my $sRes=''; - $sFmt=unescape_HTML($sFmt); -#1. Get Condition - if($sFmt=~/^\[([<>=][^\]]+)\](.*)$/) { - $sCond = $1; - $sFmt = $2; - } - $sFmt =~ s/_/ /g; - - my @sFmtWk; - my $sFmtObj; - my $iFmtPos=0; - my $iDblQ=0; - my $iQ = 0; - foreach my $sWk (split //, $sFmt) { - if($iDblQ or $iQ) { - $sFmtWk[$iFmtPos] .=$sWk; - $iDblQ = 0 if($sWk eq '"'); - $iQ = 0; - next; - } - - if($sWk eq ';') { - $iFmtPos++; - next; - } - elsif($sWk eq '"') { - $iDblQ = 1; - } - elsif($sWk eq '!') { - $iQ = 1; - } - elsif($sWk eq '\\') { - $iQ = 1; -# next; - } - elsif($sWk eq '(') { #Skip? - next; - } - elsif($sWk eq ')') { #Skip? - next; - } - $sFmtWk[$iFmtPos] .=$sWk; - } -#Get FmtString - if(scalar(@sFmtWk)>1) { - if($sCond) { - $sFmtObj = $sFmtWk[((eval(qq/"$iData" $sCond/))? 0: 1)]; - } - else { - my $iWk = ($iData =~/$sNUMEXP/)? $iData: 0; - # $iData = abs($iData) if($iWk !=0); - if(scalar(@sFmtWk)==2) { - $sFmtObj = $sFmtWk[(($iWk>=0)? 
0: 1)]; - } - elsif(scalar(@sFmtWk)==3) { - $sFmtObj = $sFmtWk[(($iWk>0)? 0: (($iWk<0)? 1: 2))]; - } - else { - if($iData =~/$sNUMEXP/) { - $sFmtObj = $sFmtWk[(($iWk>0)? 0: (($iWk<0)? 1: 2))]; - } - else { - $sFmtObj = $sFmtWk[ 3]; - } - } - } - } - else { - $sFmtObj = $sFmtWk[0]; - } - my $sColor; - if($sFmtObj =~ /^(\[[^hm\[\]]*\])/) { - $sColor = $1; - $sFmtObj = substr($sFmtObj, length($sColor)); - chop($sColor); - $sColor = substr($sColor, 1); - } -#print "FMT:$sFmtObj Co:$sColor\n"; - -#3.Build Data - my $iFmtMode=0; #1:Number, 2:Date - my $i=0; - my $ir=0; - my $sFmtWk; - my @aRep = (); - my $sFmtRes=''; - - my $iFflg = -1; - my $iRpos = -1; - my $iCmmCnt = 0; - my $iBunFlg = 0; - my $iFugouFlg = 0; - my $iPer = 0; - my $iAm=0; - my $iSt; - - while($i=0) && - (($aRep[$#aRep]->[0] eq 'h') or ($aRep[$#aRep]->[0] eq 'hh'))) { - push @aRep, ['mm', length($sFmtRes), 2, 'min']; - } - else { - push @aRep, [substr($sFmtObj, $i, 2), length($sFmtRes), 2]; - } - if((substr($sFmtObj, $i, 2) eq 'ss') && ($#aRep>0)) { - if(($aRep[$#aRep-1]->[0] eq 'm') || - ($aRep[$#aRep-1]->[0] eq 'mm')) { - push(@{$aRep[$#aRep-1]}, 'min'); - } - } - $i+=2; - } - elsif((substr($sFmtObj, $i, 1) eq 'm') || - (substr($sFmtObj, $i, 1) eq 'd') || - (substr($sFmtObj, $i, 1) eq 'h') || - (substr($sFmtObj, $i, 1) eq 's')){ - if((substr($sFmtObj, $i, 1) eq 'm') && - ($#aRep>=0) && - (($aRep[$#aRep]->[0] eq 'h') or ($aRep[$#aRep]->[0] eq 'hh'))) { - push @aRep, ['m', length($sFmtRes), 1, 'min']; - } - else { - push @aRep, [substr($sFmtObj, $i, 1), length($sFmtRes), 1]; - } - if((substr($sFmtObj, $i, 1) eq 's') && ($#aRep>0)) { - if(($aRep[$#aRep-1]->[0] eq 'm') || - ($aRep[$#aRep-1]->[0] eq 'mm')) { - push(@{$aRep[$#aRep-1]}, 'min'); - } - } - $i+=1; - } - } - elsif((substr($sFmtObj, $i, 3) eq '[h]')) { - push @aRep, ['[h]', length($sFmtRes), 3]; - $i+=3; - } - elsif((substr($sFmtObj, $i, 4) eq '[mm]')) { - push @aRep, ['[mm]', length($sFmtRes), 4]; - $i+=4; - } - elsif($sWk eq '@') { - push @aRep, ['@', length($sFmtRes), 1]; - $i++; - } - elsif($sWk eq '*') { - push @aRep, [substr($sFmtObj, $i, 1), - length($sFmtRes), 1]; - } - else{ - $i++; - } - $i++ if($i == $iSt); #No Format match - $sFmtRes .= substr($sFmtObj, $iSt, $i-$iSt); - } -#print "FMT: $iRpos ",$sFmtRes, "\n"; - if($iFflg != -1) { - push @aRep, [substr($sFmtObj, $iFflg, $i-$iFflg+1), - $iRpos,, $i-$iFflg+1]; - $iFflg= 0; - } - -#For Date format - $iFmtMode = 0 if(defined $sType && $sType eq 'Text'); #Not Convert Non Numeric - if(($iFmtMode==2)&& ($iData =~/$sNUMEXP/)) { - my @aTime = ExcelLocaltime($iData, $i1904); - $aTime[4]++; - $aTime[5] += 1900; - - my @aMonL = - qw (dum January February March April May June July - August September October November December ); - my @aMonNm = - qw (dum Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec); - my @aWeekNm = - qw (Mon Tue Wed Thu Fri Sat Sun); - my @aWeekL = - qw (Monday Tuesday Wednesday Thursday Friday Saturday Sunday); - my $sRep; - for(my $iIt=$#aRep; $iIt>=0;$iIt--) { - my $rItem = $aRep[$iIt]; - if((scalar @$rItem) >=4) { - #Min - if($rItem->[0] eq 'mm') { - $sRep = sprintf("%02d", $aTime[1]); - } - else { - $sRep = sprintf("%d", $aTime[1]); - } - } - #Year - elsif($rItem->[0] eq 'yyyy') { - $sRep = sprintf('%04d', $aTime[5]); - } - elsif($rItem->[0] eq 'yy') { - $sRep = sprintf('%02d', $aTime[5] % 100); - } - #Mon - elsif($rItem->[0] eq 'mmmmm') { - $sRep = substr($aMonNm[$aTime[4]], 0, 1); - } - elsif($rItem->[0] eq 'mmmm') { - $sRep = $aMonL[$aTime[4]]; - } - elsif($rItem->[0] eq 'mmm') { - $sRep = 
$aMonNm[$aTime[4]]; - } - elsif($rItem->[0] eq 'mm') { - $sRep = sprintf('%02d', $aTime[4]); - } - elsif($rItem->[0] eq 'm') { - $sRep = sprintf('%d', $aTime[4]); - } - #Day - elsif($rItem->[0] eq 'dddd') { - $sRep = $aWeekL[$aTime[7]]; - } - elsif($rItem->[0] eq 'ddd') { - $sRep = $aWeekNm[$aTime[7]]; - } - elsif($rItem->[0] eq 'dd') { - $sRep = sprintf('%02d', $aTime[3]); - } - elsif($rItem->[0] eq 'd') { - $sRep = sprintf('%d', $aTime[3]); - } - #Hour - elsif($rItem->[0] eq 'hh') { - if($iAm) { - $sRep = sprintf('%02d', $aTime[2]%12); - } - else { - $sRep = sprintf('%02d', $aTime[2]); - } - } - elsif($rItem->[0] eq 'h') { - if($iAm) { - $sRep = sprintf('%d', $aTime[2]%12); - } - else { - $sRep = sprintf('%d', $aTime[2]); - } - } - #SS - elsif($rItem->[0] eq 'ss') { - $sRep = sprintf('%02d', $aTime[0]); - } - elsif($rItem->[0] eq 'S') { - $sRep = sprintf('%d', $aTime[0]); - } - #am/pm - elsif($rItem->[0] eq 'am/pm') { - $sRep = ($aTime[4]>12)? 'pm':'am'; - } - elsif($rItem->[0] eq 'a/p') { - $sRep = ($aTime[4]>12)? 'p':'a'; - } - elsif($rItem->[0] eq '.') { - $sRep = '.'; - } - elsif($rItem->[0] =~ /^0+$/) { - my $i0Len = length($&); -#print "SEC:", $aTime[7], "\n"; - $sRep = substr(sprintf("%.${i0Len}f", $aTime[7]/1000.0), 2, $i0Len); - } - elsif($rItem->[0] eq '[h]') { - $sRep = sprintf('%d', int($iData) * 24 + $aTime[2]); - } - elsif($rItem->[0] eq '[mm]') { - $sRep = sprintf('%d', (int($iData) * 24 + $aTime[2])*60 + $aTime[1]); - } -#NENGO(Japanese) - elsif($rItem->[0] eq 'ge') { - $sRep = Spreadsheet::XLSX::FmtJapan::CnvNengo(1, @aTime); - } - elsif($rItem->[0] eq 'ggge') { - $sRep = Spreadsheet::XLSX::FmtJapan::CnvNengo(2, @aTime); - } - elsif($rItem->[0] eq '@') { - $sRep = $iData; - } - -#print "REP:$sRep ",$rItem->[0], ":", $rItem->[1], ":" ,$rItem->[2], "\n"; - substr($sFmtRes, $rItem->[1], $rItem->[2]) = $sRep; - } - } - elsif(($iFmtMode==1)&& ($iData =~/$sNUMEXP/)) { - if($#aRep>=0) { - while($aRep[$#aRep]->[0] eq ',') { - $iCmmCnt--; - substr($sFmtRes, $aRep[$#aRep]->[1], $aRep[$#aRep]->[2]) = ''; - $iData /= 1000; - pop @aRep; - } - - my $sNumFmt = join('', map {$_->[0]} @aRep); - my $sNumRes; - my $iTtl=0; - my $iE=0; - my $iP=0; - my $iInt = 0; - my $iAftP=undef; - foreach my $sItem (split //, $sNumFmt) { - if($sItem eq '.') { - $iTtl++; - $iP = 1; - } - elsif(($sItem eq 'E') || ($sItem eq 'e')){ - $iE = 1; - } - elsif($sItem eq '0') { - $iTtl++; - $iAftP++ if($iP); - $iInt = 1; - } - elsif($sItem eq '#') { - #$iTtl++; - $iAftP++ if($iP); - $iInt = 1; - } - elsif($sItem eq '?') { - #$iTtl++; - $iAftP++ if($iP); - } - } - $iData *= 100.0 if($iPer); - my $iDData = ($iFugouFlg)? abs($iData) : $iData+0; - if($iBunFlg) { - $sNumRes = sprintf("%0${iTtl}d", int($iDData)); - } - else { - if($iP) { - $sNumRes = sprintf( - (defined($iAftP)? - "%0${iTtl}.${iAftP}f": "%0${iTtl}f"), $iDData); - } - else { - $sNumRes = sprintf("%0${iTtl}.0f", $iDData); - } - } - $sNumRes = AddComma($sNumRes) if($iCmmCnt > 0); - my $iLen = length($sNumRes); - my $iPPos = -1; - my $sRep; - - for(my $iIt=$#aRep; $iIt>=0;$iIt--) { - my $rItem = $aRep[$iIt]; - if($rItem->[0] =~/([#0]*)([\.]?)([0#]*)([eE])([\+\-])([0#]+)/) { - substr($sFmtRes, $rItem->[1], $rItem->[2]) = - MakeE($rItem->[0], $iData); - } - elsif($rItem->[0] =~ /\//) { - substr($sFmtRes, $rItem->[1], $rItem->[2]) = - MakeBun($rItem->[0], $iData, $iInt); - } - elsif($rItem->[0] eq '.') { - $iLen--; - $iPPos=$iLen; - } - elsif($rItem->[0] eq '+') { - substr($sFmtRes, $rItem->[1], $rItem->[2]) = - ($iData > 0)? '+': (($iData==0)? 
'+':'-'); - } - elsif($rItem->[0] eq '-') { - substr($sFmtRes, $rItem->[1], $rItem->[2]) = - ($iData > 0)? '': (($iData==0)? '':'-'); - } - elsif($rItem->[0] eq '@') { - substr($sFmtRes, $rItem->[1], $rItem->[2]) = $iData; - } - elsif($rItem->[0] eq '*') { - substr($sFmtRes, $rItem->[1], $rItem->[2]) = ''; #REMOVE - } - elsif(($rItem->[0] eq "\xA2\xA4") or ($rItem->[0] eq "\xA2\xA5") or - ($rItem->[0] eq "\x81\xA2") or ($rItem->[0] eq "\x81\xA3") ){ - substr($sFmtRes, $rItem->[1], $rItem->[2]) = $rItem->[0]; - } - elsif(($rItem->[0] eq '(') or ($rItem->[0] eq ')')){ - substr($sFmtRes, $rItem->[1], $rItem->[2]) = $rItem->[0]; - } - else { - if($iLen>0) { - if($iIt <= 0) { - $sRep = substr($sNumRes, 0, $iLen); - $iLen = 0; - } - else { - my $iReal = length($rItem->[0]); - if($iPPos >= 0) { - my $sWkF = $rItem->[0]; - $sWkF=~s/^#+//; - $iReal = length($sWkF); - $iReal = ($iLen <=$iReal)? $iLen:$iReal; - } - else { - $iReal = ($iLen <=$iReal)? $iLen:$iReal; - } - $sRep = substr($sNumRes, $iLen - $iReal, $iReal); - $iLen -=$iReal; - } - } - else { - $sRep = ''; - } - substr($sFmtRes, $rItem->[1], $rItem->[2]) = "\x00" . $sRep; - } - } - $sRep = ($iLen > 0)?substr($sNumRes, 0, $iLen) : ''; - $sFmtRes =~ s/\x00/$sRep/; - $sFmtRes =~ s/\x00//g; - } - } - else { - my $iAtMk = 0; - for(my $iIt=$#aRep; $iIt>=0;$iIt--) { - my $rItem = $aRep[$iIt]; - if($rItem->[0] eq '@') { - substr($sFmtRes, $rItem->[1], $rItem->[2]) = $iData; - $iAtMk++; - } - else { - substr($sFmtRes, $rItem->[1], $rItem->[2]) = ''; - } - } - $sFmtRes = $iData unless($iAtMk); - } - return wantarray()? ($sFmtRes, $sColor) : $sFmtRes; -} -#------------------------------------------------------------------------------ -# AddComma (for Spreadsheet::XLSX::Utility) -#------------------------------------------------------------------------------ -sub AddComma { - my($sNum) = @_; - - if($sNum=~ /^([^\d]*)(\d\d\d\d+)(\.*.*)$/) { - my($sPre, $sObj, $sAft) =($1, $2, $3); - for(my $i=length($sObj)-3;$i>0; $i-=3) { - substr($sObj, $i, 0) = ','; - } - return $sPre . $sObj . $sAft; - } - else { - return $sNum; - } -} -#------------------------------------------------------------------------------ -# MakeBun (for Spreadsheet::XLSX::Utility) -#------------------------------------------------------------------------------ -sub MakeBun { - my($sFmt, $iData, $iFlg) = @_; - my $iBunbo; - my $iShou; - -#1. Init -#print "FLG: $iFlg\n"; - if($iFlg) { - $iShou = $iData - int($iData); - return '' if($iShou == 0); - } - else { - $iShou = $iData; - } - $iShou = abs($iShou); - my $sSWk; - -#2.Calc BUNBO -#2.1 BUNBO defined - if($sFmt =~ /\/(\d+)$/) { - $iBunbo = $1; - return sprintf("%d/%d", $iShou*$iBunbo, $iBunbo); - } - else { -#2.2 Calc BUNBO - $sFmt =~ /\/(\?+)$/; - my $iKeta = length($1); - my $iSWk = 1; - my $sSWk = ''; - my $iBunsi; - for(my $iBunbo = 2;$iBunbo<10**$iKeta;$iBunbo++) { - $iBunsi = int($iShou*$iBunbo + 0.5); - my $iCmp = abs($iShou - ($iBunsi/$iBunbo)); - if($iCmp < $iSWk) { - $iSWk =$iCmp; - $sSWk = sprintf("%d/%d", $iBunsi, $iBunbo); - last if($iSWk==0); - } - } - return $sSWk; - } -} -#------------------------------------------------------------------------------ -# MakeE (for Spreadsheet::XLSX::Utility) -#------------------------------------------------------------------------------ -sub MakeE { - my($sFmt, $iData) = @_; - - $sFmt=~/(([#0]*)[\.]?[#0]*)([eE])([\+\-][0#]+)/; - my($sKari, $iKeta, $sE, $sSisu) = ($1, length($2), $3, $4); - $iKeta = 1 if($iKeta<=0); - - my $iLog10 = 0; - $iLog10 = ($iData == 0)? 
0 : (log(abs($iData))/ log(10)); - $iLog10 = (int($iLog10 / $iKeta) + - ((($iLog10 - int($iLog10 / $iKeta))<0)? -1: 0)) *$iKeta; - - my $sUe = ExcelFmt($sKari, $iData*(10**($iLog10*-1)),0); - my $sShita = ExcelFmt($sSisu, $iLog10, 0); - return $sUe . $sE . $sShita; -} -#------------------------------------------------------------------------------ -# LeapYear (for Spreadsheet::XLSX::Utility) -#------------------------------------------------------------------------------ -sub LeapYear { - my($iYear)=@_; - return 1 if($iYear==1900); #Special for Excel - return ((($iYear % 4)==0) && (($iYear % 100) || ($iYear % 400)==0))? 1: 0; -} -#------------------------------------------------------------------------------ -# LocaltimeExcel (for Spreadsheet::XLSX::Utility) -#------------------------------------------------------------------------------ -sub LocaltimeExcel { - my($iSec, $iMin, $iHour, $iDay, $iMon, $iYear, $iMSec, $flg1904) = @_; - -#0. Init - $iMon++; - $iYear+=1900; - -#1. Calc Time - my $iTime; - $iTime =$iHour; - $iTime *=60; - $iTime +=$iMin; - $iTime *=60; - $iTime +=$iSec; - $iTime += $iMSec/1000.0 if(defined($iMSec)) ; - $iTime /= 86400.0; #3600*24(1day in seconds) - my $iY; - my $iYDays; - -#2. Calc Days - if($flg1904) { - $iY = 1904; - $iTime--; #Start from Jan 1st - $iYDays = 366; - } - else { - $iY = 1900; - $iYDays = 366; #In Excel 1900 is leap year (That's not TRUE!) - } - while($iY<$iYear) { - $iTime += $iYDays; - $iY++; - $iYDays = (LeapYear($iY))? 366: 365; - } - for(my $iM=1;$iM < $iMon; $iM++){ - if($iM == 1 || $iM == 3 || $iM == 5 || $iM == 7 || $iM == 8 - || $iM == 10 || $iM == 12) { - $iTime += 31; - } - elsif($iM == 4 || $iM == 6 || $iM == 9 || $iM == 11) { - $iTime += 30; - } - elsif($iM == 2) { - $iTime += (LeapYear($iYear))? 29: 28; - } - } - $iTime+=$iDay; - return $iTime; -} -#------------------------------------------------------------------------------ -# ExcelLocaltime (for Spreadsheet::XLSX::Utility) -#------------------------------------------------------------------------------ -sub ExcelLocaltime { - my($dObj, $flg1904) = @_; - my($iSec, $iMin, $iHour, $iDay, $iMon, $iYear, $iwDay, $iMSec); - my($iDt, $iTime, $iYDays); - - $iDt = int($dObj); - $iTime = $dObj - $iDt; - -#1. Calc Days - if($flg1904) { - $iYear = 1904; - $iDt++; #Start from Jan 1st - $iYDays = 366; - $iwDay = (($iDt+4) % 7); - } - else { - $iYear = 1900; - $iYDays = 366; #In Excel 1900 is leap year (That's not TRUE!) - $iwDay = (($iDt+6) % 7); - } - while($iDt > $iYDays) { - $iDt -= $iYDays; - $iYear++; - $iYDays = ((($iYear % 4)==0) && - (($iYear % 100) || ($iYear % 400)==0))? 366: 365; - } - $iYear -= 1900; - for($iMon=1;$iMon < 12; $iMon++){ - my $iMD; - if($iMon == 1 || $iMon == 3 || $iMon == 5 || $iMon == 7 || $iMon == 8 - || $iMon == 10 || $iMon == 12) { - $iMD = 31; - } - elsif($iMon == 4 || $iMon == 6 || $iMon == 9 || $iMon == 11) { - $iMD = 30; - } - elsif($iMon == 2) { - $iMD = (($iYear % 4) == 0)? 29: 28; - } - last if($iDt <= $iMD); - $iDt -= $iMD; - } - -#2. 
Calc Time - $iDay = $iDt; - $iTime += (0.0005 / 86400.0); - $iTime*=24.0; - $iHour = int($iTime); - $iTime -= $iHour; - $iTime *= 60.0; - $iMin = int($iTime); - $iTime -= $iMin; - $iTime *= 60.0; - $iSec = int($iTime); - $iTime -= $iSec; - $iTime *= 1000.0; - $iMSec = int($iTime); - - return ($iSec, $iMin, $iHour, $iDay, $iMon-1, $iYear, $iwDay, $iMSec); -} -# ----------------------------------------------------------------------------- -# col2int (for Spreadsheet::XLSX::Utility) -#------------------------------------------------------------------------------ -# converts a excel row letter into an int for use in an array -sub col2int { - my $result = 0 ; - my $str = shift ; - my $incr = 0 ; - - for(my $i = length($str) ; $i > 0 ; $i--) { - my $char = substr( $str, $i-1) ; - my $curr += ord(lc($char)) - ord('a') + 1; - $curr *= $incr if( $incr) ; - $result += $curr ; - $incr += 26 ; - } - # this is one out as we range 0..x-1 not 1..x - $result-- ; - - return $result ; -} -# ----------------------------------------------------------------------------- -# int2col (for Spreadsheet::XLSX::Utility) -#------------------------------------------------------------------------------ -### int2col -# convert a column number into column letters -# @note this is quite a brute force coarse method -# does not manage values over 701 (ZZ) -# @arg number, to convert -# @returns string, column name -# -sub int2col { - my $out = "" ; - my $val = shift ; - - do { - $out .= chr(( $val % 26) + ord('A')) ; - $val = int( $val / 26) - 1 ; - } while( $val >= 0) ; - - return reverse $out ; -} -# ----------------------------------------------------------------------------- -# sheetRef (for Spreadsheet::XLSX::Utility) -#------------------------------------------------------------------------------ -# ----------------------------------------------------------------------------- -### sheetRef -# convert an excel letter-number address into a useful array address -# @note that also Excel uses X-Y notation, we normally use Y-X in arrays -# @args $str, excel coord eg. A2 -# @returns an array - 2 elements - column, row, or undefined -# -sub sheetRef { - my $str = shift ; - my @ret ; - - $str =~ m/^(\D+)(\d+)$/ ; - - if( $1 && $2) { - push( @ret, $2 -1, col2int($1)) ; - } - if( $ret[0] < 0) { - undef @ret ; - } - - return @ret ; -} -# ----------------------------------------------------------------------------- -# xls2csv (for Spreadsheet::XLSX::Utility) -#------------------------------------------------------------------------------ -### xls2csv -# convert a chunk of an excel file into csv text chunk -# @args $param, sheet-colrow:colrow (1-A1:B2 or A1:B2 for sheet 1 -# @args $rotate, 0 or 1 decides if output should be rotated or not -# @returns string containing a chunk of csv -# -sub xls2csv { - my ($filename, $regions, $rotate) = @_ ; - my $sheet = 0 ; - my $output = "" ; - - # extract any sheet number from the region string - $regions =~ m/^(\d+)-(.*)/ ; - - if( $2) { - $sheet = $1 - 1 ; - $regions = $2 ; - } - - # now extract the start and end regions - $regions =~ m/(.*):(.*)/ ; - - if( !$1 || !$2) { - print STDERR "Bad Params"; - return "" ; - } - - my @start = sheetRef( $1) ; - my @end = sheetRef( $2) ; - if( !@start) { - print STDERR "Bad coorinates - $1"; - return "" ; - } - if( !@end) { - print STDERR "Bad coorinates - $2"; - return "" ; - } - - if( $start[1] > $end[1]) { - print STDERR "Bad COLUMN ordering\n"; - print STDERR "Start column " . int2col($start[1]); - print STDERR " after end column " . 
int2col($end[1]) . "\n"; - return "" ; - } - if( $start[0] > $end[0]) { - print STDERR "Bad ROW ordering\n"; - print STDERR "Start row " . ($start[0] + 1); - print STDERR " after end row " . ($end[0] + 1) . "\n"; - exit ; - } - - # start the excel object now - my $oExcel = new Spreadsheet::XLSX ; - my $oBook = $oExcel->Parse( $filename) ; - # open the sheet - my $oWkS = $oBook->{Worksheet}[ $sheet] ; - - # now check that the region exists in the file - # if not trucate to the possible region - # output a warning msg - if( $start[1] < $oWkS->{MinCol}) { - print STDERR int2col( $start[1]) . " < min col " . int2col( $oWkS->{MinCol}) . " Reseting\n"; - $start[1] = $oWkS->{MinCol} ; - } - if( $end[1] > $oWkS->{MaxCol}) { - print STDERR int2col( $end[1]) . " > max col " . int2col( $oWkS->{MaxCol}) . " Reseting\n" ; - $end[1] = $oWkS->{MaxCol} ; - } - if( $start[0] < $oWkS->{MinRow}) { - print STDERR "" . ($start[0] + 1) . " < min row " . ($oWkS->{MinRow} + 1) . " Reseting\n"; - $start[0] = $oWkS->{MinCol} ; - } - if( $end[0] > $oWkS->{MaxRow}) { - print STDERR "" . ($end[0] + 1) . " > max row " . ($oWkS->{MaxRow} + 1) . " Reseting\n"; - $end[0] = $oWkS->{MaxRow} ; - - } - - my $x1 = $start[1] ; - my $y1 = $start[0] ; - my $x2 = $end[1] ; - my $y2 = $end[0] ; - - if( !$rotate) { - for( my $y = $y1 ; $y <= $y2 ; $y++) { - for( my $x = $x1 ; $x <= $x2 ; $x++) { - my $cell = $oWkS->{Cells}[$y][$x] ; - $output .= $cell->Value if(defined $cell); - $output .= "," if( $x != $x2) ; - } - $output .= "\n" ; - } - } else { - for( my $x = $x1 ; $x <= $x2 ; $x++) { - for( my $y = $y1 ; $y <= $y2 ; $y++) { - my $cell = $oWkS->{Cells}[$y][$x] ; - $output .= $cell->Value if(defined $cell); - $output .= "," if( $y != $y2) ; - } - $output .= "\n" ; - } - } - - return $output ; -} -sub unescape_HTML { - - my $string = shift; - my %options = @_; - - return $string if ($string eq ''); - - $string =~ s/"/"/g; - $string =~ s/’/'/g; - $string =~ s/&/&/g; - - return $string if $options{textarea}; # for textboxes, we leave < and > as < and > - # so that people who enter "" into - # our text boxes can't break forms - - $string =~ s/<//g; - - - return $string; -} - -1; -__END__ - -=head1 NAME - -Spreadsheet::XLSX::Utility2007 - Utility function for Spreadsheet::XLSX - -=head1 SYNOPSIS - - use strict; - #Declare - use Spreadsheet::XLSX::Utility qw(ExcelFmt ExcelLocaltime LocaltimeExcel); - - #Convert localtime ->Excel Time - my $iBirth = LocaltimeExcel(11, 10, 12, 23, 2, 64); - # = 1964-3-23 12:10:11 - print $iBirth, "\n"; # 23459.5070717593 - - #Convert Excel Time -> localtime - my @aBirth = ExcelLocaltime($iBirth, undef); - print join(":", @aBirth), "\n"; # 11:10:12:23:2:64:1:0 - - #Formatting - print ExcelFmt('yyyy-mm-dd', $iBirth), "\n"; #1964-3-23 - print ExcelFmt('m-d-yy', $iBirth), "\n"; # 3-23-64 - print ExcelFmt('#,##0', $iBirth), "\n"; # 23,460 - print ExcelFmt('#,##0.00', $iBirth), "\n"; # 23,459.51 - print ExcelFmt('"My Birthday is (m/d):" m/d', $iBirth), "\n"; - # My Birthday is (m/d): 3/23 - -=head1 DESCRIPTION - -Spreadsheet::XLSX::Utility exports utility functions concerned with Excel format setting. - -ExcelFmt is used by Spreadsheet::XLSX::Fmt2007.pm which is used by Spreadsheet::XLSX. - -=head1 Functions - -This module can export 3 functions: ExcelFmt, ExcelLocaltime and LocaltimeExcel. - -=head2 ExcelFmt - -$sTxt = ExcelFmt($sFmt, $iData [, $i1904]); - -I<$sFmt> is a format string for Excel. I<$iData> is the target value. -If I<$flg1904> is true, this functions assumes that epoch is 1904. 
-I<$sTxt> is the result. - -For more detail and examples, please refer to sample/chkFmt.pl in this distribution. - -ex. - -=head2 ExcelLocaltime - -($iSec, $iMin, $iHour, $iDay, $iMon, $iYear, $iwDay, $iMSec) = - ExcelLocaltime($iExTime [, $flg1904]); - -ExcelLocaltime converts time information in Excel format into Perl localtime format. -I<$iExTime> is an Excel time value. If I<$flg1904> is true, this function assumes that the -epoch is 1904. -I<$iSec>, I<$iMin>, I<$iHour>, I<$iDay>, I<$iMon>, I<$iYear>, I<$iwDay> are the same as for localtime. -I<$iMSec> means 1/1,000 of a second (milliseconds). - - -=head2 LocaltimeExcel - -I<$iExTime> = LocaltimeExcel($iSec, $iMin, $iHour, $iDay, $iMon, $iYear [,$iMSec] [,$flg1904]) - -LocaltimeExcel converts time information in Perl localtime format into Excel format. -I<$iSec>, I<$iMin>, I<$iHour>, I<$iDay>, I<$iMon>, I<$iYear> are the same as for localtime. - -If I<$flg1904> is true, this function assumes that the epoch is 1904. -I<$iExTime> is an Excel time value. - -=head2 col2int - -I<$iInt> = col2int($sCol); - -converts an Excel column letter into an int for use in an array - -This function was contributed by Kevin Mulholland. - -=head2 int2col - -I<$sCol> = int2col($iCol); - -convert a column number into column letters. -NOTE: This is quite a brute-force, coarse method; it does not manage values over 701 (ZZ). - -This function was contributed by Kevin Mulholland. - -=head2 sheetRef - -(I<$iRow>, I<$iCol>) = sheetRef($sStr); - -convert an Excel letter-number address into a useful array address. -NOTE: Excel uses X-Y notation, while we normally use Y-X in arrays. -$sStr is an Excel coordinate (e.g. A2). - -This function was contributed by Kevin Mulholland. - -=head2 xls2csv - -$sCsvTxt = xls2csv($sFileName, $sRegion, $iRotate); - -convert a chunk of an Excel file into a chunk of CSV text. -$sRegion = "sheet-colrow:colrow" (e.g. '1-A1:B2' means 'A1:B2' on sheet 1) -$iRotate = 0 or 1 (whether or not the output should be rotated) - -This function was contributed by Kevin Mulholland. - -=head1 AUTHOR - -Rob Polocz rob.polocz@trackvia.com -based on work for Spreadsheet::ParseExcel by -Kawai Takanori (Hippo2000), -used with permission - -=head1 SEE ALSO - -Spreadsheet::ParseExcel, Spreadsheet::WriteExcel - -=head1 COPYRIGHT - -This module is part of the Spreadsheet::XLSX distribution.
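A minimal usage sketch tying these helpers together, assuming col2int, int2col and sheetRef are exported by the module alongside ExcelFmt and LocaltimeExcel (if a given release does not export them, call them fully qualified, e.g. Spreadsheet::XLSX::Utility::col2int):

    use strict;
    use warnings;
    use Spreadsheet::XLSX::Utility qw(ExcelFmt LocaltimeExcel col2int int2col sheetRef);

    # Column letters <-> zero-based column index
    print col2int('A'),  "\n";   # 0
    print col2int('AA'), "\n";   # 26
    print int2col(27),   "\n";   # AB

    # 'B3' -> (row, column), zero-based and row first (Y-X order, as noted above)
    my ($iRow, $iCol) = sheetRef('B3');
    printf "row=%d col=%d\n", $iRow, $iCol;   # row=2 col=1

    # Round-trip a local time through the Excel serial date representation
    my $iSerial = LocaltimeExcel(11, 10, 12, 23, 2, 64);   # 1964-03-23 12:10:11
    print ExcelFmt('yyyy-mm-dd', $iSerial), "\n";          # 1964-3-23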
- -=cut diff -Nru gdata-2.13.3/inst/perl/Spreadsheet/XLSX.pm gdata-2.17.0/inst/perl/Spreadsheet/XLSX.pm --- gdata-2.13.3/inst/perl/Spreadsheet/XLSX.pm 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/Spreadsheet/XLSX.pm 1970-01-01 00:00:00.000000000 +0000 @@ -1,345 +0,0 @@ -package Spreadsheet::XLSX; - -use 5.006000; -use strict; -use warnings; - -our @ISA = qw(); - -our $VERSION = '0.12'; - -use Archive::Zip; -use Spreadsheet::XLSX::Fmt2007; -use Data::Dumper; -use Spreadsheet::ParseExcel; - -################################################################################ - -sub new { - - my ($class, $filename, $converter) = @_; - - my $self = {}; - - $self -> {zip} = Archive::Zip -> new ($filename) or die ("Cant't open $filename as a zip file\n"); - - my $member_shared_strings = $self -> {zip} -> memberNamed ('xl/sharedStrings.xml'); - - my @shared_strings = (); - - if ($member_shared_strings) { - - my $mstr = $member_shared_strings->contents; - $mstr =~ s//<\/t>/gsm; # this handles an empty t tag in the xml - foreach my $si ($mstr =~ /(.*?)<\/si/gsm) { - my $str; - foreach my $t ($si =~ /(.*?)<\/t/gsm) { - $t = $converter -> convert ($t) if $converter; - $str .= $t; - } - push @shared_strings, $str; - } - } - my $member_styles = $self -> {zip} -> memberNamed ('xl/styles.xml'); - - my @styles = (); - - my %style_info = (); - - if ($member_styles) { - - foreach my $t ($member_styles -> contents =~ /xf\ numFmtId="([^"]*)"(?!.*\/cellStyleXfs)/gsm) { #" - # $t = $converter -> convert ($t) if $converter; - push @styles, $t; - - } - my $default = $1 || ''; - - foreach my $t1 (@styles){ - $member_styles -> contents =~ /numFmtId="$t1" formatCode="([^"]*)/; - my $formatCode = $1 || ''; - if ($formatCode eq $default || not($formatCode)){ - if ($t1 == 9 || $t1==10){ $formatCode="0.00000%";} - elsif ($t1 == 14){ $formatCode="yyyy-mm-dd";} - elsif ($t1 == 20){ $formatCode="h:mm";} - else { - $formatCode=""; - } - } - $style_info{$t1} = $formatCode; - $default = $1 || ''; - } - - } - - my $member_rels = $self -> {zip} -> memberNamed ('xl/_rels/workbook.xml.rels') or die ("xl/_rels/workbook.xml.rels not found in this zip\n"); - - my %rels = (); - - foreach ($member_rels -> contents =~ /\/g) { - - /^Id="(.*?)".*?Target="(.*?)"/ or next; - - $rels {$1} = $2; - - } - - my $member_workbook = $self -> {zip} -> memberNamed ('xl/workbook.xml') or die ("xl/workbook.xml not found in this zip\n"); - my $oBook = Spreadsheet::ParseExcel::Workbook->new; - $oBook->{SheetCount} = 0; - $oBook->{FmtClass} = Spreadsheet::XLSX::Fmt2007->new; - $oBook->{Flg1904}=0; - if ($member_workbook->contents =~ /date1904="1"/){ - $oBook->{Flg1904}=1; - } - my @Worksheet = (); - - foreach ($member_workbook -> contents =~ /\<(.*?)\/?\>/g) { - - /^(\w+)\s+/; - - my ($tag, $other) = ($1, $'); - - my @pairs = split /\" /, $other; - - $tag eq 'sheet' or next; - - my $sheet = { - MaxRow => 0, - MaxCol => 0, - MinRow => 1000000, - MinCol => 1000000, - }; - - foreach ($other =~ /(\S+=".*?")/gsm) { - - my ($k, $v) = split /=?"/; #" - - if ($k eq 'name') { - $sheet -> {Name} = $v; - $sheet -> {Name} = $converter -> convert ($sheet -> {Name}) if $converter; - } - elsif ($k eq 'r:id') { - - $sheet -> {path} = $rels {$v}; - - }; - - } - my $wsheet = Spreadsheet::ParseExcel::Worksheet->new(%$sheet); - push @Worksheet, $wsheet; - $oBook->{Worksheet}[$oBook->{SheetCount}] = $wsheet; - $oBook->{SheetCount}+=1; - - } - - $self -> {Worksheet} = \@Worksheet; - - foreach my $sheet (@Worksheet) { - - my $member_sheet = $self -> {zip} -> 
memberNamed ("xl/$sheet->{path}") or next; - - my ($row, $col); - - my $flag = 0; - my $s = 0; - my $s2 = 0; - my $sty = 0; - foreach ($member_sheet -> contents =~ /(\<.*?\/?\>|.*?(?=\<))/g) { - if (/^\"){$v="";} - my $type = "Text"; - my $thisstyle = ""; - if (not($s) && not($s2)){ - $type="Numeric"; - $thisstyle = $style_info{$styles[$sty]}; - if ($thisstyle =~ /(? {MaxRow} = $row if $sheet -> {MaxRow} < $row; - $sheet -> {MaxCol} = $col if $sheet -> {MaxCol} < $col; - $sheet -> {MinRow} = $row if $sheet -> {MinRow} > $row; - $sheet -> {MinCol} = $col if $sheet -> {MinCol} > $col; - if ($v =~ /(.*)E\-(.*)/gsm && $type eq "Numeric"){ - $v=$1/(10**$2); # this handles scientific notation for very small numbers - } - my $cell =Spreadsheet::ParseExcel::Cell->new( - - Val => $v, - Format => $thisstyle, - Type => $type - - ); - - $cell->{_Value} = $oBook->{FmtClass}->ValFmt($cell, $oBook); - if ($type eq "Date" && $v<1){ #then this is Excel time field - $cell->{Type}="Text"; - $cell->{Val}=$cell->{_Value}; - } - $sheet -> {Cells} [$row] [$col] = $cell; - } - - } - - $sheet -> {MinRow} = 0 if $sheet -> {MinRow} > $sheet -> {MaxRow}; - $sheet -> {MinCol} = 0 if $sheet -> {MinCol} > $sheet -> {MaxCol}; - - } -foreach my $stys (keys %style_info){ -} - bless ($self, $class); - - return $oBook; - -} - -1; -__END__ - -=head1 NAME - -Spreadsheet::XLSX - Perl extension for reading MS Excel 2007 files; - -=head1 SYNOPSIS - - use Text::Iconv; - my $converter = Text::Iconv -> new ("utf-8", "windows-1251"); - - # Text::Iconv is not really required. - # This can be any object with the convert method. Or nothing. - - use Spreadsheet::XLSX; - - my $excel = Spreadsheet::XLSX -> new ('test.xlsx', $converter); - - foreach my $sheet (@{$excel -> {Worksheet}}) { - - printf("Sheet: %s\n", $sheet->{Name}); - - $sheet -> {MaxRow} ||= $sheet -> {MinRow}; - - foreach my $row ($sheet -> {MinRow} .. $sheet -> {MaxRow}) { - - $sheet -> {MaxCol} ||= $sheet -> {MinCol}; - - foreach my $col ($sheet -> {MinCol} .. $sheet -> {MaxCol}) { - - my $cell = $sheet -> {Cells} [$row] [$col]; - - if ($cell) { - printf("( %s , %s ) => %s\n", $row, $col, $cell -> {Val}); - } - - } - - } - - } - -=head1 DESCRIPTION - -This module is a (quick and dirty) emulation of Spreadsheet::ParseExcel for -Excel 2007 (.xlsx) file format. It supports styles and many of Excel's quirks, -but not all. It populates the classes from Spreadsheet::ParseExcel for interoperability; -including Workbook, Worksheet, and Cell. - -=head1 SEE ALSO - -=over 2 - -=item Text::CSV_XS, Text::CSV_PP - -http://search.cpan.org/~hmbrand/ - -A pure perl version is available on http://search.cpan.org/~makamaka/ - -=item Spreadsheet::ParseExcel - -http://search.cpan.org/~kwitknr/ - -=item Spreadsheet::ReadSXC - -http://search.cpan.org/~terhechte/ - -=item Spreadsheet::BasicRead - -http://search.cpan.org/~gng/ for xlscat likewise functionality (Excel only) - -=item Spreadsheet::ConvertAA - -http://search.cpan.org/~nkh/ for an alternative set of cell2cr () / -cr2cell () pair - -=item Spreadsheet::Perl - -http://search.cpan.org/~nkh/ offers a Pure Perl implementation of a -spreadsheet engine. Users that want this format to be supported in -Spreadsheet::Read are hereby motivated to offer patches. It's not high -on my todo-list. - -=item xls2csv - -http://search.cpan.org/~ken/ offers an alternative for my C, -in the xls2csv tool, but this tool focusses on character encoding -transparency, and requires some other modules. 
- -=item Spreadsheet::Read - -http://search.cpan.org/~hmbrand/ read the data from a spreadsheet (interface -module) - -=back - -=head1 AUTHOR - -Dmitry Ovsyanko, Edo@eludia.ru, http://eludia.ru/wiki/ - -Patches by: - - Steve Simms - Joerg Meltzer - Loreyna Yeung - Rob Polocz - Gregor Herrmann - H.Merijn Brand - endacoe - Pat Mariani - -=head1 ACKNOWLEDGEMENTS - - Thanks to TrackVia Inc. (http://www.trackvia.com) for paying for Rob Polocz working time. - -=head1 COPYRIGHT AND LICENSE - -Copyright (C) 2008 by Dmitry Ovsyanko - -This library is free software; you can redistribute it and/or modify -it under the same terms as Perl itself, either Perl version 5.8.8 or, -at your option, any later version of Perl 5 you may have available. - -=cut diff -Nru gdata-2.13.3/inst/perl/supportedFormats.pl gdata-2.17.0/inst/perl/supportedFormats.pl --- gdata-2.13.3/inst/perl/supportedFormats.pl 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/supportedFormats.pl 2014-08-28 04:05:07.000000000 +0000 @@ -10,12 +10,12 @@ my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, - $HAS_Spreadsheet_XLSX) = check_modules(0); + $HAS_Spreadsheet_ParseXLSX) = check_modules(0); $XLS_Support = $HAS_Spreadsheet_ParseExcel; $XLSX_Support = $HAS_Spreadsheet_ParseExcel && $HAS_Compress_Raw_Zlib && - $HAS_Spreadsheet_XLSX; + $HAS_Spreadsheet_ParseXLSX; printf "Supported formats: "; printf "XLS " if ( $XLS_Support ); diff -Nru gdata-2.13.3/inst/perl/xls2csv.pl gdata-2.17.0/inst/perl/xls2csv.pl --- gdata-2.13.3/inst/perl/xls2csv.pl 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/xls2csv.pl 2015-04-25 08:48:57.000000000 +0000 @@ -1,15 +1,14 @@ #!/usr/bin/perl BEGIN { -use File::Basename; -# Add current path to perl library search path -use lib dirname($0); + use File::Basename; + # Add current path to perl library search path + use lib dirname($0); } use strict; #use Spreadsheet::ParseExcel; -#use Spreadsheet::XLSX; -use POSIX; +#use Spreadsheet::ParseXLSX; use File::Spec::Functions; use Getopt::Std; @@ -20,15 +19,18 @@ my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, - $HAS_Spreadsheet_XLSX + $HAS_Spreadsheet_ParseXLSX ) = check_modules_and_notify(); # declare some varibles local my($row, $col, $sheet, $cell, $usage, $targetfile,$basename, $sheetnumber, $filename, $volume, $directories, $whoami, - $sep, $sepName, $sepLabel, $sepExt, - $skipBlankLines, %switches); + $sep, $sepName, $sepLabel, $sepExt, + $skipBlankLines, %switches, + $parser, $oBook, $formatter, + $using_1904_date +); ## ## Figure out whether I'm called as xls2csv.pl or xls2tab.pl @@ -59,7 +61,7 @@ } else { - die("This script is named '$whoami', but must be named either 'xls2csv.pl' or 'xls2tab.pl' to function properly.\n"); + die("This script is named '$whoami', but must be named 'xls2csv.pl', 'xls2tsv', or 'xls2tab.pl' to function properly.\n"); } @@ -115,7 +117,7 @@ if(defined($ARGV[2]) ) { - if ( isdigit($ARGV[2]) ) + if ( $ARGV[2] =~ m|^\d+$| ) { $sheetnumber = $ARGV[2]; die "Sheetnumber must be an integer larger than 0.\n" if $sheetnumber < 1; @@ -133,29 +135,50 @@ my $oExcel; my $oBook; -$oExcel = new Spreadsheet::ParseExcel; +$oExcel = new Spreadsheet::ParseExcel; +$formatter = Spreadsheet::ParseExcel::FmtDefault->new(); open(FH, "<$ARGV[0]") or die "Unable to open file '$ARGV[0]'.\n"; close(FH); +print "\n"; print "Loading '$ARGV[0]'...\n"; ## First try as a Excel 2007+ 'xml' file eval { local $SIG{__WARN__} = sub {}; - $oBook = Spreadsheet::XLSX -> new ($ARGV[0]); + $parser = Spreadsheet::ParseXLSX -> new(); + 
$oBook = $parser->parse ($ARGV[0]); }; ## Then Excel 97-2004 Format -if($@) +if ( !defined $oBook ) { - $oBook = new Spreadsheet::ParseExcel->Parse($ARGV[0]) or \ + $parser = Spreadsheet::ParseExcel -> new(); + $oBook = $parser->parse($ARGV[0]) or \ die "Error parsing file '$ARGV[0]'.\n"; } print "Done.\n"; +## Does this file use 1904-01-01 as the reference date instead of +## 1900-01-01? +$using_1904_date = ( $oBook->using_1904_date() == 1 ) || # ParseExcel + ( $oBook->{Flag1904} == 1 ); # ParseXLSX + + +## Show the user some summary information before we start extracting +## date print "\n"; print "Orignal Filename: ", $ARGV[0], "\n"; print "Number of Sheets: ", $oBook->{SheetCount} , "\n"; +if($using_1904_date) + { + print "Date reference : 1904-01-01\n"; + } +else + { + print "Date reference : 1900-01-01\n"; + } + print "\n"; ## Get list all worksheets in the file @@ -236,19 +259,53 @@ for(my $col = $mincol; $col <= $maxcol; $col++) { - my $cell = $sheet->{Cells}[$row][$col]; + my $cell = $sheet->{Cells}[$row][$col]; + my $format = $formatter->FmtString($cell, $oBook); if( defined($cell) ) { - $_=$cell->Value; #{Val}; + if ($cell->type() eq "Date") # && $using_1904_date ) + { + my $is_date = ( $format =~ m/y/ && + $format =~ m/m/ && + $format =~ m/d/ ); + + my $is_time = ( $format =~ m/h[:\]]*m/ || + $format =~ m/m[:\]]*s/ ); + + + if($is_date && $is_time) + { + $format = "yyyy-mm-dd hh:mm:ss.00"; + } + elsif ($is_date) + { + $format = "yyyy-mm-dd"; + } + elsif ($is_time) + { + $format = "hh:mm:ss.00" + } + + $_ = ExcelFmt($format, + $cell->unformatted(), + $using_1904_date); + } + else + { + $_=$cell->value(); + } # convert '#NUM!' strings to missing (empty) values s/#NUM!//; + # convert "#DIV/0!" strings to missing (emtpy) values + s|#DIV/0!||; + # escape double-quote characters in the data since # they are used as field delimiters s/\"/\\\"/g; } - else + else { $_ = ''; } @@ -273,7 +330,7 @@ close OutFile; - print " (Ignored $cumulativeBlankLines blank lines.)\n" + print " (Ignored $cumulativeBlankLines blank lines.)\n" if $skipBlankLines; print "\n"; } diff -Nru gdata-2.13.3/inst/perl/xls2tab.pl gdata-2.17.0/inst/perl/xls2tab.pl --- gdata-2.13.3/inst/perl/xls2tab.pl 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/xls2tab.pl 2015-04-25 08:48:57.000000000 +0000 @@ -1,15 +1,14 @@ #!/usr/bin/perl BEGIN { -use File::Basename; -# Add current path to perl library search path -use lib dirname($0); + use File::Basename; + # Add current path to perl library search path + use lib dirname($0); } use strict; #use Spreadsheet::ParseExcel; -#use Spreadsheet::XLSX; -use POSIX; +#use Spreadsheet::ParseXLSX; use File::Spec::Functions; use Getopt::Std; @@ -20,15 +19,18 @@ my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, - $HAS_Spreadsheet_XLSX + $HAS_Spreadsheet_ParseXLSX ) = check_modules_and_notify(); # declare some varibles local my($row, $col, $sheet, $cell, $usage, $targetfile,$basename, $sheetnumber, $filename, $volume, $directories, $whoami, - $sep, $sepName, $sepLabel, $sepExt, - $skipBlankLines, %switches); + $sep, $sepName, $sepLabel, $sepExt, + $skipBlankLines, %switches, + $parser, $oBook, $formatter, + $using_1904_date +); ## ## Figure out whether I'm called as xls2csv.pl or xls2tab.pl @@ -59,7 +61,7 @@ } else { - die("This script is named '$whoami', but must be named either 'xls2csv.pl' or 'xls2tab.pl' to function properly.\n"); + die("This script is named '$whoami', but must be named 'xls2csv.pl', 'xls2tsv', or 'xls2tab.pl' to function properly.\n"); } @@ -115,7 
+117,7 @@ if(defined($ARGV[2]) ) { - if ( isdigit($ARGV[2]) ) + if ( $ARGV[2] =~ m|^\d+$| ) { $sheetnumber = $ARGV[2]; die "Sheetnumber must be an integer larger than 0.\n" if $sheetnumber < 1; @@ -133,29 +135,50 @@ my $oExcel; my $oBook; -$oExcel = new Spreadsheet::ParseExcel; +$oExcel = new Spreadsheet::ParseExcel; +$formatter = Spreadsheet::ParseExcel::FmtDefault->new(); open(FH, "<$ARGV[0]") or die "Unable to open file '$ARGV[0]'.\n"; close(FH); +print "\n"; print "Loading '$ARGV[0]'...\n"; ## First try as a Excel 2007+ 'xml' file eval { local $SIG{__WARN__} = sub {}; - $oBook = Spreadsheet::XLSX -> new ($ARGV[0]); + $parser = Spreadsheet::ParseXLSX -> new(); + $oBook = $parser->parse ($ARGV[0]); }; ## Then Excel 97-2004 Format -if($@) +if ( !defined $oBook ) { - $oBook = new Spreadsheet::ParseExcel->Parse($ARGV[0]) or \ + $parser = Spreadsheet::ParseExcel -> new(); + $oBook = $parser->parse($ARGV[0]) or \ die "Error parsing file '$ARGV[0]'.\n"; } print "Done.\n"; +## Does this file use 1904-01-01 as the reference date instead of +## 1900-01-01? +$using_1904_date = ( $oBook->using_1904_date() == 1 ) || # ParseExcel + ( $oBook->{Flag1904} == 1 ); # ParseXLSX + + +## Show the user some summary information before we start extracting +## date print "\n"; print "Orignal Filename: ", $ARGV[0], "\n"; print "Number of Sheets: ", $oBook->{SheetCount} , "\n"; +if($using_1904_date) + { + print "Date reference : 1904-01-01\n"; + } +else + { + print "Date reference : 1900-01-01\n"; + } + print "\n"; ## Get list all worksheets in the file @@ -236,19 +259,53 @@ for(my $col = $mincol; $col <= $maxcol; $col++) { - my $cell = $sheet->{Cells}[$row][$col]; + my $cell = $sheet->{Cells}[$row][$col]; + my $format = $formatter->FmtString($cell, $oBook); if( defined($cell) ) { - $_=$cell->Value; #{Val}; + if ($cell->type() eq "Date") # && $using_1904_date ) + { + my $is_date = ( $format =~ m/y/ && + $format =~ m/m/ && + $format =~ m/d/ ); + + my $is_time = ( $format =~ m/h[:\]]*m/ || + $format =~ m/m[:\]]*s/ ); + + + if($is_date && $is_time) + { + $format = "yyyy-mm-dd hh:mm:ss.00"; + } + elsif ($is_date) + { + $format = "yyyy-mm-dd"; + } + elsif ($is_time) + { + $format = "hh:mm:ss.00" + } + + $_ = ExcelFmt($format, + $cell->unformatted(), + $using_1904_date); + } + else + { + $_=$cell->value(); + } # convert '#NUM!' strings to missing (empty) values s/#NUM!//; + # convert "#DIV/0!" 
strings to missing (emtpy) values + s|#DIV/0!||; + # escape double-quote characters in the data since # they are used as field delimiters s/\"/\\\"/g; } - else + else { $_ = ''; } @@ -273,7 +330,7 @@ close OutFile; - print " (Ignored $cumulativeBlankLines blank lines.)\n" + print " (Ignored $cumulativeBlankLines blank lines.)\n" if $skipBlankLines; print "\n"; } diff -Nru gdata-2.13.3/inst/perl/xls2tsv.pl gdata-2.17.0/inst/perl/xls2tsv.pl --- gdata-2.13.3/inst/perl/xls2tsv.pl 2014-01-03 19:32:45.000000000 +0000 +++ gdata-2.17.0/inst/perl/xls2tsv.pl 2015-04-25 08:48:57.000000000 +0000 @@ -1,15 +1,14 @@ #!/usr/bin/perl BEGIN { -use File::Basename; -# Add current path to perl library search path -use lib dirname($0); + use File::Basename; + # Add current path to perl library search path + use lib dirname($0); } use strict; #use Spreadsheet::ParseExcel; -#use Spreadsheet::XLSX; -use POSIX; +#use Spreadsheet::ParseXLSX; use File::Spec::Functions; use Getopt::Std; @@ -20,15 +19,18 @@ my( $HAS_Spreadsheet_ParseExcel, $HAS_Compress_Raw_Zlib, - $HAS_Spreadsheet_XLSX + $HAS_Spreadsheet_ParseXLSX ) = check_modules_and_notify(); # declare some varibles local my($row, $col, $sheet, $cell, $usage, $targetfile,$basename, $sheetnumber, $filename, $volume, $directories, $whoami, - $sep, $sepName, $sepLabel, $sepExt, - $skipBlankLines, %switches); + $sep, $sepName, $sepLabel, $sepExt, + $skipBlankLines, %switches, + $parser, $oBook, $formatter, + $using_1904_date +); ## ## Figure out whether I'm called as xls2csv.pl or xls2tab.pl @@ -59,7 +61,7 @@ } else { - die("This script is named '$whoami', but must be named either 'xls2csv.pl' or 'xls2tab.pl' to function properly.\n"); + die("This script is named '$whoami', but must be named 'xls2csv.pl', 'xls2tsv', or 'xls2tab.pl' to function properly.\n"); } @@ -115,7 +117,7 @@ if(defined($ARGV[2]) ) { - if ( isdigit($ARGV[2]) ) + if ( $ARGV[2] =~ m|^\d+$| ) { $sheetnumber = $ARGV[2]; die "Sheetnumber must be an integer larger than 0.\n" if $sheetnumber < 1; @@ -133,29 +135,50 @@ my $oExcel; my $oBook; -$oExcel = new Spreadsheet::ParseExcel; +$oExcel = new Spreadsheet::ParseExcel; +$formatter = Spreadsheet::ParseExcel::FmtDefault->new(); open(FH, "<$ARGV[0]") or die "Unable to open file '$ARGV[0]'.\n"; close(FH); +print "\n"; print "Loading '$ARGV[0]'...\n"; ## First try as a Excel 2007+ 'xml' file eval { local $SIG{__WARN__} = sub {}; - $oBook = Spreadsheet::XLSX -> new ($ARGV[0]); + $parser = Spreadsheet::ParseXLSX -> new(); + $oBook = $parser->parse ($ARGV[0]); }; ## Then Excel 97-2004 Format -if($@) +if ( !defined $oBook ) { - $oBook = new Spreadsheet::ParseExcel->Parse($ARGV[0]) or \ + $parser = Spreadsheet::ParseExcel -> new(); + $oBook = $parser->parse($ARGV[0]) or \ die "Error parsing file '$ARGV[0]'.\n"; } print "Done.\n"; +## Does this file use 1904-01-01 as the reference date instead of +## 1900-01-01? 
+$using_1904_date = ( $oBook->using_1904_date() == 1 ) || # ParseExcel + ( $oBook->{Flag1904} == 1 ); # ParseXLSX + + +## Show the user some summary information before we start extracting +## date print "\n"; print "Orignal Filename: ", $ARGV[0], "\n"; print "Number of Sheets: ", $oBook->{SheetCount} , "\n"; +if($using_1904_date) + { + print "Date reference : 1904-01-01\n"; + } +else + { + print "Date reference : 1900-01-01\n"; + } + print "\n"; ## Get list all worksheets in the file @@ -236,19 +259,53 @@ for(my $col = $mincol; $col <= $maxcol; $col++) { - my $cell = $sheet->{Cells}[$row][$col]; + my $cell = $sheet->{Cells}[$row][$col]; + my $format = $formatter->FmtString($cell, $oBook); if( defined($cell) ) { - $_=$cell->Value; #{Val}; + if ($cell->type() eq "Date") # && $using_1904_date ) + { + my $is_date = ( $format =~ m/y/ && + $format =~ m/m/ && + $format =~ m/d/ ); + + my $is_time = ( $format =~ m/h[:\]]*m/ || + $format =~ m/m[:\]]*s/ ); + + + if($is_date && $is_time) + { + $format = "yyyy-mm-dd hh:mm:ss.00"; + } + elsif ($is_date) + { + $format = "yyyy-mm-dd"; + } + elsif ($is_time) + { + $format = "hh:mm:ss.00" + } + + $_ = ExcelFmt($format, + $cell->unformatted(), + $using_1904_date); + } + else + { + $_=$cell->value(); + } # convert '#NUM!' strings to missing (empty) values s/#NUM!//; + # convert "#DIV/0!" strings to missing (emtpy) values + s|#DIV/0!||; + # escape double-quote characters in the data since # they are used as field delimiters s/\"/\\\"/g; } - else + else { $_ = ''; } @@ -273,7 +330,7 @@ close OutFile; - print " (Ignored $cumulativeBlankLines blank lines.)\n" + print " (Ignored $cumulativeBlankLines blank lines.)\n" if $skipBlankLines; print "\n"; } diff -Nru gdata-2.13.3/inst/perl/XML/Twig/XPath.pm gdata-2.17.0/inst/perl/XML/Twig/XPath.pm --- gdata-2.13.3/inst/perl/XML/Twig/XPath.pm 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/inst/perl/XML/Twig/XPath.pm 2014-08-28 01:23:43.000000000 +0000 @@ -0,0 +1,229 @@ +# $Id: /xmltwig/trunk/Twig/XPath.pm 32 2008-01-18T13:11:52.128782Z mrodrigu $ +package XML::Twig::XPath; +use strict; +use XML::Twig; + +my $XPATH; # XPath engine (XML::XPath or XML::XPathEngine); +my $XPATH_NUMBER; # <$XPATH>::Number, the XPath number class +BEGIN + { foreach my $xpath_engine ( qw( XML::XPathEngine XML::XPath) ) + { if( XML::Twig::_use( $xpath_engine) ) { $XPATH= $xpath_engine; last; } } + unless( $XPATH) { die "cannot use XML::Twig::XPath: neither XML::XPathEngine 0.09+ nor XML::XPath are available"; } + $XPATH_NUMBER= "${XPATH}::Number"; + } + + +use vars qw($VERSION); +$VERSION="0.02"; + +BEGIN +{ package XML::XPath::NodeSet; + no warnings; # to avoid the "Subroutine sort redefined" message + # replace the native sort routine by a Twig'd one + sub sort + { my $self = CORE::shift; + @$self = CORE::sort { $a->node_cmp( $b) } @$self; + return $self; + } + + package XML::XPathEngine::NodeSet; + no warnings; # to avoid the "Subroutine sort redefined" message + # replace the native sort routine by a Twig'd one + sub sort + { my $self = CORE::shift; + @$self = CORE::sort { $a->node_cmp( $b) } @$self; + return $self; + } +} + +package XML::Twig::XPath; + +use base 'XML::Twig'; + +my $XP; # the global xp object; + +sub to_number { return $XPATH_NUMBER->new( $_[0]->root->text); } + +sub new + { my $class= shift; + my $t= XML::Twig->new( elt_class => 'XML::Twig::XPath::Elt', @_); + $t->{twig_xp}= $XPATH->new(); + bless $t, $class; + return $t; + } + + +sub set_namespace { my $t= shift; $t->{twig_xp}->set_namespace( @_); } +sub 
set_strict_namespaces { my $t= shift; $t->{twig_xp}->set_strict_namespaces( @_); } + +sub node_cmp($$) { return $_[1] == $_[0] ? 0 : -1; } # document is before anything but itself + +sub isElementNode { 0 } +sub isAttributeNode { 0 } +sub isTextNode { 0 } +sub isProcessingInstructionNode { 0 } +sub isPINode { 0 } +sub isCommentNode { 0 } +sub isNamespaceNode { 0 } +sub getAttributes { [] } +sub getValue { return $_[0]->root->text; } + +sub findnodes { my( $t, $path)= @_; return $t->{twig_xp}->findnodes( $path, $t); } +sub findnodes_as_string { my( $t, $path)= @_; return $t->{twig_xp}->findnodes_as_string( $path, $t); } +sub findvalue { my( $t, $path)= @_; return $t->{twig_xp}->findvalue( $path, $t); } +sub exists { my( $t, $path)= @_; return $t->{twig_xp}->exists( $path, $t); } +sub find { my( $t, $path)= @_; return $t->{twig_xp}->find( $path, $t); } +sub matches { my( $t, $path, $node)= @_; $node ||= $t; return $t->{twig_xp}->matches( $node, $path, $t) || 0; } + +#TODO: it would be nice to be able to pass in any object in this +#distribution and cast it to the proper $XPATH class to use as a +#variable (via 'nodes' argument or something) +sub set_var { + my ($t, $name, $value) = @_; + if( ! ref $value) { $value= $t->findnodes( qq{"$value"}); } + $t->{twig_xp}->set_var($name, $value); +} + +1; + +# adds the appropriate methods to XML::Twig::Elt so XML::XPath can be used as the XPath engine +package XML::Twig::XPath::Elt; +use base 'XML::Twig::Elt'; + +*getLocalName= *XML::Twig::Elt::local_name; +*getValue = *XML::Twig::Elt::text; +sub isAttributeNode { 0 } +sub isNamespaceNode { 0 } + +sub to_number { return $XPATH_NUMBER->new( $_[0]->text); } + +sub getAttributes + { my $elt= shift; + my $atts= $elt->atts; + # alternate, faster but less clean, way + my @atts= map { bless( { name => $_, value => $atts->{$_}, elt => $elt }, + 'XML::Twig::XPath::Attribute') + } + sort keys %$atts; + # my @atts= map { XML::Twig::XPath::Attribute->new( $elt, $_) } sort keys %$atts; + return wantarray ? 
@atts : \@atts; + } + +sub getNamespace + { my $elt= shift; + my $prefix= shift() || $elt->ns_prefix; + if( my $expanded= $elt->namespace( $prefix)) + { return XML::Twig::XPath::Namespace->new( $prefix, $expanded); } + else + { return XML::Twig::XPath::Namespace->new( $prefix, ''); } + } + +sub node_cmp($$) + { my( $a, $b)= @_; + if( UNIVERSAL::isa( $b, 'XML::Twig::XPath::Elt')) + { # 2 elts, compare them + return $a->cmp( $b); + } + elsif( UNIVERSAL::isa( $b, 'XML::Twig::XPath::Attribute')) + { # elt <=> att, compare the elt to the att->{elt} + # if the elt is the att->{elt} (cmp return 0) then -1, elt is before att + return ($a->cmp( $b->{elt}) ) || -1 ; + } + elsif( UNIVERSAL::isa( $b, 'XML::Twig::XPath')) + { # elt <=> document, elt is after document + return 1; + } + else + { die "unknown node type ", ref( $b); } + } + +sub getParentNode + { return $_[0]->_parent + || $_[0]->twig; + } + +sub findnodes { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->findnodes( $path, $elt); } +sub findnodes_as_string { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->findnodes_as_string( $path, $elt); } +sub findvalue { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->findvalue( $path, $elt); } +sub exists { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->exists( $path, $elt); } +sub find { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->find( $path, $elt); } +sub matches { my( $elt, $path)= @_; return $elt->twig->{twig_xp}->matches( $elt, $path, $elt->getParentNode) || 0; } + + +1; + +# this package is only used to allow XML::XPath as the XPath engine, otherwise +# attributes are just attached to their parent element and are not considered objects + +package XML::Twig::XPath::Attribute; + +sub new + { my( $class, $elt, $att)= @_; + return bless { name => $att, value => $elt->att( $att), elt => $elt }, $class; + } + +sub getValue { return $_[0]->{value}; } +sub getName { return $_[0]->{name} ; } +sub getLocalName { (my $name= $_[0]->{name}) =~ s{^.*:}{}; $name; } +sub string_value { return $_[0]->{value}; } +sub to_number { return $XPATH_NUMBER->new( $_[0]->{value}); } +sub isElementNode { 0 } +sub isAttributeNode { 1 } +sub isNamespaceNode { 0 } +sub isTextNode { 0 } +sub isProcessingInstructionNode { 0 } +sub isPINode { 0 } +sub isCommentNode { 0 } +sub toString { return qq{$_[0]->{name}="$_[0]->{value}"}; } + +sub getNamespace + { my $att= shift; + my $prefix= shift(); + if( ! 
defined( $prefix)) + { if($att->{name}=~ m{^(.*):}) { $prefix= $1; } + else { $prefix=''; } + } + + if( my $expanded= $att->{elt}->namespace( $prefix)) + { return XML::Twig::XPath::Namespace->new( $prefix, $expanded); } + } + +sub node_cmp($$) + { my( $a, $b)= @_; + if( UNIVERSAL::isa( $b, 'XML::Twig::XPath::Attribute')) + { # 2 attributes, compare their elements, then their name + return ($a->{elt}->cmp( $b->{elt}) ) || ($a->{name} cmp $b->{name}); + } + elsif( UNIVERSAL::isa( $b, 'XML::Twig::XPath::Elt')) + { # att <=> elt : compare the att->elt and the elt + # if att->elt is the elt (cmp returns 0) then 1 (elt is before att) + return ($a->{elt}->cmp( $b) ) || 1 ; + } + elsif( UNIVERSAL::isa( $b, 'XML::Twig::XPath')) + { # att <=> document, att is after document + return 1; + } + else + { die "unknown node type ", ref( $b); } + } + +*cmp=*node_cmp; + +1; + +package XML::Twig::XPath::Namespace; + +sub new + { my( $class, $prefix, $expanded)= @_; + bless { prefix => $prefix, expanded => $expanded }, $class; + } + +sub isNamespaceNode { 1; } + +sub getPrefix { $_[0]->{prefix}; } +sub getExpanded { $_[0]->{expanded}; } +sub getValue { $_[0]->{expanded}; } +sub getData { $_[0]->{expanded}; } + +1 + diff -Nru gdata-2.13.3/inst/perl/XML/Twig.pm gdata-2.17.0/inst/perl/XML/Twig.pm --- gdata-2.13.3/inst/perl/XML/Twig.pm 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/inst/perl/XML/Twig.pm 2014-08-28 01:23:43.000000000 +0000 @@ -0,0 +1,14008 @@ +use strict; +use warnings; # > perl 5.5 + +# This is created in the caller's space +# I realize (now!) that it's not clean, but it's been there for 10+ years... +BEGIN +{ sub ::PCDATA { '#PCDATA' } ## no critic (Subroutines::ProhibitNestedSubs); + sub ::CDATA { '#CDATA' } ## no critic (Subroutines::ProhibitNestedSubs); +} + +use UNIVERSAL(); + +## if a sub returns a scalar, it better not bloody disappear in list context +## no critic (Subroutines::ProhibitExplicitReturnUndef); + +my $perl_version; +my $parser_version; + +###################################################################### +package XML::Twig; +###################################################################### + +require 5.004; + +use utf8; # > perl 5.5 + +use vars qw($VERSION @ISA %valid_option); + +use Carp; +use File::Spec; +use File::Basename; + +*isa= *UNIVERSAL::isa; + +# flag, set to true if the weaken sub is available +use vars qw( $weakrefs); + +# flag set to true if the version of expat seems to be 1.95.2, which has annoying bugs +# wrt doctype handling. This is global for performance reasons. 
+my $expat_1_95_2=0; + +# a slight non-xml mod: # is allowed as a first character +my $REG_TAG_FIRST_LETTER; +#$REG_TAG_FIRST_LETTER= q{(?:[^\W\d]|[:#_])}; # < perl 5.6 - does not work for leading non-ascii letters +$REG_TAG_FIRST_LETTER= q{(?:[[:alpha:]:#_])}; # >= perl 5.6 + +my $REG_TAG_LETTER= q{(?:[\w_.-]*)}; + +# a simple name (no colon) +my $REG_NAME_TOKEN= qq{(?:$REG_TAG_FIRST_LETTER$REG_TAG_LETTER*)}; + +# a tag name, possibly including namespace +my $REG_NAME= qq{(?:(?:$REG_NAME_TOKEN:)?$REG_NAME_TOKEN)}; + +# tag name (leading # allowed) +# first line is for perl 5.005, second line for modern perl, that accept character classes +my $REG_TAG_NAME=$REG_NAME; + +# name or wildcard (* or '') (leading # allowed) +my $REG_NAME_W = qq{(?:$REG_NAME|[*])}; + +# class and ids are deliberatly permissive +my $REG_NTOKEN_FIRST_LETTER; +#$REG_NTOKEN_FIRST_LETTER= q{(?:[^\W\d]|[:_])}; # < perl 5.6 - does not work for leading non-ascii letters +$REG_NTOKEN_FIRST_LETTER= q{(?:[[:alpha:]:_])}; # >= perl 5.6 + +my $REG_NTOKEN_LETTER= q{(?:[\w_:.-]*)}; + +my $REG_NTOKEN= qq{(?:$REG_NTOKEN_FIRST_LETTER$REG_NTOKEN_LETTER*)}; +my $REG_CLASS = $REG_NTOKEN; +my $REG_ID = $REG_NTOKEN; + +# allow # (private elt) * . *. # *# +my $REG_TAG_PART= qq{(?:$REG_NAME_W(?:[.]$REG_CLASS|[#]$REG_ID)?|[.]$REG_CLASS)}; + +my $REG_REGEXP = q{(?:/(?:[^\\/]|\\.)*/[eimsox]*)}; # regexp +my $REG_MATCH = q{[!=]~}; # match (or not) +my $REG_STRING = q{(?:"(?:[^\\"]|\\.)*"|'(?:[^\\']|\\.)*')}; # string (simple or double quoted) +my $REG_NUMBER = q{(?:\d+(?:\.\d*)?|\.\d+)}; # number +my $REG_VALUE = qq{(?:$REG_STRING|$REG_NUMBER)}; # value +my $REG_OP = q{==|!=|>|<|>=|<=|eq|ne|lt|gt|le|ge|=}; # op +my $REG_FUNCTION = q{(?:string|text)\(\s*\)}; +my $REG_STRING_ARG = qq{(?:string|text)\\(\\s*$REG_NAME_W\\s*\\)}; +my $REG_COMP = q{(?:>=|<=|!=|<|>|=)}; + +my $REG_TAG_IN_PREDICATE= $REG_NAME_W . q{(?=\s*(?i:and\b|or\b|\]|$))}; + +# keys in the context stack, chosen not to interfere with att names, even private (#-prefixed) ones +my $ST_TAG = '##tag'; +my $ST_ELT = '##elt'; +my $ST_NS = '##ns' ; + +# used in the handler trigger code +my $REG_NAKED_PREDICATE= qq{((?:"[^"]*"|'[^']*'|$REG_STRING_ARG|$REG_FUNCTION|\@$REG_NAME_W|$REG_MATCH\\s*$REG_REGEXP|[\\s\\d><=!()+.-]|(?i:and)|(?i:or)|$REG_TAG_IN_PREDICATE)*)}; +my $REG_PREDICATE= qq{\\[$REG_NAKED_PREDICATE\\]}; + +# not all axis, only supported ones (in get_xpath) +my @supported_axis= ( 'ancestor', 'ancestor-or-self', 'child', 'descendant', 'descendant-or-self', + 'following', 'following-sibling', 'parent', 'preceding', 'preceding-sibling', 'self' + ); +my $REG_AXIS = "(?:" . join( '|', @supported_axis) .")"; + +# only used in the "xpath"engine (for get_xpath/findnodes) for now +my $REG_PREDICATE_ALT = qr{\[(?:(?:string\(\s*\)|\@$REG_TAG_NAME)\s*$REG_MATCH\s*$REG_REGEXP\s*|[^\]]*)\]}; + +# used to convert XPath tests on strings to the perl equivalent +my %PERL_ALPHA_TEST= ( '=' => ' eq ', '!=' => ' ne ', '>' => ' gt ', '>=' => ' ge ', '<' => ' lt ', '<=' => ' le '); + +my( $FB_HTMLCREF, $FB_XMLCREF); + +my $NO_WARNINGS= $perl_version >= 5.006 ? 
'no warnings' : 'local $^W=0'; + +# default namespaces, both ways +my %DEFAULT_NS= ( xml => "http://www.w3.org/XML/1998/namespace", + xmlns => "http://www.w3.org/2000/xmlns/", + ); +my %DEFAULT_URI2NS= map { $DEFAULT_NS{$_} => $_ } keys %DEFAULT_NS; + +# constants +my( $PCDATA, $CDATA, $PI, $COMMENT, $ENT, $ELT, $TEXT, $ASIS, $EMPTY, $BUFSIZE); + +# used when an HTML doc only has a PUBLIC declaration, to generate the SYSTEM one +# this should really be done by HTML::TreeBuilder, but as of HTML::TreeBuilder 4.2 it isn't +# the various declarations are taken from http://en.wikipedia.org/wiki/Document_Type_Declaration +my %HTML_DECL= ( "-//W3C//DTD HTML 4.0 Transitional//EN" => "http://www.w3.org/TR/REC-html40/loose.dtd", + "-//W3C//DTD HTML 4.01//EN" => "http://www.w3.org/TR/html4/strict.dtd", + "-//W3C//DTD HTML 4.01 Transitional//EN" => "http://www.w3.org/TR/html4/loose.dtd", + "-//W3C//DTD HTML 4.01 Frameset//EN" => "http://www.w3.org/TR/html4/frameset.dtd", + "-//W3C//DTD XHTML 1.0 Strict//EN" => "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd", + "-//W3C//DTD XHTML 1.0 Transitional//EN" => "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd", + "-//W3C//DTD XHTML 1.0 Frameset//EN" => "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd", + "-//W3C//DTD XHTML 1.1//EN" => "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd", + "-//W3C//DTD XHTML Basic 1.0//EN" => "http://www.w3.org/TR/xhtml-basic/xhtml-basic10.dtd", + "-//W3C//DTD XHTML Basic 1.1//EN" => "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd", + "-//WAPFORUM//DTD XHTML Mobile 1.0//EN" => "http://www.wapforum.org/DTD/xhtml-mobile10.dtd", + "-//WAPFORUM//DTD XHTML Mobile 1.1//EN" => "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile11.dtd", + "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" => "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd", + "-//W3C//DTD XHTML+RDFa 1.0//EN" => "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd", + ); + +my $DEFAULT_HTML_TYPE= "-//W3C//DTD HTML 4.0 Transitional//EN"; + +my $SEP= qr/\s*(?:$|\|)/; + +BEGIN +{ +$VERSION = '3.48'; + +use XML::Parser; +my $needVersion = '2.23'; +($parser_version= $XML::Parser::VERSION)=~ s{_\d+}{}; # remove _ from version so numeric tests do not warn +croak "need at least XML::Parser version $needVersion" unless $parser_version >= $needVersion; + +($perl_version= $])=~ s{_\d+}{}; + +if( $perl_version >= 5.008) + { eval "use Encode qw( :all)"; ## no critic ProhibitStringyEval + $FB_XMLCREF = 0x0400; # Encode::FB_XMLCREF; + $FB_HTMLCREF = 0x0200; # Encode::FB_HTMLCREF; + } + +# test whether we can use weak references +# set local empty signal handler to trap error messages +{ local $SIG{__DIE__}; + if( eval( 'require Scalar::Util') && defined( \&Scalar::Util::weaken)) + { import Scalar::Util( 'weaken'); $weakrefs= 1; } + elsif( eval( 'require WeakRef')) + { import WeakRef; $weakrefs= 1; } + else + { $weakrefs= 0; } +} + +import XML::Twig::Elt; +import XML::Twig::Entity; +import XML::Twig::Entity_list; + +# used to store the gi's +# should be set for each twig really, at least when there are several +# the init ensures that special gi's are always the same + +# constants: element types +$PCDATA = '#PCDATA'; +$CDATA = '#CDATA'; +$PI = '#PI'; +$COMMENT = '#COMMENT'; +$ENT = '#ENT'; + +# element classes +$ELT = '#ELT'; +$TEXT = '#TEXT'; + +# element properties +$ASIS = '#ASIS'; +$EMPTY = '#EMPTY'; + +# used in parseurl to set the buffer size to the same size as in XML::Parser::Expat +$BUFSIZE = 32768; + + +# gi => index +%XML::Twig::gi2index=( '', 0, $PCDATA 
=> 1, $CDATA => 2, $PI => 3, $COMMENT => 4, $ENT => 5); +# list of gi's +@XML::Twig::index2gi=( '', $PCDATA, $CDATA, $PI, $COMMENT, $ENT); + +# gi's under this value are special +$XML::Twig::SPECIAL_GI= @XML::Twig::index2gi; + +%XML::Twig::base_ent= ( '>' => '>', '<' => '<', '&' => '&', "'" => ''', '"' => '"',); +foreach my $c ( "\n", "\r", "\t") { $XML::Twig::base_ent{$c}= sprintf( "&#x%02x;", ord( $c)); } + +# now set some aliases +*find_nodes = *get_xpath; # same as XML::XPath +*findnodes = *get_xpath; # same as XML::LibXML +*getElementsByTagName = *descendants; +*descendants_or_self = *descendants; # valid in XML::Twig, not in XML::Twig::Elt +*find_by_tag_name = *descendants; +*getElementById = *elt_id; +*getEltById = *elt_id; +*toString = *sprint; +*create_accessors = *att_accessors; + +} + +@ISA = qw(XML::Parser); + +# fake gi's used in twig_handlers and start_tag_handlers +my $ALL = '_all_'; # the associated function is always called +my $DEFAULT= '_default_'; # the function is called if no other handler has been + +# some defaults +my $COMMENTS_DEFAULT= 'keep'; +my $PI_DEFAULT = 'keep'; + + +# handlers used in regular mode +my %twig_handlers=( Start => \&_twig_start, + End => \&_twig_end, + Char => \&_twig_char, + Entity => \&_twig_entity, + XMLDecl => \&_twig_xmldecl, + Doctype => \&_twig_doctype, + Element => \&_twig_element, + Attlist => \&_twig_attlist, + CdataStart => \&_twig_cdatastart, + CdataEnd => \&_twig_cdataend, + Proc => \&_twig_pi, + Comment => \&_twig_comment, + Default => \&_twig_default, + ExternEnt => \&_twig_extern_ent, + ); + +# handlers used when twig_roots is used and we are outside of the roots +my %twig_handlers_roots= + ( Start => \&_twig_start_check_roots, + End => \&_twig_end_check_roots, + Doctype => \&_twig_doctype, + Char => undef, Entity => undef, XMLDecl => \&_twig_xmldecl, + Element => undef, Attlist => undef, CdataStart => undef, + CdataEnd => undef, Proc => undef, Comment => undef, + Proc => \&_twig_pi_check_roots, + Default => sub {}, # hack needed for XML::Parser 2.27 + ExternEnt => \&_twig_extern_ent, + ); + +# handlers used when twig_roots and print_outside_roots are used and we are +# outside of the roots +my %twig_handlers_roots_print_2_30= + ( Start => \&_twig_start_check_roots, + End => \&_twig_end_check_roots, + Char => \&_twig_print, + Entity => \&_twig_print_entity, + ExternEnt => \&_twig_print_entity, + DoctypeFin => \&_twig_doctype_fin_print, + XMLDecl => sub { _twig_xmldecl( @_); _twig_print( @_); }, + Doctype => \&_twig_print_doctype, # because recognized_string is broken here + # Element => \&_twig_print, Attlist => \&_twig_print, + CdataStart => \&_twig_print, CdataEnd => \&_twig_print, + Proc => \&_twig_pi_check_roots, Comment => \&_twig_print, + Default => \&_twig_print_check_doctype, + ExternEnt => \&_twig_extern_ent, + ); + +# handlers used when twig_roots, print_outside_roots and keep_encoding are used +# and we are outside of the roots +my %twig_handlers_roots_print_original_2_30= + ( Start => \&_twig_start_check_roots, + End => \&_twig_end_check_roots, + Char => \&_twig_print_original, + # I have no idea why I should not be using this handler! 
+ Entity => \&_twig_print_entity, + ExternEnt => \&_twig_print_entity, + DoctypeFin => \&_twig_doctype_fin_print, + XMLDecl => sub { _twig_xmldecl( @_); _twig_print_original( @_) }, + Doctype => \&_twig_print_original_doctype, # because original_string is broken here + Element => \&_twig_print_original, Attlist => \&_twig_print_original, + CdataStart => \&_twig_print_original, CdataEnd => \&_twig_print_original, + Proc => \&_twig_pi_check_roots, Comment => \&_twig_print_original, + Default => \&_twig_print_original_check_doctype, + ); + +# handlers used when twig_roots and print_outside_roots are used and we are +# outside of the roots +my %twig_handlers_roots_print_2_27= + ( Start => \&_twig_start_check_roots, + End => \&_twig_end_check_roots, + Char => \&_twig_print, + # if the Entity handler is set then it prints the entity declaration + # before the entire internal subset (including the declaration!) is output + Entity => sub {}, + XMLDecl => \&_twig_print, Doctype => \&_twig_print, + CdataStart => \&_twig_print, CdataEnd => \&_twig_print, + Proc => \&_twig_pi_check_roots, Comment => \&_twig_print, + Default => \&_twig_print, + ExternEnt => \&_twig_extern_ent, + ); + +# handlers used when twig_roots, print_outside_roots and keep_encoding are used +# and we are outside of the roots +my %twig_handlers_roots_print_original_2_27= + ( Start => \&_twig_start_check_roots, + End => \&_twig_end_check_roots, + Char => \&_twig_print_original, + # for some reason original_string is wrong here + # this can be a problem if the doctype includes non ascii characters + XMLDecl => \&_twig_print, Doctype => \&_twig_print, + # if the Entity handler is set then it prints the entity declaration + # before the entire internal subset (including the declaration!) is output + Entity => sub {}, + #Element => undef, Attlist => undef, + CdataStart => \&_twig_print_original, CdataEnd => \&_twig_print_original, + Proc => \&_twig_pi_check_roots, Comment => \&_twig_print_original, + Default => \&_twig_print, # _twig_print_original does not work + ExternEnt => \&_twig_extern_ent, + ); + + +my %twig_handlers_roots_print= $parser_version > 2.27 + ? %twig_handlers_roots_print_2_30 + : %twig_handlers_roots_print_2_27; +my %twig_handlers_roots_print_original= $parser_version > 2.27 + ? 
%twig_handlers_roots_print_original_2_30 + : %twig_handlers_roots_print_original_2_27; + + +# handlers used when the finish_print method has been called +my %twig_handlers_finish_print= + ( Start => \&_twig_print, + End => \&_twig_print, Char => \&_twig_print, + Entity => \&_twig_print, XMLDecl => \&_twig_print, + Doctype => \&_twig_print, Element => \&_twig_print, + Attlist => \&_twig_print, CdataStart => \&_twig_print, + CdataEnd => \&_twig_print, Proc => \&_twig_print, + Comment => \&_twig_print, Default => \&_twig_print, + ExternEnt => \&_twig_extern_ent, + ); + +# handlers used when the finish_print method has been called and the keep_encoding +# option is used +my %twig_handlers_finish_print_original= + ( Start => \&_twig_print_original, End => \&_twig_print_end_original, + Char => \&_twig_print_original, Entity => \&_twig_print_original, + XMLDecl => \&_twig_print_original, Doctype => \&_twig_print_original, + Element => \&_twig_print_original, Attlist => \&_twig_print_original, + CdataStart => \&_twig_print_original, CdataEnd => \&_twig_print_original, + Proc => \&_twig_print_original, Comment => \&_twig_print_original, + Default => \&_twig_print_original, + ); + +# handlers used within ignored elements +my %twig_handlers_ignore= + ( Start => \&_twig_ignore_start, + End => \&_twig_ignore_end, + Char => undef, Entity => undef, XMLDecl => undef, + Doctype => undef, Element => undef, Attlist => undef, + CdataStart => undef, CdataEnd => undef, Proc => undef, + Comment => undef, Default => undef, + ExternEnt => undef, + ); + + +# those handlers are only used if the entities are NOT to be expanded +my %twig_noexpand_handlers= ( ExternEnt => undef, Default => \&_twig_default ); + +my @saved_default_handler; + +my $ID= 'id'; # default value, set by the Id argument +my $css_sel=0; # set through the css_sel option to allow .class selectors in triggers + +# all allowed options +%valid_option= + ( # XML::Twig options + TwigHandlers => 1, Id => 1, + TwigRoots => 1, TwigPrintOutsideRoots => 1, + StartTagHandlers => 1, EndTagHandlers => 1, + ForceEndTagHandlersUsage => 1, + DoNotChainHandlers => 1, + IgnoreElts => 1, + Index => 1, + AttAccessors => 1, + EltAccessors => 1, + FieldAccessors => 1, + CharHandler => 1, + TopDownHandlers => 1, + KeepEncoding => 1, DoNotEscapeAmpInAtts => 1, + ParseStartTag => 1, KeepAttsOrder => 1, + LoadDTD => 1, DTDHandler => 1, + DoNotOutputDTD => 1, NoProlog => 1, + ExpandExternalEnts => 1, + DiscardSpaces => 1, KeepSpaces => 1, DiscardAllSpaces => 1, + DiscardSpacesIn => 1, KeepSpacesIn => 1, + PrettyPrint => 1, EmptyTags => 1, + EscapeGt => 1, + Quote => 1, + Comments => 1, Pi => 1, + OutputFilter => 1, InputFilter => 1, + OutputTextFilter => 1, + OutputEncoding => 1, + RemoveCdata => 1, + EltClass => 1, + MapXmlns => 1, KeepOriginalPrefix => 1, + SkipMissingEnts => 1, + # XML::Parser options + ErrorContext => 1, ProtocolEncoding => 1, + Namespaces => 1, NoExpand => 1, + Stream_Delimiter => 1, ParseParamEnt => 1, + NoLWP => 1, Non_Expat_Options => 1, + Xmlns => 1, CssSel => 1, + UseTidy => 1, TidyOptions => 1, + OutputHtmlDoctype => 1, + ); + +my $active_twig; # last active twig,for XML::Twig::s + +# predefined input and output filters +use vars qw( %filter); +%filter= ( html => \&html_encode, + safe => \&safe_encode, + safe_hex => \&safe_encode_hex, + ); + + +# trigger types (used to sort them) +my ($LEVEL_TRIGGER, $REGEXP_TRIGGER, $XPATH_TRIGGER)=(1..3); + +sub new + { my ($class, %args) = @_; + my $handlers; + + # change all nice_perlish_names into 
nicePerlishNames + %args= _normalize_args( %args); + + # check options + unless( $args{MoreOptions}) + { foreach my $arg (keys %args) + { carp "invalid option $arg" unless $valid_option{$arg}; } + } + + # a twig is really an XML::Parser + # my $self= XML::Parser->new(%args); + my $self; + $self= XML::Parser->new(%args); + + bless $self, $class; + + $self->{_twig_context_stack}= []; + + # allow tag.class selectors in handler triggers + $css_sel= $args{CssSel} || 0; + + + if( exists $args{TwigHandlers}) + { $handlers= $args{TwigHandlers}; + $self->setTwigHandlers( $handlers); + delete $args{TwigHandlers}; + } + + # take care of twig-specific arguments + if( exists $args{StartTagHandlers}) + { $self->setStartTagHandlers( $args{StartTagHandlers}); + delete $args{StartTagHandlers}; + } + + if( exists $args{DoNotChainHandlers}) + { $self->{twig_do_not_chain_handlers}= $args{DoNotChainHandlers}; } + + if( exists $args{IgnoreElts}) + { # change array to hash so you can write ignore_elts => [ qw(foo bar baz)] + if( isa( $args{IgnoreElts}, 'ARRAY')) { $args{IgnoreElts}= { map { $_ => 1 } @{$args{IgnoreElts}} }; } + $self->setIgnoreEltsHandlers( $args{IgnoreElts}); + delete $args{IgnoreElts}; + } + + if( exists $args{Index}) + { my $index= $args{Index}; + # we really want a hash name => path, we turn an array into a hash if necessary + if( ref( $index) eq 'ARRAY') + { my %index= map { $_ => $_ } @$index; + $index= \%index; + } + while( my( $name, $exp)= each %$index) + { $self->setTwigHandler( $exp, sub { push @{$_[0]->{_twig_index}->{$name}}, $_; 1; }); } + } + + $self->{twig_elt_class}= $args{EltClass} || 'XML::Twig::Elt'; + if( defined( $args{EltClass}) && $args{EltClass} ne 'XML::Twig::Elt') { $self->{twig_alt_elt_class}=1; } + if( exists( $args{EltClass})) { delete $args{EltClass}; } + + if( exists( $args{MapXmlns})) + { $self->{twig_map_xmlns}= $args{MapXmlns}; + $self->{Namespaces}=1; + delete $args{MapXmlns}; + } + + if( exists( $args{KeepOriginalPrefix})) + { $self->{twig_keep_original_prefix}= $args{KeepOriginalPrefix}; + delete $args{KeepOriginalPrefix}; + } + + $self->{twig_dtd_handler}= $args{DTDHandler}; + delete $args{DTDHandler}; + + if( $args{ExpandExternalEnts}) + { $self->set_expand_external_entities( 1); + $self->{twig_expand_external_ents}= $args{ExpandExternalEnts}; + $self->{twig_read_external_dtd}= 1; # implied by ExpandExternalEnts + if( $args{ExpandExternalEnts} == -1) + { $self->{twig_extern_ent_nofail}= 1; + $self->setHandlers( ExternEnt => \&_twig_extern_ent_nofail); + } + delete $args{LoadDTD}; + delete $args{ExpandExternalEnts}; + } + else + { $self->set_expand_external_entities( 0); } + + if( !$args{NoLWP} && ! _use( 'URI') && ! _use( 'URI::File') && ! 
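new() above normalizes snake_case option names into the CamelCase keys checked against %valid_option, so both spellings work, and then peels off the twig-specific options handled in this block. An illustrative sketch (element names hypothetical, not taken from the diff):

    my $twig = XML::Twig->new(
        twig_handlers => { 'chapter/title' => sub { print $_->text, "\n"; } },
        ignore_elts   => [ qw( appendix glossary ) ],   # array form, turned into a hash above
        css_sel       => 1,                             # allow tag.class selectors in triggers
    );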
_use( 'LWP')) + { $self->{twig_ext_ent_handler}= \&XML::Parser::initial_ext_ent_handler } + else + { $self->{twig_ext_ent_handler}= \&XML::Parser::file_ext_ent_handler } + + if( $args{DoNotEscapeAmpInAtts}) + { $self->set_do_not_escape_amp_in_atts( 1); + $self->{twig_do_not_escape_amp_in_atts}=1; + } + else + { $self->set_do_not_escape_amp_in_atts( 0); + $self->{twig_do_not_escape_amp_in_atts}=0; + } + + # deal with TwigRoots argument, a hash of elements for which + # subtrees will be built (and associated handlers) + + if( $args{TwigRoots}) + { $self->setTwigRoots( $args{TwigRoots}); + delete $args{TwigRoots}; + } + + if( $args{EndTagHandlers}) + { unless ($self->{twig_roots} || $args{ForceEndTagHandlersUsage}) + { croak "you should not use EndTagHandlers without TwigRoots\n", + "if you want to use it anyway, normally because you have ", + "a start_tag_handlers that calls 'ignore' and you want to ", + "call an ent_tag_handlers at the end of the element, then ", + "pass 'force_end_tag_handlers_usage => 1' as an argument ", + "to new"; + } + + $self->setEndTagHandlers( $args{EndTagHandlers}); + delete $args{EndTagHandlers}; + } + + if( $args{TwigPrintOutsideRoots}) + { croak "cannot use twig_print_outside_roots without twig_roots" + unless( $self->{twig_roots}); + # if the arg is a filehandle then store it + if( _is_fh( $args{TwigPrintOutsideRoots}) ) + { $self->{twig_output_fh}= $args{TwigPrintOutsideRoots}; } + $self->{twig_default_print}= $args{TwigPrintOutsideRoots}; + } + + # space policy + if( $args{KeepSpaces}) + { croak "cannot use both keep_spaces and discard_spaces" if( $args{DiscardSpaces}); + croak "cannot use both keep_spaces and discard_all_spaces" if( $args{DiscardAllSpaces}); + croak "cannot use both keep_spaces and keep_spaces_in" if( $args{KeepSpacesIn}); + $self->{twig_keep_spaces}=1; + delete $args{KeepSpaces}; + } + if( $args{DiscardSpaces}) + { + croak "cannot use both discard_spaces and keep_spaces_in" if( $args{KeepSpacesIn}); + croak "cannot use both discard_spaces and discard_all_spaces" if( $args{DiscardAllSpaces}); + croak "cannot use both discard_spaces and discard_spaces_in" if( $args{DiscardSpacesIn}); + $self->{twig_discard_spaces}=1; + delete $args{DiscardSpaces}; + } + if( $args{KeepSpacesIn}) + { croak "cannot use both keep_spaces_in and discard_spaces_in" if( $args{DiscardSpacesIn}); + croak "cannot use both keep_spaces_in and discard_all_spaces" if( $args{DiscardAllSpaces}); + $self->{twig_discard_spaces}=1; + $self->{twig_keep_spaces_in}={}; + my @tags= @{$args{KeepSpacesIn}}; + foreach my $tag (@tags) { $self->{twig_keep_spaces_in}->{$tag}=1; } + delete $args{KeepSpacesIn}; + } + + if( $args{DiscardAllSpaces}) + { + croak "cannot use both discard_all_spaces and discard_spaces_in" if( $args{DiscardSpacesIn}); + $self->{twig_discard_all_spaces}=1; + delete $args{DiscardAllSpaces}; + } + + if( $args{DiscardSpacesIn}) + { $self->{twig_keep_spaces}=1; + $self->{twig_discard_spaces_in}={}; + my @tags= @{$args{DiscardSpacesIn}}; + foreach my $tag (@tags) { $self->{twig_discard_spaces_in}->{$tag}=1; } + delete $args{DiscardSpacesIn}; + } + # discard spaces by default + $self->{twig_discard_spaces}= 1 unless( $self->{twig_keep_spaces}); + + $args{Comments}||= $COMMENTS_DEFAULT; + if( $args{Comments} eq 'drop') { $self->{twig_keep_comments}= 0; } + elsif( $args{Comments} eq 'keep') { $self->{twig_keep_comments}= 1; } + elsif( $args{Comments} eq 'process') { $self->{twig_process_comments}= 1; } + else { croak "wrong value for comments argument: '$args{Comments}' 
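The block above makes the whitespace policies mutually exclusive; whitespace-only text is discarded by default unless one of the keep options is set. For example (tag name hypothetical):

    my $twig = XML::Twig->new(
        keep_spaces_in => [ 'pre' ],    # keep whitespace below <pre>, discard it elsewhere
    );
    # per the croaks above, combinations such as keep_spaces + discard_spaces,
    # keep_spaces + keep_spaces_in, or discard_all_spaces + discard_spaces_in are rejected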
(should be 'drop', 'keep' or 'process')"; } + delete $args{Comments}; + + $args{Pi}||= $PI_DEFAULT; + if( $args{Pi} eq 'drop') { $self->{twig_keep_pi}= 0; } + elsif( $args{Pi} eq 'keep') { $self->{twig_keep_pi}= 1; } + elsif( $args{Pi} eq 'process') { $self->{twig_process_pi}= 1; } + else { croak "wrong value for pi argument: '$args{Pi}' (should be 'drop', 'keep' or 'process')"; } + delete $args{Pi}; + + if( $args{KeepEncoding}) + { + # set it in XML::Twig::Elt so print functions know what to do + $self->set_keep_encoding( 1); + $self->{parse_start_tag}= $args{ParseStartTag} || \&_parse_start_tag; + delete $args{ParseStartTag} if defined( $args{ParseStartTag}) ; + delete $args{KeepEncoding}; + } + else + { $self->set_keep_encoding( 0); + if( $args{ParseStartTag}) + { $self->{parse_start_tag}= $args{ParseStartTag}; } + else + { delete $self->{parse_start_tag}; } + delete $args{ParseStartTag}; + } + + if( $args{OutputFilter}) + { $self->set_output_filter( $args{OutputFilter}); + delete $args{OutputFilter}; + } + else + { $self->set_output_filter( 0); } + + if( $args{RemoveCdata}) + { $self->set_remove_cdata( $args{RemoveCdata}); + delete $args{RemoveCdata}; + } + else + { $self->set_remove_cdata( 0); } + + if( $args{OutputTextFilter}) + { $self->set_output_text_filter( $args{OutputTextFilter}); + delete $args{OutputTextFilter}; + } + else + { $self->set_output_text_filter( 0); } + + if( exists $args{KeepAttsOrder}) + { $self->{keep_atts_order}= $args{KeepAttsOrder}; + if( _use( 'Tie::IxHash')) + { $self->set_keep_atts_order( $self->{keep_atts_order}); } + else + { croak "Tie::IxHash not available, option keep_atts_order not allowed"; } + } + else + { $self->set_keep_atts_order( 0); } + + + if( $args{PrettyPrint}) { $self->set_pretty_print( $args{PrettyPrint}); } + if( $args{EscapeGt}) { $self->escape_gt( $args{EscapeGt}); } + if( $args{EmptyTags}) { $self->set_empty_tag_style( $args{EmptyTags}) } + + if( exists $args{Id}) { $ID= $args{Id}; delete $args{ID}; } + if( $args{NoProlog}) { $self->{no_prolog}= 1; delete $args{NoProlog}; } + if( $args{DoNotOutputDTD}) { $self->{no_dtd_output}= 1; delete $args{DoNotOutputDTD}; } + if( $args{LoadDTD}) { $self->{twig_read_external_dtd}= 1; delete $args{LoadDTD}; } + if( $args{CharHandler}) { $self->setCharHandler( $args{CharHandler}); delete $args{CharHandler}; } + + if( $args{InputFilter}) { $self->set_input_filter( $args{InputFilter}); delete $args{InputFilter}; } + if( $args{NoExpand}) { $self->setHandlers( %twig_noexpand_handlers); $self->{twig_no_expand}=1; } + if( my $output_encoding= $args{OutputEncoding}) { $self->set_output_encoding( $output_encoding); delete $args{OutputFilter}; } + + if( my $tdh= $args{TopDownHandlers}) { $self->{twig_tdh}=1; delete $args{TopDownHandlers}; } + + if( my $acc_a= $args{AttAccessors}) { $self->att_accessors( @$acc_a); } + if( my $acc_e= $args{EltAccessors}) { $self->elt_accessors( isa( $acc_e, 'ARRAY') ? @$acc_e : $acc_e); } + if( my $acc_f= $args{FieldAccessors}) { $self->field_accessors( isa( $acc_f, 'ARRAY') ? 
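Comments and processing instructions each accept one of three modes above: 'drop', 'keep' (the default for both), or 'process', which exposes them to handlers. A short sketch of these and the neighbouring output options, not part of the diff:

    my $twig = XML::Twig->new(
        comments        => 'process',        # comments become elements that handlers can see
        pi              => 'drop',           # processing instructions are discarded
        keep_atts_order => 1,                # needs Tie::IxHash, enforced by the croak above
        pretty_print    => 'indented',
    );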
@$acc_f : $acc_f); } + + if( $args{UseTidy}) { $self->{use_tidy}= 1; } + $self->{tidy_options}= $args{TidyOptions} || {}; + + if( $args{OutputHtmlDoctype}) { $self->{html_doctype}= 1; } + + $self->set_quote( $args{Quote} || 'double'); + + # set handlers + if( $self->{twig_roots}) + { if( $self->{twig_default_print}) + { if( $self->{twig_keep_encoding}) + { $self->setHandlers( %twig_handlers_roots_print_original); } + else + { $self->setHandlers( %twig_handlers_roots_print); } + } + else + { $self->setHandlers( %twig_handlers_roots); } + } + else + { $self->setHandlers( %twig_handlers); } + + # XML::Parser::Expat does not like these handler to be set. So in order to + # use the various sets of handlers on XML::Parser or XML::Parser::Expat + # objects when needed, these ones have to be set only once, here, at + # XML::Parser level + $self->setHandlers( Init => \&_twig_init, Final => \&_twig_final); + + $self->{twig_entity_list}= XML::Twig::Entity_list->new; + + $self->{twig_id}= $ID; + $self->{twig_stored_spaces}=''; + + $self->{twig_autoflush}= 1; # auto flush by default + + $self->{twig}= $self; + if( $weakrefs) { weaken( $self->{twig}); } + + return $self; + } + +sub parse + { + my $t= shift; + # if called as a class method, calls nparse, which creates the twig then parses it + if( !ref( $t) || !isa( $t, 'XML::Twig')) { return $t->nparse( @_); } + + # requires 5.006 at least (or the ${^UNICODE} causes a problem) # > perl 5.5 + # trap underlying bug in IO::Handle (see RT #17500) # > perl 5.5 + # croak if perl 5.8+, -CD (or PERL_UNICODE set to D) and parsing a pipe # > perl 5.5 + if( $perl_version>=5.008 && ${^UNICODE} && (${^UNICODE} & 24) && isa( $_[0], 'GLOB') && -p $_[0] ) # > perl 5.5 + { croak "cannot parse the output of a pipe when perl is set to use the UTF8 perlIO layer\n" # > perl 5.5 + . "set the environment variable PERL_UNICODE or use the -C option (see perldoc perlrun)\n" # > perl 5.5 + . "not to include 'D'"; # > perl 5.5 + } # > perl 5.5 + $t= eval { $t->SUPER::parse( @_); }; + + if( !$t + && $@=~m{(syntax error at line 1, column 0, byte 0|not well-formed \(invalid token\) at line 1, column 1, byte 1)} + && -f $_[0] + ) + { croak "you seem to have used the parse method on a filename ($_[0]), you probably want parsefile instead"; } + return _checked_parse_result( $t, $@); + } + +sub parsefile + { my $t= shift; + if( -f $_[0] && ! 
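parse() above expects an XML string or an open filehandle; passing a file name is a common mistake, which is why the error message redirects the caller to parsefile(). Illustrative calls (file name hypothetical):

    $twig->parse( '<doc><p>an inline XML string</p></doc>' );   # a string ...
    # or:
    open( my $fh, '<', 'doc.xml' ) or die $!;
    $twig->parse( $fh );                                         # ... or a filehandle
    # or:
    $twig->parsefile( 'doc.xml' );                               # a file name goes to parsefile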
-s $_[0]) { return _checked_parse_result( undef, "empty file '$_[0]'"); } + $t= eval { $t->SUPER::parsefile( @_); }; + return _checked_parse_result( $t, $@); + } + +sub _checked_parse_result + { my( $t, $returned)= @_; + if( !$t) + { if( isa( $returned, 'XML::Twig') && $returned->{twig_finish_now}) + { $t= $returned; + delete $t->{twig_finish_now}; + return $t->_twig_final; + } + else + { _croak( $returned, 0); } + } + + $active_twig= $t; + return $t; + } + +sub active_twig { return $active_twig; } + +sub finish_now + { my $t= shift; + $t->{twig_finish_now}=1; + die $t; + } + + +sub parsefile_inplace { shift->_parse_inplace( parsefile => @_); } +sub parsefile_html_inplace { shift->_parse_inplace( parsefile_html => @_); } + +sub _parse_inplace + { my( $t, $method, $file, $suffix)= @_; + _use( 'File::Temp') || croak "need File::Temp to use inplace methods\n"; + _use( 'File::Basename'); + + + my $tmpdir= dirname( $file); + my( $tmpfh, $tmpfile)= File::Temp::tempfile( DIR => $tmpdir); + my $original_fh= select $tmpfh; + + unless( $t->{twig_keep_encoding} || $perl_version < 5.006) + { if( grep /useperlio=define/, `$^X -V`) # we can only use binmode :utf8 if perl was compiled with useperlio + { binmode( $tmpfh, ":utf8" ); } + } + + $t->$method( $file); + + select $original_fh; + close $tmpfh; + my $mode= (stat( $file))[2] & oct(7777); + chmod $mode, $tmpfile or croak "cannot change temp file mode to $mode: $!"; + + if( $suffix) + { my $backup; + if( $suffix=~ m{\*}) { ($backup = $suffix) =~ s/\*/$file/g; } + else { $backup= $file . $suffix; } + + rename( $file, $backup) or croak "cannot backup initial file ($file) to $backup: $!"; + } + rename( $tmpfile, $file) or croak "cannot rename temp file ($tmpfile) to initial file ($file): $!"; + + return $t; + } + + +sub parseurl + { my $t= shift; + $t->_parseurl( 0, @_); + } + +sub safe_parseurl + { my $t= shift; + $t->_parseurl( 1, @_); + } + +sub safe_parsefile_html + { my $t= shift; + eval { $t->parsefile_html( @_); }; + return $@ ? $t->_reset_twig_after_error : $t; + } + +sub safe_parseurl_html + { my $t= shift; + _use( 'LWP::Simple') or croak "missing LWP::Simple"; + eval { $t->parse_html( LWP::Simple::get( shift()), @_); } ; + return $@ ? $t->_reset_twig_after_error : $t; + } + +sub parseurl_html + { my $t= shift; + _use( 'LWP::Simple') or croak "missing LWP::Simple"; + $t->parse_html( LWP::Simple::get( shift()), @_); + } + + +# uses eval to catch the parser's death +sub safe_parse_html + { my $t= shift; + eval { $t->parse_html( @_); } ; + return $@ ? $t->_reset_twig_after_error : $t; + } + +sub parsefile_html + { my $t= shift; + my $file= shift; + my $indent= $t->{ErrorContext} ? 1 : 0; + $t->set_empty_tag_style( 'html'); + my $html2xml= $t->{use_tidy} ? \&_tidy_html : \&_html2xml; + my $options= $t->{use_tidy} ? $t->{tidy_options} || {} : { indent => $indent, html_doctype => $t->{html_doctype} }; + $t->parse( $html2xml->( _slurp( $file), $options), @_); + return $t; + } + +sub parse_html + { my $t= shift; + my $options= ref $_[0] && ref $_[0] eq 'HASH' ? shift() : {}; + my $use_tidy= exists $options->{use_tidy} ? $options->{use_tidy} : $t->{use_tidy}; + my $content= shift; + my $indent= $t->{ErrorContext} ? 1 : 0; + $t->set_empty_tag_style( 'html'); + my $html2xml= $use_tidy ? \&_tidy_html : \&_html2xml; + my $conv_options= $use_tidy ? $t->{tidy_options} || {} : { indent => $indent, html_doctype => $t->{html_doctype} }; + $t->parse( $html2xml->( isa( $content, 'GLOB') ? 
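The safe_* wrappers defined in this area trap the parser's die and return undef with the error left in $@, while the *_inplace methods write through a temp file and optionally keep a backup. A sketch, assuming a hypothetical doc.xml:

    $twig->safe_parsefile( 'doc.xml' ) or warn "skipping doc.xml: $@";
    $twig->parsefile_inplace( 'doc.xml', '.bak' );   # edits doc.xml, original kept as doc.xml.bak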
_slurp_fh( $content) : $content, $conv_options), @_); + return $t; + } + +sub xparse + { my $t= shift; + my $to_parse= $_[0]; + if( isa( $to_parse, 'GLOB')) { $t->parse( @_); } + elsif( $to_parse=~ m{^\s*<}) { $to_parse=~ m{_parse_as_xml_or_html( @_) + : $t->parse( @_); + } + elsif( $to_parse=~ m{^\w+://.*\.html?$}) { _use( 'LWP::Simple') or croak "missing LWP::Simple"; + $t->_parse_as_xml_or_html( LWP::Simple::get( shift()), @_); + } + elsif( $to_parse=~ m{^\w+://}) { _use( 'LWP::Simple') or croak "missing LWP::Simple"; + my $doc= LWP::Simple::get( shift); + if( ! defined $doc) { $doc=''; } + my $xml_parse_ok= $t->safe_parse( $doc, @_); + if( $xml_parse_ok) + { return $xml_parse_ok; } + else + { my $diag= $@; + if( $doc=~ m{parse_html( $doc, @_); } + else + { croak $diag; } + } + } + elsif( $to_parse=~ m{\.html?$}) { my $content= _slurp( shift); + $t->_parse_as_xml_or_html( $content, @_); + } + else { $t->parsefile( @_); } + } + +sub _parse_as_xml_or_html + { my $t= shift; + if( _is_well_formed_xml( $_[0])) + { $t->parse( @_) } + else + { my $html2xml= $t->{use_tidy} ? \&_tidy_html : \&_html2xml; + my $options= $t->{use_tidy} ? $t->{tidy_options} || {} : { indent => 0, html_doctype => $t->{html_doctype} }; + my $html= $html2xml->( $_[0], $options, @_); + if( _is_well_formed_xml( $html)) + { $t->parse( $html); } + else + { croak $@; } # can't really test this because HTML::Parser or HTML::Tidy may change how they deal with bas HTML between versions + } + } + +{ my $parser; + sub _is_well_formed_xml + { $parser ||= XML::Parser->new; + eval { $parser->parse( $_[0]); }; + return $@ ? 0 : 1; + } +} + +sub nparse + { my $class= shift; + my $to_parse= pop; + $class->new( @_)->xparse( $to_parse); + } + +sub nparse_pp { shift()->nparse( pretty_print => 'indented', @_); } +sub nparse_e { shift()->nparse( error_context => 1, @_); } +sub nparse_ppe { shift()->nparse( pretty_print => 'indented', error_context => 1, @_); } + + +sub _html2xml + { my( $html, $options)= @_; + _use( 'HTML::TreeBuilder', '3.13') or croak "cannot parse HTML: missing HTML::TreeBuilder v >= 3.13\n"; + my $tree= HTML::TreeBuilder->new; + $tree->ignore_ignorable_whitespace( 0); + $tree->ignore_unknown( 0); + $tree->no_space_compacting( 1); + $tree->store_comments( 1); + $tree->store_pis(1); + $tree->parse( $html); + $tree->eof; + + my $xml=''; + if( $options->{html_doctype} && exists $tree->{_decl} ) + { my $decl= $tree->{_decl}->as_XML; + + # first try to fix declarations that are missing the SYSTEM part + $decl =~ s{^\s*} + { my $system= $HTML_DECL{$2} || $HTML_DECL{$DEFAULT_HTML_TYPE}; + qq{} + + }xe; + + # then check that the declaration looks OK (so it parses), if not remove it, + # better to parse without the declaration than to die stupidly + if( $decl =~ m{}x # PUBLIC then SYSTEM + || $decl =~ m{}x # just SYSTEM + ) + { $xml= $decl; } + } + + $xml.= _as_XML( $tree); + + + _fix_xml( $tree, \$xml); + + if( $options->{indent}) { _indent_xhtml( \$xml); } + $tree->delete; + $xml=~ s{\s+$}{}s; # trim end + return $xml; + } + +sub _tidy_html + { my( $html, $options)= @_; + _use( 'HTML::Tidy') or croak "cannot cleanup HTML using HTML::Tidy (required by the use_tidy option): $@\n"; ; + my $TIDY_DEFAULTS= { output_xhtml => 1, # duh! 
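xparse() above guesses how to parse its argument: a filehandle, a literal XML string, an http URL, an .html file, or a plain file name, falling back to HTML conversion when XML parsing fails. The nparse* one-shot constructors wrap it; a minimal sketch with hypothetical inputs:

    my $t1 = XML::Twig->nparse( 'doc.xml' );                        # new() + xparse() in one call
    my $t2 = XML::Twig->nparse_pp( 'http://example.com/feed.xml' ); # same, with pretty_print => 'indented'
    print $t2->sprint;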
+ tidy_mark => 0, # do not add the "generated by tidy" comment + numeric_entities => 1, + char_encoding => 'utf8', + bare => 1, + clean => 1, + doctype => 'transitional', + fix_backslash => 1, + merge_divs => 0, + merge_spans => 0, + sort_attributes => 'alpha', + indent => 0, + wrap => 0, + break_before_br => 0, + }; + $options ||= {}; + my $tidy_options= { %$TIDY_DEFAULTS, %$options}; + my $tidy = HTML::Tidy->new( $tidy_options); + $tidy->ignore( type => 1, type => 2 ); # 1 is TIDY_WARNING, 2 is TIDY_ERROR, not clean + my $xml= $tidy->clean( $html ); + return $xml; + } + + +{ my %xml_parser_encoding; + sub _fix_xml + { my( $tree, $xml)= @_; # $xml is a ref to the xml string + + my $max_tries=5; + my $add_decl; + + while( ! _check_xml( $xml) && $max_tries--) + { + # a couple of fixes for weird HTML::TreeBuilder errors + if( $@=~ m{^\s*xml (or text )?declaration not at start of (external )?entity}i) + { $$xml=~ s{<\?xml.*?\?>}{}g; + #warn " fixed xml declaration in the wrong place\n"; + } + elsif( $@=~ m{undefined entity}) + { $$xml=~ s{&(amp;)?Amp;}{&}g if $HTML::TreeBuilder::VERSION < 4.00; + if( _use( 'HTML::Entities::Numbered')) { $$xml=name2hex_xml( $$xml); } + $$xml=~ s{&(\w+);}{ my $ent= $1; if( $ent !~ m{^(amp|lt|gt|apos|quote)$}) { "&$ent;" } }eg; + } + elsif( $@=~ m{&Amp; used in html}) + # if $Amp; is used instead of & then HTML::TreeBuilder's as_xml is tripped (old version) + { $$xml=~ s{&(amp;)?Amp;}{&}g if $HTML::TreeBuilder::VERSION < 4.00; + } + elsif( $@=~ m{^\s*not well-formed \(invalid token\)}) + { if( $HTML::TreeBuilder::VERSION < 4.00) + { $$xml=~ s{&(amp;)?Amp;}{&}g; + $$xml=~ s{(<[^>]* )(\d+=)"}{$1a$2"}g; # comes out as
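When the use_tidy option is set, HTML input is cleaned up through HTML::Tidy with the defaults listed above instead of HTML::TreeBuilder. A hedged sketch (file name and tidy option purely illustrative):

    my $twig = XML::Twig->new( use_tidy => 1, tidy_options => { indent => 1 } );
    $twig->safe_parsefile_html( 'page.html' ) or warn "could not tidy/parse: $@";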
, "fix the attribute + } + my $q= ')?}{}s; + #warn " added decl (encoding $encoding)\n"; + } + else + { $$xml=~ s{^(<\?xml.*?\?>)?}{}s; + #warn " converting to utf8 from $encoding\n"; + $$xml= _to_utf8( $encoding, $$xml); + } + } + else + { $$xml=~ s{^(<\?xml.*?\?>)?}{}s; + #warn " converting to utf8 from $encoding\n"; + $$xml= _to_utf8( $encoding, $$xml); + } + } + } + } + + # some versions of HTML::TreeBuilder escape CDATA sections + $$xml=~ s{(<!\[CDATA\[.*?\]\]>)}{_unescape_cdata( $1)}eg; + + } + + sub _xml_parser_encodings + { my @encodings=( 'iso-8859-1'); # this one is included by default, there is no map for it in @INC + foreach my $inc (@INC) + { push @encodings, map { basename( $_, '.enc') } glob( File::Spec->catdir( $inc => XML => Parser => Encodings => '*.enc')); } + return map { $_ => 1 } @encodings; + } +} + + +sub _unescape_cdata + { my( $cdata)= @_; + $cdata=~s{<}{<}g; + $cdata=~s{>}{>}g; + $cdata=~s{&}{&}g; + return $cdata; + } + +sub _as_XML { + + # fork of HTML::Element::as_XML, which is a little too buggy and inconsistent between versions for my liking + my ($elt) = @_; + my $xml= ''; + my $empty_element_map = $elt->_empty_element_map; + + my ( $tag, $node, $start ); # per-iteration scratch + $elt->traverse( + sub { + ( $node, $start ) = @_; + if ( ref $node ) + { # it's an element + $tag = $node->{'_tag'}; + if ($start) + { # on the way in + foreach my $att ( grep { ! m{^(_|/$)} } keys %$node ) + { # fix attribute names instead of dying + my $new_att= $att; + if( $att=~ m{^\d}) { $new_att= "a$att"; } + $new_att=~ s{[^\w\d:_-]}{}g; + $new_att ||= 'a'; + if( $new_att ne $att) { $node->{$new_att}= delete $node->{$att}; } + } + + if ( $empty_element_map->{$tag} && (!@{ $node->{'_content'} || []}) ) + { $xml.= $node->starttag_XML( undef, 1 ); } + else + { $xml.= $node->starttag_XML(undef); } + } + else + { # on the way out + unless ( $empty_element_map->{$tag} and !@{ $node->{'_content'} || [] } ) + { $xml.= $node->endtag_XML(); + } # otherwise it will have been an <... /> tag. 
+ } + } + elsif( $node=~ /)/s, $node) # chunks are CDATA sections or normal text + { $xml.= $chunk =~ m{/>/g; + $html =~ s/"/"/g; + $html =~ s/'/'/g; + + return $html; + } + + + + +sub _check_xml + { my( $xml)= @_; # $xml is a ref to the xml string + my $ok= eval { XML::Parser->new->parse( $$xml); }; + #if( $ok) { warn " parse OK\n"; } + return $ok; + } + +sub _encoding_from_meta + { my( $tree)= @_; + my $enc="iso-8859-1"; + my @meta= $tree->find( 'meta'); + foreach my $meta (@meta) + { if( $meta->{'http-equiv'} && ($meta->{'http-equiv'} =~ m{^\s*content-type\s*}i) + && $meta->{content} && ($meta->{content} =~ m{^\s*text/html\s*;\s*charset\s*=\s*(\S*)\s*}i) + ) + { $enc= lc $1; + #warn " encoding from meta tag is '$enc'\n"; + last; + } + } + return $enc; + } + +{ sub _to_utf8 + { my( $encoding, $string)= @_; + local $SIG{__DIE__}; + if( _use( 'Encode')) + { Encode::from_to( $string, $encoding => 'utf8', 0x0400); } # 0x0400 is Encode::FB_XMLCREF + elsif( _use( 'Text::Iconv')) + { my $converter = eval { Text::Iconv->new( $encoding => "utf8") }; + if( $converter) { $string= $converter->convert( $string); } + } + elsif( _use( 'Unicode::Map8') && _use( 'Unicode::String')) + { my $map= Unicode::Map8->new( $encoding); + $string= $map->tou( $string)->utf8; + } + $string=~ s{[\x00-\x08\x0B\x0C\x0E-\x1F]}{}g; # get rid of control chars, portable in 5.6 + return $string; + } +} + + +sub _indent_xhtml + { my( $xhtml)= @_; # $xhtml is a ref + my %block_tag= map { $_ => 1 } qw( html + head + meta title link script base + body + h1 h2 h3 h4 h5 h6 + p br address blockquote pre + ol ul li dd dl dt + table tr td th tbody tfoot thead col colgroup caption + div frame frameset hr + ); + + my $level=0; + $$xhtml=~ s{( (?:|[CDATA[.*?]]>)) # ignore comments and CDATA sections + | <(\w+)((?:\s+\w+\s*=\s*(?:"[^"]*"|'[^']*'))*\s*/>) # empty tag + | <(\w+) # start tag + |}); + my $nl= $4 eq 'html' ? 
'' : "\n"; + "$nl$indent<$4"; + } + elsif( $5 && $block_tag{$5}) { $level--; " 1 } qw( xsl css); + my $ss= $t->{twig_elt_class}->new( $PI); + if( $text_type{$type}) + { $ss->_set_pi( 'xml-stylesheet', qq{type="text/$type" href="$href"}); } + else + { croak "unsupported style sheet type '$type'"; } + + $t->_add_cpi_outside_of_root( leading_cpi => $ss); + return $t; + } + +{ my %used; # module => 1 if require ok, 0 otherwise + my %disallowed; # for testing, refuses to _use modules in this hash + + sub _disallow_use ## no critic (Subroutines::ProhibitNestedSubs); + { my( @modules)= @_; + $disallowed{$_}= 1 foreach (@modules); + } + + sub _allow_use ## no critic (Subroutines::ProhibitNestedSubs); + { my( @modules)= @_; + $disallowed{$_}= 0 foreach (@modules); + } + + sub _use ## no critic (Subroutines::ProhibitNestedSubs); + { my( $module, $version)= @_; + $version ||= 0; + if( $disallowed{$module}) { return 0; } + if( $used{$module}) { return 1; } + if( eval "require $module") { import $module; $used{$module}= 1; # no critic ProhibitStringyEval + if( $version) + { + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + if( ${"${module}::VERSION"} >= $version ) { return 1; } + else { return 0; } + } + else + { return 1; } + } + else { $used{$module}= 0; return 0; } + } +} + +# used to solve the [n] predicates while avoiding getting the entire list +# needs a prototype to accept passing bare blocks +sub _first_n(&$@) ## no critic (Subroutines::ProhibitSubroutinePrototypes); + { my $coderef= shift; + my $n= shift; + my $i=0; + if( $n > 0) + { foreach (@_) { if( &$coderef) { $i++; return $_ if( $i == $n); } } } + elsif( $n < 0) + { foreach (reverse @_) { if( &$coderef) { $i--; return $_ if( $i == $n); } } } + else + { croak "illegal position number 0"; } + return undef; + } + +sub _slurp_uri + { my( $uri, $base)= @_; + if( $uri=~ m{^\w+://}) { _use( 'LWP::Simple'); return LWP::Simple::get( $uri); } + else { return _slurp( _based_filename( $uri, $base)); } + } + +sub _based_filename + { my( $filename, $base)= @_; + # cf. XML/Parser.pm's file_ext_ent_handler + if (defined($base) and not ($filename =~ m{^(?:[\\/]|\w+:)})) + { my $newpath = $base; + $newpath =~ s{[^\\/:]*$}{$filename}; + $filename = $newpath; + } + return $filename; + } + +sub _slurp + { my( $filename)= @_; + my $to_slurp; + open( $to_slurp, "<$filename") or croak "cannot open '$filename': $!"; + local $/= undef; + my $content= <$to_slurp>; + close $to_slurp; + return $content; + } + +sub _slurp_fh + { my( $fh)= @_; + local $/= undef; + my $content= <$fh>; + return $content; + } + +# I should really add extra options to allow better configuration of the +# LWP::UserAgent object +# this method forks (except on VMS!) +# - the child gets the data and copies it to the pipe, +# - the parent reads the stream and sends it to XML::Parser +# the data is cut it chunks the size of the XML::Parser::Expat buffer +# the method returns the twig and the status +sub _parseurl + { my( $t, $safe, $url, $agent)= @_; + _use( 'LWP') || croak "LWP not available, needed to use parseurl methods"; + if( $^O ne 'VMS') + { pipe( README, WRITEME) or croak "cannot create connected pipes: $!"; + if( my $pid= fork) + { # parent code: parse the incoming file + close WRITEME; # no need to write + my $result= $safe ? $t->safe_parse( \*README) : $t->parse( \*README); + close README; + return $@ ? 
0 : $t; + } + else + { # child + close README; # no need to read + local $|=1; + $agent ||= LWP::UserAgent->new; + my $request = HTTP::Request->new( GET => $url); + # _pass_url_content is called with chunks of data the same size as + # the XML::Parser buffer + my $response = $agent->request( $request, + sub { _pass_url_content( \*WRITEME, @_); }, $BUFSIZE); + $response->is_success or croak "$url ", $response->message; + close WRITEME; + CORE::exit(); # CORE is there for mod_perl (which redefines exit) + } + } + else + { # VMS branch (hard to test!) + local $|=1; + $agent ||= LWP::UserAgent->new; + my $request = HTTP::Request->new( GET => $url); + my $response = $agent->request( $request); + $response->is_success or croak "$url ", $response->message; + my $result= $safe ? $t->safe_parse($response->content) : $t->parse($response->content); + return $@ ? 0 : $t; + } + + } + +# get the (hopefully!) XML data from the URL and +sub _pass_url_content + { my( $fh, $data, $response, $protocol)= @_; + print {$fh} $data; + } + +sub add_options + { my %args= map { $_, 1 } @_; + %args= _normalize_args( %args); + foreach (keys %args) { $valid_option{$_}++; } + } + +sub _pretty_print_styles { return XML::Twig::Elt::_pretty_print_styles(); } + +sub _twig_store_internal_dtd + { + # warn " in _twig_store_internal_dtd...\n"; # DEBUG handler + my( $p, $string)= @_; + my $t= $p->{twig}; + if( $t->{twig_keep_encoding}) { $string= $p->original_string(); } + $t->{twig_doctype}->{internal} .= $string; + return; + } + +sub _twig_stop_storing_internal_dtd + { # warn " in _twig_stop_storing_internal_dtd...\n"; # DEBUG handler + my $p= shift; + if( @saved_default_handler && defined $saved_default_handler[1]) + { $p->setHandlers( @saved_default_handler); } + else + { + $p->setHandlers( Default => undef); + } + $p->{twig}->{twig_doctype}->{internal}=~ s{^\s*\[}{}; + $p->{twig}->{twig_doctype}->{internal}=~ s{\]\s*$}{}; + return; + } + +sub _twig_doctype_fin_print + { # warn " in _twig_doctype_fin_print...\n"; # DEBUG handler + my( $p)= shift; + if( $p->{twig}->{twig_doctype}->{has_internal} && !$expat_1_95_2) { print ' ]>'; } + return; + } + + +sub _normalize_args + { my %normalized_args; + while( my $key= shift ) + { $key= join '', map { ucfirst } split /_/, $key; + #$key= "Twig".$key unless( substr( $key, 0, 4) eq 'Twig'); + $normalized_args{$key}= shift ; + } + return %normalized_args; + } + +sub _is_fh { return unless $_[0]; return $_[0] if( isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar')); } + +sub _set_handler + { my( $handlers, $whole_path, $handler)= @_; + + my $H_SPECIAL = qr{($ALL|$DEFAULT|$COMMENT|$TEXT)}; + my $H_PI = qr{(\?|$PI)\s*(([^\s]*)\s*)}; + my $H_LEVEL = qr{level \s* \( \s* ([0-9]+) \s* \)}x; + my $H_REGEXP = qr{\(\?([\^xism]*)(-[\^xism]*)?:(.*)\)}x; + my $H_XPATH = qr{(/?/?$REG_TAG_PART? 
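_parseurl above forks (except on VMS): the child streams the HTTP response into a pipe in XML::Parser-buffer-sized chunks while the parent parses it, so large documents are never held fully in memory. Usage sketch of the public wrappers, URL hypothetical:

    my $ua = LWP::UserAgent->new( timeout => 30 );
    $twig->safe_parseurl( 'http://example.com/feed.xml', $ua )
        or warn "fetch/parse failed: $@";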
\s* ($REG_PREDICATE\s*)?)+}x; + + my $prev_handler; + + my $cpath= $whole_path; + #warn "\$cpath: '$cpath\n"; + while( $cpath && $cpath=~ s{^\s*($H_SPECIAL|$H_PI|$H_LEVEL|$H_REGEXP|$H_XPATH)\s*($|\|)}{}) + { my $path= $1; + #warn "\$cpath: '$cpath' - $path: '$path'\n"; + $prev_handler ||= $handlers->{handlers}->{string}->{$path} || undef; # $prev_handler gets the first found handler + + _set_special_handler ( $handlers, $path, $handler, $prev_handler) + || _set_pi_handler ( $handlers, $path, $handler, $prev_handler) + || _set_level_handler ( $handlers, $path, $handler, $prev_handler) + || _set_regexp_handler ( $handlers, $path, $handler, $prev_handler) + || _set_xpath_handler ( $handlers, $path, $handler, $prev_handler) + || croak "unrecognized expression in handler: '$whole_path'"; + + # this both takes care of the simple (gi) handlers and store + # the handler code reference for other handlers + $handlers->{handlers}->{string}->{$path}= $handler; + } + + if( $cpath) { croak "unrecognized expression in handler: '$whole_path'"; } + + return $prev_handler; + } + + +sub _set_special_handler + { my( $handlers, $path, $handler, $prev_handler)= @_; + if( $path =~ m{^\s*($ALL|$DEFAULT|$COMMENT|$TEXT)\s*$}io ) + { $handlers->{handlers}->{$1}= $handler; + return 1; + } + else + { return 0; } + } + +sub _set_xpath_handler + { my( $handlers, $path, $handler, $prev_handler)= @_; + if( my $handler_data= _parse_xpath_handler( $path, $handler)) + { _add_handler( $handlers, $handler_data, $path, $prev_handler); + return 1; + } + else + { return 0; } + } + +sub _add_handler + { my( $handlers, $handler_data, $path, $prev_handler)= @_; + + my $tag= $handler_data->{tag}; + my @handlers= $handlers->{xpath_handler}->{$tag} ? @{$handlers->{xpath_handler}->{$tag}} : (); + + if( $prev_handler) { @handlers= grep { $_->{path} ne $path } @handlers; } + + push @handlers, $handler_data if( $handler_data->{handler}); + + if( @handlers > 1) + { @handlers= sort { (($b->{score}->{type} || 0) <=> ($a->{score}->{type} || 0)) + || (($b->{score}->{anchored} || 0) <=> ($a->{score}->{anchored} || 0)) + || (($b->{score}->{steps} || 0) <=> ($a->{score}->{steps} || 0)) + || (($b->{score}->{predicates} || 0) <=> ($a->{score}->{predicates} || 0)) + || (($b->{score}->{tests} || 0) <=> ($a->{score}->{tests} || 0)) + || ($a->{path} cmp $b->{path}) + } @handlers; + } + + $handlers->{xpath_handler}->{$tag}= \@handlers; + } + +sub _set_pi_handler + { my( $handlers, $path, $handler, $prev_handler)= @_; + # PI conditions ( '?target' => \&handler or '?' 
=> \&handler + # or '#PItarget' => \&handler or '#PI' => \&handler) + if( $path=~ /^\s*(?:\?|$PI)\s*(?:([^\s]*)\s*)$/) + { my $target= $1 || ''; + # update the path_handlers count, knowing that + # either the previous or the new handler can be undef + $handlers->{pi_handlers}->{$1}= $handler; + return 1; + } + else + { return 0; + } + } + +sub _set_level_handler + { my( $handlers, $path, $handler, $prev_handler)= @_; + if( $path =~ m{^ \s* level \s* \( \s* ([0-9]+) \s* \) \s* $}ox ) + { my $level= $1; + my $sub= sub { my( $stack)= @_; return( ($stack->[-1]->{$ST_TAG} !~ m{^#}) && (scalar @$stack == $level + 1) ) }; + my $handler_data= { tag=> '*', score => { type => $LEVEL_TRIGGER}, trigger => $sub, + path => $path, handler => $handler, test_on_text => 0 + }; + _add_handler( $handlers, $handler_data, $path, $prev_handler); + return 1; + } + else + { return 0; } + } + +sub _set_regexp_handler + { my( $handlers, $path, $handler, $prev_handler)= @_; + # if the expression was a regexp it is now a string (it was stringified when it became a hash key) + if( $path=~ m{^\(\?([\^xism]*)(?:-[\^xism]*)?:(.*)\)$}) + { my $regexp= qr/(?$1:$2)/; # convert it back into a regexp + my $sub= sub { my( $stack)= @_; return( $stack->[-1]->{$ST_TAG} =~ $regexp ) }; + my $handler_data= { tag=> '*', score => { type => $REGEXP_TRIGGER} , trigger => $sub, + path => $path, handler => $handler, test_on_text => 0 + }; + _add_handler( $handlers, $handler_data, $path, $prev_handler); + return 1; + } + else + { return 0; } + } + +my $DEBUG_HANDLER= 0; # 0 or 1 (output the handler checking code) or 2 (super verbose) +my $handler_string; # store the handler itself +sub _set_debug_handler { $DEBUG_HANDLER= shift; } +sub _warn_debug_handler { if( $DEBUG_HANDLER < 3) { warn @_; } else { $handler_string .= join( '', @_); } } +sub _return_debug_handler { my $string= $handler_string; $handler_string=''; return $string; } + +sub _parse_xpath_handler + { my( $xpath, $handler)= @_; + my $xpath_original= $xpath; + + + if( $DEBUG_HANDLER >=1) { _warn_debug_handler( "\n\nparsing path '$xpath'\n"); } + + my $path_to_check= $xpath; + $path_to_check=~ s{/?/?$REG_TAG_PART?\s*(?:$REG_PREDICATE\s*)?}{}g; + if( $DEBUG_HANDLER && $path_to_check=~ /\S/) { _warn_debug_handler( "left: $path_to_check\n"); } + return if( $path_to_check=~ /\S/); + + (my $xpath_to_display= $xpath)=~ s{(["{}'\[\]\@\$])}{\\$1}g; + + my @xpath_steps; + my $last_token_is_sep; + + while( $xpath=~ s{^\s* + ( (//?) # separator + | (?:$REG_TAG_PART\s*(?:$REG_PREDICATE\s*)?) # tag name and optional predicate + | (?:$REG_PREDICATE) # just a predicate + ) + } + {}x + ) + { # check that we have alternating separators and steps + if( $2) # found a separator + { if( $last_token_is_sep) { return 0; } # 2 separators in a row + $last_token_is_sep= 1; + } + else + { if( defined( $last_token_is_sep) && !$last_token_is_sep) { return 0; } # 2 steps in a row + $last_token_is_sep= 0; + } + + push @xpath_steps, $1; + } + if( $last_token_is_sep) { return 0; } # expression cannot end with a separator + + my $i=-1; + + my $perlfunc= _join_n( $NO_WARNINGS . ';', + q|my( $stack)= @_; |, + q|my @current_elts= (scalar @$stack); |, + q|my @new_current_elts; |, + q|my $elt; |, + ($DEBUG_HANDLER >= 1) && (qq#warn q{checking path '$xpath_to_display'\n};#), + ); + + + my $last_tag=''; + my $anchored= $xpath_original=~ m{^\s*/(?!/)} ? 
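_set_handler above dispatches each handler expression to one of the trigger kinds just defined: special (_all_, _default_), processing instruction, level(n), regexp, or XPath-like path. All of these forms can appear as twig_handlers keys; an illustrative sketch:

    my $twig = XML::Twig->new( twig_handlers => {
        para             => sub { $_->set_tag( 'p' ); },         # plain tag name
        'chapter/title'  => sub { print $_->text, "\n"; },       # path
        'level(2)'       => sub { $_->set_att( depth => 2 ); },  # any element at depth 2
        qr/^fig(ure)?$/  => sub { $_->delete; },                 # regexp trigger (stringified hash key, rebuilt above)
        '_default_'      => sub { 1; },                          # only when nothing else matched
    });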
1 : 0; + my $score={ type => $XPATH_TRIGGER, anchored => $anchored }; + my $flag= { test_on_text => 0 }; + my $sep='/'; # '/' or '//' + while( my $xpath_step= pop @xpath_steps) + { my( $tag, $predicate)= $xpath_step =~ m{^($REG_TAG_PART)?(?:\[(.*)\])?\s*$}; + $score->{steps}++; + $tag||='*'; + + my $warn_empty_stack= $DEBUG_HANDLER >= 2 ? qq{warn "return with empty stack\\n";} : ''; + + if( $predicate) + { if( $DEBUG_HANDLER >= 2) { _warn_debug_handler( "predicate is: '$predicate'\n"); } + # changes $predicate (from an XPath expression to a Perl one) + if( $predicate=~ m{^\s*$REG_NUMBER\s*$}) { croak "position selector [$predicate] not supported on twig_handlers"; } + _parse_predicate_in_handler( $predicate, $flag, $score); + if( $DEBUG_HANDLER >= 2) { _warn_debug_handler( "predicate becomes: '$predicate'\n"); } + } + + my $tag_cond= _tag_cond( $tag); + my $cond= join( " && ", grep { $_ } $tag_cond, $predicate) || 1; + + if( $css_sel && $tag=~ m{\.}) { $tag=~s{\.[^.]*$}{}; $tag ||='*'; } + $tag=~ s{(.)#.+$}{$1}; + + $last_tag ||= $tag; + + if( $sep eq '/') + { + $perlfunc .= sprintf( _join_n( q#foreach my $current_elt (@current_elts) #, + q# { next if( !$current_elt); #, + q# $current_elt--; #, + q# $elt= $stack->[$current_elt]; #, + q# if( %s) { push @new_current_elts, $current_elt;} #, + q# } #, + ), + $cond + ); + } + elsif( $sep eq '//') + { + $perlfunc .= sprintf( _join_n( q#foreach my $current_elt (@current_elts) #, + q# { next if( !$current_elt); #, + q# $current_elt--; #, + q# my $candidate= $current_elt; #, + q# while( $candidate >=0) #, + q# { $elt= $stack->[$candidate]; #, + q# if( %s) { push @new_current_elts, $candidate;} #, + q# $candidate--; #, + q# } #, + q# } #, + ), + $cond + ); + } + my $warn= $DEBUG_HANDLER >= 2 ? _join_n( qq#warn qq%fail at cond '$cond'%;#) : ''; + $perlfunc .= sprintf( _join_n( q#unless( @new_current_elts) { %s return 0; } #, + q#@current_elts= @new_current_elts; #, + q#@new_current_elts=(); #, + ), + $warn + ); + + $sep= pop @xpath_steps; + } + + if( $anchored) # there should be a better way, but this works + { + my $warn= $DEBUG_HANDLER >= 2 ? _join_n( qq#warn qq{fail, stack not empty};#) : ''; + $perlfunc .= sprintf( _join_n( q#if( ! grep { $_ == 0 } @current_elts) { %s return 0;}#), $warn); + } + + $perlfunc.= qq{warn "handler for '$xpath_to_display' triggered\\n";\n} if( $DEBUG_HANDLER >=2); + $perlfunc.= qq{return q{$xpath_original};\n}; + _warn_debug_handler( "\nperlfunc:\n$perlfunc\n") if( $DEBUG_HANDLER>=1); + my $s= eval "sub { $perlfunc }"; + if( $@) + { croak "wrong handler condition '$xpath' ($@);" } + + _warn_debug_handler( "last tag: '$last_tag', test_on_text: '$flag->{test_on_text}'\n") if( $DEBUG_HANDLER >=1); + _warn_debug_handler( "score: ", join( ' ', map { "$_: $score->{$_}" } sort keys %$score), "\n") if( $DEBUG_HANDLER >=1); + return { tag=> $last_tag, score => $score, trigger => $s, path => $xpath_original, handler => $handler, test_on_text => $flag->{test_on_text} }; + } + +sub _join_n { return join( "\n", @_, ''); } + +# the "tag" part can be , . or # (where tag can be *, or start with # for hidden tags) +sub _tag_cond + { my( $full_tag)= @_; + + my( $tag, $class, $id); + if( $full_tag=~ m{^(.+)#(.+)$}) + { ($tag, $id)= ($1, $2); } # # + else + { ( $tag, $class)= $css_sel ? $full_tag=~ m{^(.*?)(?:\.([^.]*))?$} : ($full_tag, undef); } + + my $tag_cond = $tag && $tag ne '*' ? qq#(\$elt->{'$ST_TAG'} eq "$tag")# : ''; + my $id_cond = defined $id ? qq#(\$elt->{id} eq "$id")# : ''; + my $class_cond = defined $class ? 
qq#(\$elt->{class}=~ m{(^| )$class( |\$)})# : ''; + + my $full_cond= join( ' && ', grep { $_ } ( $tag_cond, $class_cond, $id_cond)); + + return $full_cond; + } + +# input: the predicate ($_[0]) which will be changed in place +# flags, a hashref with various flags (like test_on_text) +# the score +sub _parse_predicate_in_handler + { my( $flag, $score)= @_[1..2]; + $_[0]=~ s{( ($REG_STRING) # strings + |\@($REG_TAG_NAME)(\s* $REG_MATCH \s* $REG_REGEXP) # @att and regexp + |\@($REG_TAG_NAME)(?=\s*(?:[><=!])) # @att followed by a comparison operator + |\@($REG_TAG_NAME) # @att (not followed by a comparison operator) + |=~|!~ # matching operators + |([><]=?|=|!=)(?=\s*[\d+-]) # test before a number + |([><]=?|=|!=) # test, other cases + |($REG_FUNCTION) # no arg functions + # this bit is a mess, but it is the only solution with this half-baked parser + |(string\(\s*$REG_NAME\s*\)\s*$REG_MATCH\s*$REG_REGEXP) # string( child)=~ /regexp/ + |(string\(\s*$REG_NAME\s*\)\s*$REG_COMP\s*$REG_STRING) # string( child) = "value" (or other test) + |(string\(\s*$REG_NAME\s*\)\s*$REG_COMP\s*$REG_NUMBER) # string( child) = nb (or other test) + |(and|or) + # |($REG_NAME(?=\s*(and|or|$))) # nested tag name (needs to be after all other unquoted strings) + |($REG_TAG_IN_PREDICATE) # nested tag name (needs to be after all other unquoted strings) + + )} + { my( $token, $str, $att_re_name, $att_re_regexp, $att, $bare_att, $num_test, $alpha_test, $func, $str_regexp, $str_test_alpha, $str_test_num, $and_or, $tag) + = ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14); + + $score->{predicates}++; + + # store tests on text (they are not always allowed) + if( $func || $str_regexp || $str_test_num || $str_test_alpha ) { $flag->{test_on_text}= 1; } + + if( defined $str) { $token } + elsif( $tag) { qq{(\$elt->{'$ST_ELT'} && \$elt->{'$ST_ELT'}->has_child( '$tag'))} } + elsif( $att) { $att=~ m{^#} ? qq{ (\$elt->{'$ST_ELT'} && \$elt->{'$ST_ELT'}->{att}->{'$att'})} + : qq{\$elt->{'$att'}} + } + elsif( $att_re_name) { $att_re_name=~ m{^#} ? qq{ (\$elt->{'$ST_ELT'} && \$elt->{'$ST_ELT'}->{att}->{'$att_re_name'}$att_re_regexp)} + : qq{\$elt->{'$att_re_name'}$att_re_regexp} + } + # for some reason Devel::Cover flags the following lines as not tested. They are though. + elsif( $bare_att) { $bare_att=~ m{^#} ? qq{(\$elt->{'$ST_ELT'} && defined(\$elt->{'$ST_ELT'}->{att}->{'$bare_att'}))} + : qq{defined( \$elt->{'$bare_att'})} + } + elsif( $num_test && ($num_test eq '=') ) { "==" } # others tests are unchanged + elsif( $alpha_test) { $PERL_ALPHA_TEST{$alpha_test} } + elsif( $func && $func=~ m{^string}) + { "\$elt->{'$ST_ELT'}->text"; } + elsif( $str_regexp && $str_regexp =~ m{string\(\s*($REG_TAG_NAME)\s*\)\s*($REG_MATCH)\s*($REG_REGEXP)}) + { "defined( _first_n { \$_->text $2 $3 } 1, \$elt->{'$ST_ELT'}->_children( '$1'))"; } + elsif( $str_test_alpha && $str_test_alpha =~ m{string\(\s*($REG_TAG_NAME)\s*\)\s*($REG_COMP)\s*($REG_STRING)}) + { my( $tag, $op, $str)= ($1, $2, $3); + $str=~ s{(?<=.)'(?=.)}{\\'}g; # escape a quote within the string + $str=~ s{^"}{'}; + $str=~ s{"$}{'}; + "defined( _first_n { \$_->text $PERL_ALPHA_TEST{$op} $str } 1, \$elt->{'$ST_ELT'}->children( '$tag'))"; } + elsif( $str_test_num && $str_test_num =~ m{string\(\s*($REG_TAG_NAME)\s*\)\s*($REG_COMP)\s*($REG_NUMBER)}) + { my $test= ($2 eq '=') ? '==' : $2; + "defined( _first_n { \$_->text $test $3 } 1, \$elt->{'$ST_ELT'}->children( '$1'))"; + } + elsif( $and_or) { $score->{tests}++; $and_or eq 'and' ? 
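_parse_predicate_in_handler above rewrites each XPath-style predicate into Perl code evaluated against the context stack, covering attribute tests, comparisons, and string(child) tests (the latter are rejected on twig_roots by a check further down). Conditions of the kinds it accepts, as a sketch:

    my $twig = XML::Twig->new( twig_handlers => {
        'section[@status="draft"]'       => sub { $_->set_att( class => 'draft' ); },  # attribute equality
        'item[@price > 10]'              => sub { print $_->text, "\n"; },             # numeric test, '=' becomes '=='
        'book[string(title) =~ /perl/i]' => sub { $_->set_att( topic => 'perl' ); },   # test on a child's text
    });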
'&&' : '||' ; } + else { $token; } + }gexs; + } + + +sub setCharHandler + { my( $t, $handler)= @_; + $t->{twig_char_handler}= $handler; + } + + +sub _reset_handlers + { my $handlers= shift; + delete $handlers->{handlers}; + delete $handlers->{path_handlers}; + delete $handlers->{subpath_handlers}; + $handlers->{attcond_handlers_exp}=[] if( $handlers->{attcond_handlers}); + delete $handlers->{attcond_handlers}; + } + +sub _set_handlers + { my $handlers= shift || return; + my $set_handlers= {}; + foreach my $path (keys %{$handlers}) + { _set_handler( $set_handlers, $path, $handlers->{$path}); } + + return $set_handlers; + } + + +sub setTwigHandler + { my( $t, $path, $handler)= @_; + $t->{twig_handlers} ||={}; + return _set_handler( $t->{twig_handlers}, $path, $handler); + } + +sub setTwigHandlers + { my( $t, $handlers)= @_; + my $previous_handlers= $t->{twig_handlers} || undef; + _reset_handlers( $t->{twig_handlers}); + $t->{twig_handlers}= _set_handlers( $handlers); + return $previous_handlers; + } + +sub setStartTagHandler + { my( $t, $path, $handler)= @_; + $t->{twig_starttag_handlers}||={}; + return _set_handler( $t->{twig_starttag_handlers}, $path, $handler); + } + +sub setStartTagHandlers + { my( $t, $handlers)= @_; + my $previous_handlers= $t->{twig_starttag_handlers} || undef; + _reset_handlers( $t->{twig_starttag_handlers}); + $t->{twig_starttag_handlers}= _set_handlers( $handlers); + return $previous_handlers; + } + +sub setIgnoreEltsHandler + { my( $t, $path, $action)= @_; + $t->{twig_ignore_elts_handlers}||={}; + return _set_handler( $t->{twig_ignore_elts_handlers}, $path, $action ); + } + +sub setIgnoreEltsHandlers + { my( $t, $handlers)= @_; + my $previous_handlers= $t->{twig_ignore_elts_handlers}; + _reset_handlers( $t->{twig_ignore_elts_handlers}); + $t->{twig_ignore_elts_handlers}= _set_handlers( $handlers); + return $previous_handlers; + } + +sub setEndTagHandler + { my( $t, $path, $handler)= @_; + $t->{twig_endtag_handlers}||={}; + return _set_handler( $t->{twig_endtag_handlers}, $path,$handler); + } + +sub setEndTagHandlers + { my( $t, $handlers)= @_; + my $previous_handlers= $t->{twig_endtag_handlers}; + _reset_handlers( $t->{twig_endtag_handlers}); + $t->{twig_endtag_handlers}= _set_handlers( $handlers); + return $previous_handlers; + } + +# a little more complex: set the twig_handlers only if a code ref is given +sub setTwigRoots + { my( $t, $handlers)= @_; + my $previous_roots= $t->{twig_roots}; + _reset_handlers($t->{twig_roots}); + $t->{twig_roots}= _set_handlers( $handlers); + + _check_illegal_twig_roots_handlers( $t->{twig_roots}); + + foreach my $path (keys %{$handlers}) + { $t->{twig_handlers}||= {}; + _set_handler( $t->{twig_handlers}, $path, $handlers->{$path}) + if( ref($handlers->{$path}) && isa( $handlers->{$path}, 'CODE')); + } + return $previous_roots; + } + +sub _check_illegal_twig_roots_handlers + { my( $handlers)= @_; + foreach my $tag_handlers (values %{$handlers->{xpath_handler}}) + { foreach my $handler_data (@$tag_handlers) + { if( my $type= $handler_data->{test_on_text}) + { croak "string() condition not supported on twig_roots option"; } + } + } + return; + } + + +# just store the reference to the expat object in the twig +sub _twig_init + { # warn " in _twig_init...\n"; # DEBUG handler + + my $p= shift; + my $t=$p->{twig}; + + if( $t->{twig_parsing} ) { croak "cannot reuse a twig that is already parsing"; } + $t->{twig_parsing}=1; + + $t->{twig_parser}= $p; + if( $weakrefs) { weaken( $t->{twig_parser}); } + + # in case they had been created by a 
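The setter methods above can also be called after construction; each returns the previously installed handler(s) so they can be restored later. For instance:

    my $prev = $twig->setTwigHandler( para => sub { my( $t, $para ) = @_; $para->set_tag( 'p' ); } );
    $twig->setTwigRoots( { record => 1 } );            # value 1: build the subtree, no handler attached
    # later: $twig->setTwigHandler( para => $prev );   # put the previous handler back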
previous parse + delete $t->{twig_dtd}; + delete $t->{twig_doctype}; + delete $t->{twig_xmldecl}; + delete $t->{twig_root}; + + # if needed set the output filehandle + $t->_set_fh_to_twig_output_fh(); + return; + } + +# uses eval to catch the parser's death +sub safe_parse + { my $t= shift; + eval { $t->parse( @_); } ; + return $@ ? $t->_reset_twig_after_error : $t; + } + +sub safe_parsefile + { my $t= shift; + eval { $t->parsefile( @_); } ; + return $@ ? $t->_reset_twig_after_error : $t; + } + +# restore a twig in a proper state so it can be reused for a new parse +sub _reset_twig + { my $t= shift; + $t->{twig_parsing}= 0; + delete $t->{twig_current}; + delete $t->{extra_data}; + delete $t->{twig_dtd}; + delete $t->{twig_in_pcdata}; + delete $t->{twig_in_cdata}; + delete $t->{twig_stored_space}; + delete $t->{twig_entity_list}; + $t->root->delete if( $t->root); + delete $t->{twig_root}; + return $t; + } + +sub _reset_twig_after_error + { my $t= shift; + $t->_reset_twig; + return undef; + } + + +sub _add_or_discard_stored_spaces + { my $t= shift; + + $t->{twig_right_after_root}=0; #XX + + my $current= $t->{twig_current} or return; # ugly hack, with ignore on, twig_current can disappear + return unless length $t->{twig_stored_spaces}; + my $current_gi= $XML::Twig::index2gi[$current->{'gi'}]; + + if( ! $t->{twig_discard_all_spaces}) + { if( ! defined( $t->{twig_space_policy}->{$current_gi})) + { $t->{twig_space_policy}->{$current_gi}= _space_policy( $t, $current_gi); } + if( $t->{twig_space_policy}->{$current_gi} || ($t->{twig_stored_spaces}!~ m{\n}) || $t->{twig_preserve_space}) + { _insert_pcdata( $t, $t->{twig_stored_spaces} ); } + } + + $t->{twig_stored_spaces}=''; + + return; + } + +# the default twig handlers, which build the tree +sub _twig_start + { # warn " in _twig_start...\n"; # DEBUG handler + + #foreach my $s (@_) { next if ref $s; warn "$s: ", is_utf8( $s) ? 
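_twig_init above refuses to start while a parse is already in progress, and _reset_twig restores a clean state after a failed parse, so one twig object can be reused across documents. A sketch with hypothetical file names:

    my $twig = XML::Twig->new();
    for my $file ( 'a.xml', 'b.xml' ) {
        $twig->safe_parsefile( $file ) or do { warn "bad $file: $@"; next; };
        print "$file root: ", $twig->root->tag, "\n";
        $twig->purge;                      # drop the tree before the next file
    }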
"has flag" : "FLAG NOT SET"; } # YYY + + my ($p, $gi, @att)= @_; + my $t=$p->{twig}; + + # empty the stored pcdata (space stored in case they are really part of + # a pcdata element) or stored it if the space policy dictates so + # create a pcdata element with the spaces if need be + _add_or_discard_stored_spaces( $t); + my $parent= $t->{twig_current}; + + # if we were parsing PCDATA then we exit the pcdata + if( $t->{twig_in_pcdata}) + { $t->{twig_in_pcdata}= 0; + delete $parent->{'twig_current'}; + $parent= $parent->{parent}; + } + + # if we choose to keep the encoding then we need to parse the tag + if( my $func = $t->{parse_start_tag}) + { ($gi, @att)= &$func($p->original_string); } + elsif( $t->{twig_entities_in_attribute}) + { + ($gi,@att)= _parse_start_tag( $p->recognized_string); + $t->{twig_entities_in_attribute}=0; + } + + # if we are using an external DTD, we need to fill the default attributes + if( $t->{twig_read_external_dtd}) { _fill_default_atts( $t, $gi, \@att); } + + # filter the input data if need be + if( my $filter= $t->{twig_input_filter}) + { $gi= $filter->( $gi); + foreach my $att (@att) { $att= $filter->($att); } + } + + my $ns_decl; + if( $t->{twig_map_xmlns}) + { $ns_decl= _replace_ns( $t, \$gi, \@att); } + + my $elt= $t->{twig_elt_class}->new( $gi); + $elt->set_atts( @att); + + # now we can store the tag and atts + my $context= { $ST_TAG => $gi, $ST_ELT => $elt, @att}; + $context->{$ST_NS}= $ns_decl if $ns_decl; + if( $weakrefs) { weaken( $context->{$ST_ELT}); } + push @{$t->{_twig_context_stack}}, $context; + + delete $parent->{'twig_current'} if( $parent); + $t->{twig_current}= $elt; + $elt->{'twig_current'}=1; + + if( $parent) + { my $prev_sibling= $parent->{last_child}; + if( $prev_sibling) + { $prev_sibling->{next_sibling}= $elt; + $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; + } + + $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; + unless( $parent->{first_child}) { $parent->{first_child}= $elt; } + $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; + } + else + { # processing root + $t->set_root( $elt); + # call dtd handler if need be + $t->{twig_dtd_handler}->($t, $t->{twig_dtd}) + if( defined $t->{twig_dtd_handler}); + + # set this so we can catch external entities + # (the handler was modified during DTD processing) + if( $t->{twig_default_print}) + { $p->setHandlers( Default => \&_twig_print); } + elsif( $t->{twig_roots}) + { $p->setHandlers( Default => sub { return }); } + else + { $p->setHandlers( Default => \&_twig_default); } + } + + $elt->{empty}= $p->recognized_string=~ m{/\s*>$}s ? 
1 : 0; + + $elt->{extra_data}= $t->{extra_data} if( $t->{extra_data}); + $t->{extra_data}=''; + + # if the element is ID-ed then store that info + my $id= $elt->{'att'}->{$ID}; + if( defined $id) + { $t->{twig_id_list}->{$id}= $elt; + if( $weakrefs) { weaken( $t->{twig_id_list}->{$id}); } + } + + # call user handler if need be + if( $t->{twig_starttag_handlers}) + { # call all appropriate handlers + my @handlers= _handler( $t, $t->{twig_starttag_handlers}, $gi); + + local $_= $elt; + + foreach my $handler ( @handlers) + { $handler->($t, $elt) || last; } + # call _all_ handler if needed + if( my $all= $t->{twig_starttag_handlers}->{handlers}->{$ALL}) + { $all->($t, $elt); } + } + + # check if the tag is in the list of tags to be ignored + if( $t->{twig_ignore_elts_handlers}) + { my @handlers= _handler( $t, $t->{twig_ignore_elts_handlers}, $gi); + # only the first handler counts, it contains the action (discard/print/string) + if( @handlers) { my $action= shift @handlers; $t->ignore( $elt, $action); } + } + + if( $elt->{'att'}->{'xml:space'} && ( $elt->{'att'}->{'xml:space'} eq 'preserve')) { $t->{twig_preserve_space}++; } + + + return; + } + +sub _replace_ns + { my( $t, $gi, $atts)= @_; + my $decls; + foreach my $new_prefix ( $t->parser->new_ns_prefixes) + { my $uri= $t->parser->expand_ns_prefix( $new_prefix); + # replace the prefix if it is mapped + $decls->{$new_prefix}= $uri; + if( !$t->{twig_keep_original_prefix} && (my $mapped_prefix= $t->{twig_map_xmlns}->{$uri})) + { $new_prefix= $mapped_prefix; } + # now put the namespace declaration back in the element + if( $new_prefix eq '#default') + { push @$atts, "xmlns" => $uri; } + else + { push @$atts, "xmlns:$new_prefix" => $uri; } + } + + if( $t->{twig_keep_original_prefix}) + { # things become more complex: we need to find the original prefix + # and store both prefixes + my $ns_info= $t->_ns_info( $$gi); + my $map_att; + if( $ns_info->{mapped_prefix}) + { $$gi= "$ns_info->{mapped_prefix}:$$gi"; + $map_att->{$ns_info->{mapped_prefix}}= $ns_info->{prefix}; + } + my $att_name=1; + foreach( @$atts) + { if( $att_name) + { + my $ns_info= $t->_ns_info( $_); + if( $ns_info->{mapped_prefix}) + { $_= "$ns_info->{mapped_prefix}:$_"; + $map_att->{$ns_info->{mapped_prefix}}= $ns_info->{prefix}; + } + $att_name=0; + } + else + { $att_name=1; } + } + push @$atts, '#original_gi', $map_att if( $map_att); + } + else + { $$gi= $t->_replace_prefix( $$gi); + my $att_name=1; + foreach( @$atts) + { if( $att_name) { $_= $t->_replace_prefix( $_); $att_name=0; } + else { $att_name=1; } + } + } + return $decls; + } + + +# extract prefix, local_name, uri, mapped_prefix from a name +# will only work if called from a start or end tag handler +sub _ns_info + { my( $t, $name)= @_; + my $ns_info={}; + my $p= $t->parser; + $ns_info->{uri}= $p->namespace( $name); + return $ns_info unless( $ns_info->{uri}); + + $ns_info->{prefix}= _a_proper_ns_prefix( $p, $ns_info->{uri}); + $ns_info->{mapped_prefix}= $t->{twig_map_xmlns}->{$ns_info->{uri}} || $ns_info->{prefix}; + + return $ns_info; + } + +sub _a_proper_ns_prefix + { my( $p, $uri)= @_; + foreach my $prefix ($p->current_ns_prefixes) + { if( $p->expand_ns_prefix( $prefix) eq $uri) + { return $prefix; } + } + return; + } + +# returns the uri bound to a prefix in the original document +# only works in a handler +# can be used to deal with xsi:type attributes +sub original_uri + { my( $t, $prefix)= @_; + my $ST_NS = '##ns' ; + foreach my $ns (map { $_->{$ST_NS} if $_->{$ST_NS} } reverse @{$t->{_twig_context_stack}}) + { 
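_replace_ns above rewrites tag and attribute prefixes according to the map_xmlns option (or records the original prefix when keep_original_prefix is set), so handlers can be written against stable prefixes regardless of what the document declares. Sketch, file name hypothetical:

    my $twig = XML::Twig->new(
        map_xmlns     => { 'http://www.w3.org/1999/XSL/Transform' => 'xsl' },
        twig_handlers => { 'xsl:template' => sub { print $_->att( 'match' ), "\n"; } },
    );
    $twig->parsefile( 'stylesheet.xsl' );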
return $ns->{$prefix} || next; } + return; + } + + +sub _fill_default_atts + { my( $t, $gi, $atts)= @_; + my $dtd= $t->{twig_dtd}; + my $attlist= $dtd->{att}->{$gi}; + my %value= @$atts; + foreach my $att (keys %$attlist) + { if( !exists( $value{$att}) + && exists( $attlist->{$att}->{default}) + && ( $attlist->{$att}->{default} ne '#IMPLIED') + ) + { # the quotes are included in the default, so we need to remove them + my $default_value= substr( $attlist->{$att}->{default}, 1, -1); + push @$atts, $att, $default_value; + } + } + return; + } + + +# the default function to parse a start tag (in keep_encoding mode) +# can be overridden with the parse_start_tag method +# only works for 1-byte character sets +sub _parse_start_tag + { my $string= shift; + my( $gi, @atts); + + # get the gi (between < and the first space, / or > character) + #if( $string=~ s{^<\s*([^\s>/]*)[\s>/]*}{}s) + if( $string=~ s{^<\s*($REG_TAG_NAME)\s*[\s>/]}{}s) + { $gi= $1; } + else + { croak "error parsing tag '$string'"; } + while( $string=~ s{^([^\s=]*)\s*=\s*(["'])(.*?)\2\s*}{}s) + { push @atts, $1, $3; } + return $gi, @atts; + } + +sub set_root + { my( $t, $elt)= @_; + $t->{twig_root}= $elt; + if( $elt) + { $elt->{twig}= $t; + if( $weakrefs) { weaken( $elt->{twig}); } + } + return $t; + } + +sub _twig_end + { # warn " in _twig_end...\n"; # DEBUG handler + my ($p, $gi) = @_; + + my $t=$p->{twig}; + + if( $t->{twig_in_pcdata} && (my $text_handler= $t->{TwigHandlers}->{$TEXT}) ) + { local $_= $t->{twig_current}; $text_handler->( $t, $_) if $_; + } + + if( $t->{twig_map_xmlns}) { $gi= $t->_replace_prefix( $gi); } + + _add_or_discard_stored_spaces( $t); + + # the new twig_current is the parent + my $elt= $t->{twig_current}; + delete $elt->{'twig_current'}; + + # if we were parsing PCDATA then we exit the pcdata too + if( $t->{twig_in_pcdata}) + { + $t->{twig_in_pcdata}= 0; + $elt= $elt->{parent} if($elt->{parent}); + delete $elt->{'twig_current'}; + } + + # parent is the new current element + my $parent= $elt->{parent}; + $t->{twig_current}= $parent; + + if( $parent) + { $parent->{'twig_current'}=1; + # twig_to_be_normalized + if( $parent->{twig_to_be_normalized}) { $parent->normalize; $parent->{twig_to_be_normalized}=0; } + } + + if( $t->{extra_data}) + { $elt->_set_extra_data_before_end_tag( $t->{extra_data}); + $t->{extra_data}=''; + } + + if( $t->{twig_handlers}) + { # look for handlers + my @handlers= _handler( $t, $t->{twig_handlers}, $gi); + + if( $t->{twig_tdh}) + { if( @handlers) { push @{$t->{twig_handlers_to_trigger}}, [ $elt, \@handlers ]; } + if( my $all= $t->{twig_handlers}->{handlers}->{$ALL}) + { push @{$t->{twig_handlers_to_trigger}}, [ $elt, [$all] ]; } + } + else + { + local $_= $elt; # so we can use $_ in the handlers + + foreach my $handler ( @handlers) + { $handler->($t, $elt) || last; } + # call _all_ handler if needed + my $all= $t->{twig_handlers}->{handlers}->{$ALL}; + if( $all) + { $all->($t, $elt); } + if( @handlers || $all) { $t->{twig_right_after_root}=0; } + } + } + + # if twig_roots is set for the element then set appropriate handler + if( $t->{twig_root_depth} and ($p->depth == $t->{twig_root_depth}) ) + { if( $t->{twig_default_print}) + { # select the proper fh (and store the currently selected one) + $t->_set_fh_to_twig_output_fh(); + if( !$p->depth==1) { $t->{twig_right_after_root}=1; } #XX + if( $t->{twig_keep_encoding}) + { $p->setHandlers( %twig_handlers_roots_print_original); } + else + { $p->setHandlers( %twig_handlers_roots_print); } + } + else + { $p->setHandlers( 
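# A sketch of overriding the default start-tag parser shown above (_parse_start_tag);
# only relevant in keep_encoding mode. The regexes below are illustrative, not the
# module's own; a replacement parser must return the tag name followed by
# attribute/value pairs.
use XML::Twig;
my $ket= XML::Twig->new(
    keep_encoding   => 1,
    parse_start_tag => sub {
        my( $tag_string)= @_;                              # e.g. '<item id="i1">'
        my( $gi)  = $tag_string=~ m{^<\s*([^\s>/]+)};
        my @atts  = $tag_string=~ m{([^\s=]+)\s*=\s*"([^"]*)"}g;
        return( $gi, @atts);
      },
  );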
%twig_handlers_roots); } + } + + if( $elt->{'att'}->{'xml:space'} && ( $elt->{'att'}->{'xml:space'} eq 'preserve')) { $t->{twig_preserve_space}--; } + + pop @{$t->{_twig_context_stack}}; + return; + } + +sub _trigger_tdh + { my( $t)= @_; + + if( @{$t->{twig_handlers_to_trigger}}) + { my @handlers_to_trigger_now= sort { $a->[0]->cmp( $b->[0]) } @{$t->{twig_handlers_to_trigger}}; + foreach my $elt_handlers (@handlers_to_trigger_now) + { my( $handled_elt, $handlers_to_trigger)= @$elt_handlers; + foreach my $handler ( @$handlers_to_trigger) + { local $_= $handled_elt; $handler->($t, $handled_elt) || last; } + } + } + return; + } + +# return the list of handler that can be activated for an element +# (either of CODE ref's or 1's for twig_roots) + +sub _handler + { my( $t, $handlers, $gi)= @_; + + my @found_handlers=(); + my $found_handler; + + foreach my $handler ( map { @$_ } grep { $_ } $handlers->{xpath_handler}->{$gi}, $handlers->{xpath_handler}->{'*'}) + { my $trigger= $handler->{trigger}; + if( my $found_path= $trigger->( $t->{_twig_context_stack})) + { my $found_handler= $handler->{handler}; + push @found_handlers, $found_handler; + } + } + + # if no handler found call default handler if defined + if( !@found_handlers && defined $handlers->{handlers}->{$DEFAULT}) + { push @found_handlers, $handlers->{handlers}->{$DEFAULT}; } + + if( @found_handlers and $t->{twig_do_not_chain_handlers}) + { @found_handlers= ($found_handlers[0]); } + + return @found_handlers; # empty if no handler found + + } + + +sub _replace_prefix + { my( $t, $name)= @_; + my $p= $t->parser; + my $uri= $p->namespace( $name); + # try to get the namespace from default if none is found (for attributes) + # this should probably be an option + if( !$uri and( $name!~/^xml/)) { $uri= $p->expand_ns_prefix( '#default'); } + if( $uri) + { if (my $mapped_prefix= $t->{twig_map_xmlns}->{$uri} || $DEFAULT_URI2NS{$uri}) + { return "$mapped_prefix:$name"; } + else + { my $prefix= _a_proper_ns_prefix( $p, $uri); + if( $prefix eq '#default') { $prefix=''; } + return $prefix ? "$prefix:$name" : $name; + } + } + else + { return $name; } + } + + +sub _twig_char + { # warn " in _twig_char...\n"; # DEBUG handler + + my ($p, $string)= @_; + my $t=$p->{twig}; + + if( $t->{twig_keep_encoding}) + { if( !$t->{twig_in_cdata}) + { $string= $p->original_string(); } + else + { + use bytes; # > perl 5.5 + if( length( $string) < 1024) + { $string= $p->original_string(); } + else + { #warn "dodgy case"; + # TODO original_string does not hold the entire string, but $string is wrong + # I believe due to a bug in XML::Parser + # for now, we use the original string, even if it means that it's been converted to utf8 + } + } + } + + if( $t->{twig_input_filter}) { $string= $t->{twig_input_filter}->( $string); } + if( $t->{twig_char_handler}) { $string= $t->{twig_char_handler}->( $string); } + + my $elt= $t->{twig_current}; + + if( $t->{twig_in_cdata}) + { # text is the continuation of a previously created cdata + $elt->{cdata}.= $t->{twig_stored_spaces} . 
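# A sketch of how the handler lookup in _handler() above is used from user code:
# '_default_' fires when no dedicated handler matches, '_all_' fires for every
# element in addition to the others, and a handler returning false stops the
# remaining handlers for that element.
use XML::Twig;
my $ht= XML::Twig->new(
    twig_handlers => {
        'para'      => sub { print "para: ", $_->text, "\n"; 1; },
        '_default_' => sub { 1; },     # elements with no dedicated handler
        '_all_'     => sub { 1; },     # called for every element
      },
    # do_not_chain_handlers => 1,      # keep only the best matching handler instead
  );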
$string; + } + elsif( $t->{twig_in_pcdata}) + { # text is the continuation of a previously created pcdata + if( $t->{extra_data}) + { $elt->_push_extra_data_in_pcdata( $t->{extra_data}, length( $elt->{pcdata})); + $t->{extra_data}=''; + } + $elt->{pcdata}.= $string; + } + else + { + # text is just space, which might be discarded later + if( $string=~/\A\s*\Z/s) + { + if( $t->{extra_data}) + { # we got extra data (comment, pi), lets add the spaces to it + $t->{extra_data} .= $string; + } + else + { # no extra data, just store the spaces + $t->{twig_stored_spaces}.= $string; + } + } + else + { my $new_elt= _insert_pcdata( $t, $t->{twig_stored_spaces}.$string); + delete $elt->{'twig_current'}; + $new_elt->{'twig_current'}=1; + $t->{twig_current}= $new_elt; + $t->{twig_in_pcdata}=1; + if( $t->{extra_data}) + { $new_elt->_push_extra_data_in_pcdata( $t->{extra_data}, 0); + $t->{extra_data}=''; + } + } + } + return; + } + +sub _twig_cdatastart + { # warn " in _twig_cdatastart...\n"; # DEBUG handler + + my $p= shift; + my $t=$p->{twig}; + + $t->{twig_in_cdata}=1; + my $cdata= $t->{twig_elt_class}->new( $CDATA); + my $twig_current= $t->{twig_current}; + + if( $t->{twig_in_pcdata}) + { # create the node as a sibling of the PCDATA + $cdata->{prev_sibling}=$twig_current; if( $XML::Twig::weakrefs) { weaken( $cdata->{prev_sibling});} ; + $twig_current->{next_sibling}= $cdata; + my $parent= $twig_current->{parent}; + $cdata->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $cdata->{parent});} ; + $parent->{empty}=0; $parent->{last_child}=$cdata; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; + $t->{twig_in_pcdata}=0; + } + else + { # we have to create a PCDATA element if we need to store spaces + if( $t->_space_policy($XML::Twig::index2gi[$twig_current->{'gi'}]) && $t->{twig_stored_spaces}) + { _insert_pcdata( $t, $t->{twig_stored_spaces}); } + $t->{twig_stored_spaces}=''; + + # create the node as a child of the current element + $cdata->{parent}=$twig_current; if( $XML::Twig::weakrefs) { weaken( $cdata->{parent});} ; + if( my $prev_sibling= $twig_current->{last_child}) + { $cdata->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $cdata->{prev_sibling});} ; + $prev_sibling->{next_sibling}= $cdata; + } + else + { $twig_current->{first_child}= $cdata; } + $twig_current->{empty}=0; $twig_current->{last_child}=$cdata; if( $XML::Twig::weakrefs) { weaken( $twig_current->{last_child});} ; + + } + + delete $twig_current->{'twig_current'}; + $t->{twig_current}= $cdata; + $cdata->{'twig_current'}=1; + if( $t->{extra_data}) { $cdata->set_extra_data( $t->{extra_data}); $t->{extra_data}='' }; + return; + } + +sub _twig_cdataend + { # warn " in _twig_cdataend...\n"; # DEBUG handler + + my $p= shift; + my $t=$p->{twig}; + + $t->{twig_in_cdata}=0; + + my $elt= $t->{twig_current}; + delete $elt->{'twig_current'}; + my $cdata= $elt->{cdata}; + $elt->_set_cdata( $cdata); + + push @{$t->{_twig_context_stack}}, { $ST_TAG => $CDATA }; + + if( $t->{twig_handlers}) + { # look for handlers + my @handlers= _handler( $t, $t->{twig_handlers}, $CDATA); + local $_= $elt; # so we can use $_ in the handlers + foreach my $handler ( @handlers) { $handler->($t, $elt) || last; } + } + + pop @{$t->{_twig_context_stack}}; + + $elt= $elt->{parent}; + $t->{twig_current}= $elt; + $elt->{'twig_current'}=1; + + $t->{twig_long_cdata}=0; + return; + } + +sub _pi_elt_handlers + { my( $t, $pi)= @_; + my $pi_handlers= $t->{twig_handlers}->{pi_handlers} || return; + foreach my $handler ( 
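# A sketch of how the CDATA sections handled by _twig_cdatastart/_twig_cdataend
# above appear in the tree: as '#CDATA' child elements (document content made up).
use XML::Twig;
my $ct= XML::Twig->new;
$ct->parse( '<doc><code><![CDATA[ if( a < b ) { swap( a, b); } ]]></code></doc>');
my $code= $ct->first_elt( 'code');
print $code->text, "\n";                    # CDATA content returned as text
$code->first_child( '#CDATA')->print;       # or printed back as <![CDATA[...]]>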
$pi_handlers->{$pi->{target}}, $pi_handlers->{''}) + { if( $handler) { local $_= $pi; $handler->( $t, $pi) || last; } } + } + +sub _pi_text_handler + { my( $t, $target, $data)= @_; + if( my $handler= $t->{twig_handlers}->{pi_handlers}->{$target}) + { return $handler->( $t, $target, $data); } + if( my $handler= $t->{twig_handlers}->{pi_handlers}->{''}) + { return $handler->( $t, $target, $data); } + return defined( $data) && $data ne '' ? "" : "" ; + } + +sub _comment_elt_handler + { my( $t, $comment)= @_; + if( my $handler= $t->{twig_handlers}->{handlers}->{$COMMENT}) + { local $_= $comment; $handler->($t, $comment); } + } + +sub _comment_text_handler + { my( $t, $comment)= @_; + if( my $handler= $t->{twig_handlers}->{handlers}->{$COMMENT}) + { $comment= $handler->($t, $comment); + if( !defined $comment || $comment eq '') { return ''; } + } + return ""; + } + + + +sub _twig_comment + { # warn " in _twig_comment...\n"; # DEBUG handler + + my( $p, $comment_text)= @_; + my $t=$p->{twig}; + + if( $t->{twig_keep_encoding}) { $comment_text= substr( $p->original_string(), 4, -3); } + + $t->_twig_pi_comment( $p, $COMMENT, $t->{twig_keep_comments}, $t->{twig_process_comments}, + '_set_comment', '_comment_elt_handler', '_comment_text_handler', $comment_text + ); + return; + } + +sub _twig_pi + { # warn " in _twig_pi...\n"; # DEBUG handler + + my( $p, $target, $data)= @_; + my $t=$p->{twig}; + + if( $t->{twig_keep_encoding}) + { my $pi_text= substr( $p->original_string(), 2, -2); + ($target, $data)= split( /\s+/, $pi_text, 2); + } + + $t->_twig_pi_comment( $p, $PI, $t->{twig_keep_pi}, $t->{twig_process_pi}, + '_set_pi', '_pi_elt_handlers', '_pi_text_handler', $target, $data + ); + return; + } + +sub _twig_pi_comment + { my( $t, $p, $type, $keep, $process, $set, $elt_handler, $text_handler, @parser_args)= @_; + + if( $t->{twig_input_filter}) + { foreach my $arg (@parser_args) { $arg= $t->{twig_input_filter}->( $arg); } } + + # if pi/comments are to be kept then we piggyback them to the current element + if( $keep) + { # first add spaces + if( $t->{twig_stored_spaces}) + { $t->{extra_data}.= $t->{twig_stored_spaces}; + $t->{twig_stored_spaces}= ''; + } + + my $extra_data= $t->$text_handler( @parser_args); + $t->{extra_data}.= $extra_data; + + } + elsif( $process) + { + my $twig_current= $t->{twig_current}; # defined unless we are outside of the root + + my $elt= $t->{twig_elt_class}->new( $type); + $elt->$set( @parser_args); + if( $t->{extra_data}) + { $elt->set_extra_data( $t->{extra_data}); + $t->{extra_data}=''; + } + + unless( $t->root) + { $t->_add_cpi_outside_of_root( leading_cpi => $elt); + } + elsif( $t->{twig_in_pcdata}) + { # create the node as a sibling of the PCDATA + $elt->paste_after( $twig_current); + $t->{twig_in_pcdata}=0; + } + elsif( $twig_current) + { # we have to create a PCDATA element if we need to store spaces + if( $t->_space_policy($XML::Twig::index2gi[$twig_current->{'gi'}]) && $t->{twig_stored_spaces}) + { _insert_pcdata( $t, $t->{twig_stored_spaces}); } + $t->{twig_stored_spaces}=''; + # create the node as a child of the current element + $elt->paste_last_child( $twig_current); + } + else + { $t->_add_cpi_outside_of_root( trailing_cpi => $elt); } + + if( $twig_current) + { delete $twig_current->{'twig_current'}; + my $parent= $elt->{parent}; + $t->{twig_current}= $parent; + $parent->{'twig_current'}=1; + } + + $t->$elt_handler( $elt); + } + + } + + +# add a comment or pi before the first element +sub _add_cpi_outside_of_root + { my($t, $type, $elt)= @_; # $type is 
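# A sketch of the 'process' mode implemented by _twig_pi_comment() above: comments
# and processing instructions become '#COMMENT' and '#PI' elements in the tree
# (the document content below is made up).
use XML::Twig;
my $pt= XML::Twig->new( pi => 'process', comments => 'process');
$pt->parse( '<doc><?robots index="no"?><!-- draft --><p>text</p></doc>');
foreach my $pi ( $pt->root->children( '#PI'))
  { print $pi->target, " : ", $pi->data, "\n"; }
foreach my $c ( $pt->root->children( '#COMMENT'))
  { $c->print; print "\n"; }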
'leading_cpi' or 'trailing_cpi' + $t->{$type} ||= $t->{twig_elt_class}->new( '#CPI'); + # create the node as a child of the current element + $elt->paste_last_child( $t->{$type}); + return $t; + } + +sub _twig_final + { # warn " in _twig_final...\n"; # DEBUG handler + + my $p= shift; + my $t= $p->isa( 'XML::Twig') ? $p : $p->{twig}; + + # store trailing data + if( $t->{extra_data}) { $t->{trailing_cpi_text} = $t->{extra_data}; $t->{extra_data}=''; } + $t->{trailing_spaces}= $t->{twig_stored_spaces} || ''; + my $s= $t->{twig_stored_spaces}; $s=~s{\n}{\\n}g; + if( $t->{twig_stored_spaces}) { my $s= $t->{twig_stored_spaces}; } + + # restore the selected filehandle if needed + $t->_set_fh_to_selected_fh(); + + $t->_trigger_tdh if( $t->{twig_tdh}); + + select $t->{twig_original_selected_fh} if($t->{twig_original_selected_fh}); # probably dodgy + + if( exists $t->{twig_autoflush_data}) + { my @args; + push @args, $t->{twig_autoflush_data}->{fh} if( $t->{twig_autoflush_data}->{fh}); + push @args, @{$t->{twig_autoflush_data}->{args}} if( $t->{twig_autoflush_data}->{args}); + $t->flush( @args); + delete $t->{twig_autoflush_data}; + $t->root->delete if $t->root; + } + + # tries to clean-up (probably not very well at the moment) + #undef $p->{twig}; + undef $t->{twig_parser}; + delete $t->{twig_parsing}; + @{$t}{ qw( twig_parser twig_parsing _twig_context_stack twig_current) }=(); + + return $t; + } + +sub _insert_pcdata + { my( $t, $string)= @_; + # create a new PCDATA element + my $parent= $t->{twig_current}; # always defined + my $elt; + if( exists $t->{twig_alt_elt_class}) + { $elt= $t->{twig_elt_class}->new( $PCDATA); + $elt->_set_pcdata( $string); + } + else + { $elt= bless( { gi => $XML::Twig::gi2index{$PCDATA}, pcdata => $string }, 'XML::Twig::Elt'); } + + my $prev_sibling= $parent->{last_child}; + if( $prev_sibling) + { $prev_sibling->{next_sibling}= $elt; + $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; + } + else + { $parent->{first_child}= $elt; } + + $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; + $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; + $t->{twig_stored_spaces}=''; + return $elt; + } + +sub _space_policy + { my( $t, $gi)= @_; + my $policy; + $policy=0 if( $t->{twig_discard_spaces}); + $policy=1 if( $t->{twig_keep_spaces}); + $policy=1 if( $t->{twig_keep_spaces_in} + && $t->{twig_keep_spaces_in}->{$gi}); + $policy=0 if( $t->{twig_discard_spaces_in} + && $t->{twig_discard_spaces_in}->{$gi}); + return $policy; + } + + +sub _twig_entity + { # warn " in _twig_entity...\n"; # DEBUG handler + my( $p, $name, $val, $sysid, $pubid, $ndata, $param)= @_; + my $t=$p->{twig}; + + #{ no warnings; my $base= $p->base; warn "_twig_entity called: expand: '$t->{twig_expand_external_ents}', base: '$base', name: '$name', val: '$val', sysid: '$sysid', pubid: '$pubid', ndata: '$ndata', param: '$param'\n";} + + my $missing_entity=0; + + if( $sysid) + { if($ndata) + { if( ! -f _based_filename( $sysid, $p->base)) { $missing_entity= 1; } + } + else + { if( $t->{twig_expand_external_ents}) + { $val= eval { _slurp_uri( $sysid, $p->base) }; + if( ! 
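# A sketch of the options behind _space_policy() above: whitespace-only text is
# discarded by default, kept everywhere with keep_spaces, or kept per element.
use XML::Twig;
my $st= XML::Twig->new( keep_spaces_in => [ 'pre', 'code' ]);   # keep spaces only there
# my $st= XML::Twig->new( keep_spaces => 1);                    # or keep them everywhere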
defined $val) + { if( $t->{twig_extern_ent_nofail}) + { $missing_entity= 1; } + else + { _croak( "cannot load SYSTEM entity '$name' from '$sysid': $@", 3); } + } + } + } + } + + my $ent=XML::Twig::Entity->new( $name, $val, $sysid, $pubid, $ndata, $param); + if( $missing_entity) { $t->{twig_missing_system_entities}->{$name}= $ent; } + + my $entity_list= $t->entity_list; + if( $entity_list) { $entity_list->add( $ent); } + + if( $parser_version > 2.27) + { # this is really ugly, but with some versions of XML::Parser the value + # of the entity is not properly returned by the default handler + my $ent_decl= $ent->text; + if( $t->{twig_keep_encoding}) + { if( defined $ent->{val} && ($ent_decl !~ /["']/)) + { my $val= $ent->{val}; + $ent_decl .= $val =~ /"/ ? qq{'$val' } : qq{"$val" }; + } + # for my solaris box (perl 5.6.1, XML::Parser 2.31, expat?) + $t->{twig_doctype}->{internal}=~ s{{twig_doctype}->{internal} .= $ent_decl + unless( $t->{twig_doctype}->{internal}=~ m{original_string, ")\n"; # DEBUG handler + my( $p, $base, $sysid, $pubid)= @_; + my $t= $p->{twig}; + if( $t->{twig_no_expand}) + { my $ent_name= $t->{twig_keep_encoding} ? $p->original_string : $p->recognized_string; + _twig_insert_ent( $t, $ent_name); + return ''; + } + my $ent_content= eval { $t->{twig_ext_ent_handler}->( $p, $base, $sysid) }; + if( ! defined $ent_content) + { + my $ent_name = $p->recognized_string; + my $file = _based_filename( $sysid, $base); + my $error_message= "cannot expand $ent_name - cannot load '$file'"; + if( $t->{twig_extern_ent_nofail}) { return ""; } + else { _croak( $error_message); } + } + return $ent_content; + } + +# I use this so I can change the $Carp::CarpLevel (which determines how many call frames to skip when reporting an error) +sub _croak + { my( $message, $level)= @_; + $Carp::CarpLevel= $level || 0; + croak $message; + } + +sub _twig_xmldecl + { # warn " in _twig_xmldecl...\n"; # DEBUG handler + + my $p= shift; + my $t=$p->{twig}; + $t->{twig_xmldecl}||={}; # could have been set by set_output_encoding + $t->{twig_xmldecl}->{version}= shift; + $t->{twig_xmldecl}->{encoding}= shift; + $t->{twig_xmldecl}->{standalone}= shift; + return; + } + +sub _twig_doctype + { # warn " in _twig_doctype...\n"; # DEBUG handler + my( $p, $name, $sysid, $pub, $internal)= @_; + my $t=$p->{twig}; + $t->{twig_doctype}||= {}; # create + $t->{twig_doctype}->{name}= $name; # always there + $t->{twig_doctype}->{sysid}= $sysid; # + $t->{twig_doctype}->{pub}= $pub; # + + # now let's try to cope with XML::Parser 2.28 and above + if( $parser_version > 2.27) + { @saved_default_handler= $p->setHandlers( Default => \&_twig_store_internal_dtd, + Entity => \&_twig_entity, + ); + $p->setHandlers( DoctypeFin => \&_twig_stop_storing_internal_dtd); + $t->{twig_doctype}->{internal}=''; + } + else + # for XML::Parser before 2.28 + { $internal||=''; + $internal=~ s{^\s*\[}{}; + $internal=~ s{]\s*$}{}; + $t->{twig_doctype}->{internal}=$internal; + } + + # now check if we want to get the DTD info + if( $t->{twig_read_external_dtd} && $sysid) + { # let's build a fake document with an internal DTD + my $dtd= "<$name/>"; + + $t->save_global_state(); # save the globals (they will be reset by the following new) + my $t_dtd= XML::Twig->new( load_DTD => 1, ParseParamEnt => 1, error_context => $t->{ErrorContext} || 0); # create a temp twig + $t_dtd->parse( $dtd); # parse it + $t->{twig_dtd}= $t_dtd->{twig_dtd}; # grab the dtd info + #$t->{twig_dtd_is_external}=1; + $t->entity_list->_add_list( $t_dtd->entity_list) if( 
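# A sketch of the options that drive _twig_entity()/_twig_doctype() above; the file
# name and element name are hypothetical.
use XML::Twig;
my $dt= XML::Twig->new( load_DTD => 1, expand_external_ents => 1);
$dt->parsefile( 'doc.xml');                          # a document with a DOCTYPE
print join( ', ', $dt->entity_names), "\n";          # entities collected while parsing
print $dt->model( 'chapter') || '', "\n";            # content model, if declared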
$t_dtd->entity_list); # grab the entity info + $t->restore_global_state(); + } + return; + } + +sub _twig_element + { # warn " in _twig_element...\n"; # DEBUG handler + + my( $p, $name, $model)= @_; + my $t=$p->{twig}; + $t->{twig_dtd}||= {}; # may create the dtd + $t->{twig_dtd}->{model}||= {}; # may create the model hash + $t->{twig_dtd}->{elt_list}||= []; # ordered list of elements + push @{$t->{twig_dtd}->{elt_list}}, $name; # store the elt + $t->{twig_dtd}->{model}->{$name}= $model; # store the model + if( ($parser_version > 2.27) && ($t->{twig_doctype}->{internal}=~ m{(^|>)\s*$}) ) + { my $text= $XML::Twig::Elt::keep_encoding ? $p->original_string : $p->recognized_string; + unless( $text) + { # this version of XML::Parser does not return the text in the *_string method + # we need to rebuild it + $text= ""; + } + $t->{twig_doctype}->{internal} .= $text; + } + return; + } + +sub _twig_attlist + { # warn " in _twig_attlist...\n"; # DEBUG handler + + my( $p, $gi, $att, $type, $default, $fixed)= @_; + #warn "in attlist: gi: '$gi', att: '$att', type: '$type', default: '$default', fixed: '$fixed'\n"; + my $t=$p->{twig}; + $t->{twig_dtd}||= {}; # create dtd if need be + $t->{twig_dtd}->{$gi}||= {}; # create elt if need be + #$t->{twig_dtd}->{$gi}->{att}||= {}; # create att if need be + if( ($parser_version > 2.27) && ($t->{twig_doctype}->{internal}=~ m{(^|>)\s*$}) ) + { my $text= $XML::Twig::Elt::keep_encoding ? $p->original_string : $p->recognized_string; + unless( $text) + { # this version of XML::Parser does not return the text in the *_string method + # we need to rebuild it + my $att_decl="$att $type"; + $att_decl .= " #FIXED" if( $fixed); + $att_decl .= " $default" if( defined $default); + # 2 cases: there is already an attlist on that element or not + if( $t->{twig_dtd}->{att}->{$gi}) + { # there is already an attlist, add to it + $t->{twig_doctype}->{internal}=~ s{(} + { "$1$2\n" . ' ' x length( $1) . "$att_decl\n>"}es; + } + else + { # create the attlist + $t->{twig_doctype}->{internal}.= "" + } + } + } + $t->{twig_dtd}->{att}->{$gi}->{$att}= {} ; + $t->{twig_dtd}->{att}->{$gi}->{$att}->{type}= $type; + $t->{twig_dtd}->{att}->{$gi}->{$att}->{default}= $default if( defined $default); + $t->{twig_dtd}->{att}->{$gi}->{$att}->{fixed}= $fixed; + return; + } + +sub _twig_default + { # warn " in _twig_default...\n"; # DEBUG handler + + my( $p, $string)= @_; + + my $t= $p->{twig}; + + # we need to process the data in 2 cases: entity, or spaces after the closing tag + + # after the closing tag (no twig_current and root has been created) + if( ! 
$t->{twig_current} && $t->{twig_root} && $string=~ m{^\s+$}m) { $t->{twig_stored_spaces} .= $string; } + + # process only if we have an entity + if( $string=~ m{^&([^;]*);$}) + { # the entity has to be pure pcdata, or we have a problem + if( ($p->original_string=~ m{^<}) && ($p->original_string=~ m{>$}) ) + { # string is a tag, entity is in an attribute + $t->{twig_entities_in_attribute}=1 if( $t->{twig_do_not_escape_amp_in_atts}); + } + else + { my $ent; + if( $t->{twig_keep_encoding}) + { _twig_char( $p, $string); + $ent= substr( $string, 1, -1); + } + else + { $ent= _twig_insert_ent( $t, $string); + } + + return $ent; + } + } + } + +sub _twig_insert_ent + { + my( $t, $string)=@_; + + my $twig_current= $t->{twig_current}; + + my $ent= $t->{twig_elt_class}->new( $ENT); + $ent->{ent}= $string; + + _add_or_discard_stored_spaces( $t); + + if( $t->{twig_in_pcdata}) + { # create the node as a sibling of the #PCDATA + + $ent->{prev_sibling}=$twig_current; if( $XML::Twig::weakrefs) { weaken( $ent->{prev_sibling});} ; + $twig_current->{next_sibling}= $ent; + my $parent= $twig_current->{parent}; + $ent->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $ent->{parent});} ; + $parent->{empty}=0; $parent->{last_child}=$ent; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; + # the twig_current is now the parent + delete $twig_current->{'twig_current'}; + $t->{twig_current}= $parent; + # we left pcdata + $t->{twig_in_pcdata}=0; + } + else + { # create the node as a child of the current element + $ent->{parent}=$twig_current; if( $XML::Twig::weakrefs) { weaken( $ent->{parent});} ; + if( my $prev_sibling= $twig_current->{last_child}) + { $ent->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $ent->{prev_sibling});} ; + $prev_sibling->{next_sibling}= $ent; + } + else + { if( $twig_current) { $twig_current->{first_child}= $ent; } } + if( $twig_current) { $twig_current->{empty}=0; $twig_current->{last_child}=$ent; if( $XML::Twig::weakrefs) { weaken( $twig_current->{last_child});} ; } + } + + # meant to trigger entity handler, does not seem to be activated at this time + #if( my $handler= $t->{twig_handlers}->{gi}->{$ENT}) + # { local $_= $ent; $handler->( $t, $ent); } + + return $ent; + } + +sub parser + { return $_[0]->{twig_parser}; } + +# returns the declaration text (or a default one) +sub xmldecl + { my $t= shift; + return '' unless( $t->{twig_xmldecl} || $t->{output_encoding}); + my $decl_string; + my $decl= $t->{twig_xmldecl}; + if( $decl) + { my $version= $decl->{version}; + $decl_string= q{{output_encoding}) + # or come from the document (in $decl->{encoding}) + if( $t->{output_encoding}) + { my $encoding= $t->{output_encoding}; + $decl_string .= qq{ encoding="$encoding"}; + } + elsif( $decl->{encoding}) + { my $encoding= $decl->{encoding}; + $decl_string .= qq{ encoding="$encoding"}; + } + + if( defined( $decl->{standalone})) + { $decl_string .= q{ standalone="}; + $decl_string .= $decl->{standalone} ? "yes" : "no"; + $decl_string .= q{"}; + } + + $decl_string .= "?>\n"; + } + else + { my $encoding= $t->{output_encoding}; + $decl_string= qq{}; + } + + my $output_filter= XML::Twig::Elt::output_filter(); + return $output_filter ? 
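# A sketch, assuming keep_encoding together with no_expand keeps entity references
# literal, which is the case handled by _twig_default()/_twig_insert_ent() above
# (the entity below is made up).
use XML::Twig;
my $et= XML::Twig->new( keep_encoding => 1, no_expand => 1);
$et->parse( '<!DOCTYPE doc [ <!ENTITY w "World"> ]><doc>Hello &w;</doc>');
$et->print;                     # the &w; reference is kept as-is in the output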
$output_filter->( $decl_string) : $decl_string; + } + +sub set_doctype + { my( $t, $name, $system, $public, $internal)= @_; + $t->{twig_doctype}= {} unless defined $t->{twig_doctype}; + my $doctype= $t->{twig_doctype}; + $doctype->{name} = $name if( defined $name); + $doctype->{sysid} = $system if( defined $system); + $doctype->{pub} = $public if( defined $public); + $doctype->{internal} = $internal if( defined $internal); + } + +sub doctype_name + { my $t= shift; + my $doctype= $t->{twig_doctype} or return ''; + return $doctype->{name} || ''; + } + +sub system_id + { my $t= shift; + my $doctype= $t->{twig_doctype} or return ''; + return $doctype->{sysid} || ''; + } + +sub public_id + { my $t= shift; + my $doctype= $t->{twig_doctype} or return ''; + return $doctype->{pub} || ''; + } + +sub internal_subset + { my $t= shift; + my $doctype= $t->{twig_doctype} or return ''; + return $doctype->{internal} || ''; + } + +# return the dtd object +sub dtd + { my $t= shift; + return $t->{twig_dtd}; + } + +# return an element model, or the list of element models +sub model + { my $t= shift; + my $elt= shift; + return $t->dtd->{model}->{$elt} if( $elt); + return (sort keys %{$t->dtd->{model}}); + } + + +# return the entity_list object +sub entity_list + { my $t= shift; + return $t->{twig_entity_list}; + } + +# return the list of entity names +sub entity_names + { my $t= shift; + return $t->entity_list->entity_names; + } + +# return the entity object +sub entity + { my $t= shift; + my $entity_name= shift; + return $t->entity_list->ent( $entity_name); + } + + +sub print_prolog + { my $t= shift; + my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : $t->{twig_output_fh} || select() || \*STDOUT; + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + print {$fh} $t->prolog( @_); + } + +sub prolog + { my $t= shift; + if( $t->{no_prolog}){ return ''; } + + return $t->{no_prolog} ? '' + : defined $t->{no_dtd_output} ? $t->xmldecl + : $t->xmldecl . $t->doctype( @_); + } + +sub doctype + { my $t= shift; + my %args= _normalize_args( @_); + my $update_dtd = $args{UpdateDTD} || ''; + my $doctype_text=''; + + my $doctype= $t->{twig_doctype}; + + if( $doctype) + { $doctype_text .= qq{{name}} if( $doctype->{name}); + $doctype_text .= qq{ PUBLIC "$doctype->{pub}"} if( $doctype->{pub}); + $doctype_text .= qq{ SYSTEM} if( $doctype->{sysid} && !$doctype->{pub}); + $doctype_text .= qq{ "$doctype->{sysid}"} if( $doctype->{sysid}); + } + + if( $update_dtd) + { if( $doctype) + { my $internal=$doctype->{internal}; + # awful hack, but at least it works a little better that what was there before + if( $internal) + { # remove entity declarations (they will be re-generated from the updated entity list) + $internal=~ s{]*) >\s*}{}xg; + $internal=~ s{^\n}{}; + } + $internal .= $t->entity_list->text ||'' if( $t->entity_list); + if( $internal) { $doctype_text .= "[\n$internal]>\n"; } + } + elsif( !$t->{'twig_dtd'} && keys %{$t->entity_list}) + { $doctype_text .= "root->gi . " [\n" . $t->entity_list->text . 
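# A sketch using the DOCTYPE accessors defined above (doctype_name, system_id,
# internal_subset); the document is made up and its external DTD is never fetched.
use XML::Twig;
my $gt= XML::Twig->new;
$gt->parse( '<!DOCTYPE greeting SYSTEM "hello.dtd" [ <!ENTITY who "World"> ]><greeting>Hello &who;</greeting>');
print $gt->doctype_name, "\n";       # greeting
print $gt->system_id, "\n";          # hello.dtd
print $gt->internal_subset, "\n";    # the internal subset, if any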
"\n]>";;} + else + { $doctype_text= $t->{twig_dtd}; + $doctype_text .= $t->dtd_text; + } + } + elsif( $doctype) + { if( my $internal= $doctype->{internal}) + { # add opening and closing brackets if not already there + # plus some spaces and newlines for a nice formating + # I test it here because I can't remember which version of + # XML::Parser need it or not, nor guess which one will in the + # future, so this about the best I can do + $internal=~ s{^\s*(\[\s*)?}{ [\n}; + $internal=~ s{\s*(\]\s*(>\s*)?)?\s*$}{\n]>\n}; + $doctype_text .= $internal; + } + } + + if( $doctype_text) + { + # terrible hack, as I can't figure out in which case the darn prolog + # should get an extra > (depends on XML::Parser and expat versions) + $doctype_text=~ s/(>\s*)*$/>\n/; # if($doctype_text); + + my $output_filter= XML::Twig::Elt::output_filter(); + return $output_filter ? $output_filter->( $doctype_text) : $doctype_text; + } + else + { return $doctype_text; } + } + +sub _leading_cpi + { my $t= shift; + my $leading_cpi= $t->{leading_cpi} || return ''; + return $leading_cpi->sprint( 1); + } + +sub _trailing_cpi + { my $t= shift; + my $trailing_cpi= $t->{trailing_cpi} || return ''; + return $trailing_cpi->sprint( 1); + } + +sub _trailing_cpi_text + { my $t= shift; + return $t->{trailing_cpi_text} || ''; + } + +sub print_to_file + { my( $t, $filename)= (shift, shift); + my $out_fh; +# open( $out_fh, ">$filename") or _croak( "cannot create file $filename: $!"); # < perl 5.8 + my $mode= $t->{twig_keep_encoding} ? '>' : '>:utf8'; # >= perl 5.8 + open( $out_fh, $mode, $filename) or _croak( "cannot create file $filename: $!"); # >= perl 5.8 + $t->print( $out_fh, @_); + close $out_fh; + return $t; + } + +# probably only works on *nix (at least the chmod bit) +# first print to a temporary file, then rename that file to the desired file name, then change permissions +# to the original file permissions (or to the current umask) +sub safe_print_to_file + { my( $t, $filename)= (shift, shift); + my $perm= -f $filename ? (stat $filename)[2] & 07777 : ~umask() ; + XML::Twig::_use( 'File::Temp') || croak "need File::Temp to use safe_print_to_file\n"; + my $tmpdir= dirname( $filename); + my( $fh, $tmpfilename) = File::Temp::tempfile( DIR => $tmpdir); + $t->print_to_file( $tmpfilename, @_); + rename( $tmpfilename, $filename) or unlink $tmpfilename && _croak( "cannot move temporary file to $filename: $!"); + chmod $perm, $filename; + return $t; + } + + +sub print + { my $t= shift; + my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : undef; + my %args= _normalize_args( @_); + + my $old_select = defined $fh ? select $fh : undef; + my $old_pretty = defined ($args{PrettyPrint}) ? $t->set_pretty_print( $args{PrettyPrint}) : undef; + my $old_empty_tag = defined ($args{EmptyTags}) ? $t->set_empty_tag_style( $args{EmptyTags}) : undef; + + #if( !$t->{encoding} || lc( $t->{encoding}) eq 'utf-8') { my $out= $fh || \*STDOUT; binmode $out, ':utf8'; } + + if( $perl_version > 5.006 && ! $t->{twig_keep_encoding}) + { if( grep /useperlio=define/, `$^X -V`) # we can only use binmode :utf8 if perl was compiled with useperlio + { binmode( $fh || \*STDOUT, ":utf8" ); } + } + + print $t->prolog( %args) . $t->_leading_cpi( %args); + $t->{twig_root}->print; + print $t->_trailing_cpi # trailing comments and pi's (elements, in 'process' mode) + . $t->_trailing_cpi_text # trailing comments and pi's (in 'keep' mode) + . 
( ($t->{twig_keep_spaces}||'') && ($t->{trailing_spaces} || '')) + ; + + + $t->set_pretty_print( $old_pretty) if( defined $old_pretty); + $t->set_empty_tag_style( $old_empty_tag) if( defined $old_empty_tag); + if( $fh) { select $old_select; } + + return $t; + } + + +sub flush + { my $t= shift; + + $t->_trigger_tdh if $t->{twig_tdh}; + + return if( $t->{twig_completely_flushed}); + + my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : undef; + my $old_select= defined $fh ? select $fh : undef; + my $up_to= ref $_[0] ? shift : undef; + my %args= _normalize_args( @_); + + my $old_pretty; + if( defined $args{PrettyPrint}) + { $old_pretty= $t->set_pretty_print( $args{PrettyPrint}); + delete $args{PrettyPrint}; + } + + my $old_empty_tag_style; + if( $args{EmptyTags}) + { $old_empty_tag_style= $t->set_empty_tag_style( $args{EmptyTags}); + delete $args{EmptyTags}; + } + + + # the "real" last element processed, as _twig_end has closed it + my $last_elt; + my $flush_trailing_data=0; + if( $up_to) + { $last_elt= $up_to; } + elsif( $t->{twig_current}) + { $last_elt= $t->{twig_current}->_last_child; } + else + { $last_elt= $t->{twig_root}; + $flush_trailing_data=1; + $t->{twig_completely_flushed}=1; + } + + # flush the DTD unless it has ready flushed (ie root has been flushed) + my $elt= $t->{twig_root}; + unless( $elt->_flushed) + { # store flush info so we can auto-flush later + if( $t->{twig_autoflush}) + { $t->{twig_autoflush_data}={}; + $t->{twig_autoflush_data}->{fh} = $fh if( $fh); + $t->{twig_autoflush_data}->{args} = \@_ if( @_); + } + $t->print_prolog( %args); + print $t->_leading_cpi; + } + + while( $elt) + { my $next_elt; + if( $last_elt && $last_elt->in( $elt)) + { + unless( $elt->_flushed) + { # just output the front tag + print $elt->start_tag(); + $elt->_set_flushed; + } + $next_elt= $elt->{first_child}; + } + else + { # an element before the last one or the last one, + $next_elt= $elt->{next_sibling}; + $elt->_flush(); + $elt->delete; + last if( $last_elt && ($elt == $last_elt)); + } + $elt= $next_elt; + } + + if( $flush_trailing_data) + { print $t->_trailing_cpi # trailing comments and pi's (elements, in 'process' mode) + , $t->_trailing_cpi_text # trailing comments and pi's (in 'keep' mode) + } + + select $old_select if( defined $old_select); + $t->set_pretty_print( $old_pretty) if( defined $old_pretty); + $t->set_empty_tag_style( $old_empty_tag_style) if( defined $old_empty_tag_style); + + if( my $ids= $t->{twig_id_list}) + { while( my ($id, $elt)= each %$ids) + { if( ! defined $elt) + { delete $t->{twig_id_list}->{$id} } + } + } + + return $t; + } + + +# flushes up to an element +# this method just reorders the arguments and calls flush +sub flush_up_to + { my $t= shift; + my $up_to= shift; + if( isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar')) + { my $fh= shift; + $t->flush( $fh, $up_to, @_); + } + else + { $t->flush( $up_to, @_); } + + return $t; + } + + +# same as print except the entire document text is returned as a string +sub sprint + { my $t= shift; + my %args= _normalize_args( @_); + + my $old_pretty; + if( defined $args{PrettyPrint}) + { $old_pretty= $t->set_pretty_print( $args{PrettyPrint}); + delete $args{PrettyPrint}; + } + + my $old_empty_tag_style; + if( defined $args{EmptyTags}) + { $old_empty_tag_style= $t->set_empty_tag_style( $args{EmptyTags}); + delete $args{EmptyTags}; + } + + my $string= $t->prolog( %args) # xml declaration and doctype + . $t->_leading_cpi( %args) # leading comments and pi's in 'process' mode + . 
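# A sketch of the streaming idiom that flush()/purge() above support: handle each
# record, then release it so memory stays bounded (tag and file names are made up).
use XML::Twig;
my $big= XML::Twig->new(
    twig_handlers => { record => sub { my( $t, $rec)= @_;
                                       # ... use $rec here ...
                                       $t->flush;       # print and free what was parsed so far
                                       # or $t->purge;   # free it without printing
                                     } },
  );
$big->parsefile( 'big.xml');
$big->flush;                          # output whatever follows the last record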
( ($t->{twig_root} && $t->{twig_root}->sprint) || '') + . $t->_trailing_cpi # trailing comments and pi's (elements, in 'process' mode) + . $t->_trailing_cpi_text # trailing comments and pi's (in 'keep' mode) + ; + if( $t->{twig_keep_spaces} && $t->{trailing_spaces}) { $string .= $t->{trailing_spaces}; } + + $t->set_pretty_print( $old_pretty) if( defined $old_pretty); + $t->set_empty_tag_style( $old_empty_tag_style) if( defined $old_empty_tag_style); + + return $string; + } + + +# this method discards useless elements in a tree +# it does the same thing as a flush except it does not print it +# the second argument is an element, the last purged element +# (this argument is usually set through the purge_up_to method) +sub purge + { my $t= shift; + my $up_to= shift; + + $t->_trigger_tdh if $t->{twig_tdh}; + + # the "real" last element processed, as _twig_end has closed it + my $last_elt; + if( $up_to) + { $last_elt= $up_to; } + elsif( $t->{twig_current}) + { $last_elt= $t->{twig_current}->_last_child; } + else + { $last_elt= $t->{twig_root}; } + + my $elt= $t->{twig_root}; + + while( $elt) + { my $next_elt; + if( $last_elt && $last_elt->in( $elt)) + { $elt->_set_flushed; + $next_elt= $elt->{first_child}; + } + else + { # an element before the last one or the last one, + $next_elt= $elt->{next_sibling}; + $elt->delete; + last if( $last_elt && ($elt == $last_elt) ); + } + $elt= $next_elt; + } + + if( my $ids= $t->{twig_id_list}) + { while( my ($id, $elt)= each %$ids) { if( ! defined $elt) { delete $t->{twig_id_list}->{$id} } } } + + return $t; + } + +# flushes up to an element. This method just calls purge +sub purge_up_to + { my $t= shift; + return $t->purge( @_); + } + +sub root + { return $_[0]->{twig_root}; } + +sub normalize + { return $_[0]->root->normalize; } + + +# create accessor methods on attribute names +{ my %accessor; # memorize accessor names so re-creating them won't trigger an error +sub att_accessors + { + my $twig_or_class= shift; + my $elt_class= ref $twig_or_class ? $twig_or_class->{twig_elt_class} + : 'XML::Twig::Elt' + ; + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + foreach my $att (@_) + { _croak( "attempt to redefine existing method $att using att_accessors") + if( $elt_class->can( $att) && !$accessor{$att}); + + if( !$accessor{$att}) + { *{"$elt_class\::$att"}= + sub + :lvalue # > perl 5.5 + { my $elt= shift; + if( @_) { $elt->{att}->{$att}= $_[0]; } + $elt->{att}->{$att}; + }; + $accessor{$att}=1; + } + } + return $twig_or_class; + } +} + +{ my %accessor; # memorize accessor names so re-creating them won't trigger an error +sub elt_accessors + { + my $twig_or_class= shift; + my $elt_class= ref $twig_or_class ? $twig_or_class->{twig_elt_class} + : 'XML::Twig::Elt' + ; + + # if arg is a hash ref, it's exp => name, otherwise it's a list of tags + my %exp_to_alias= ref( $_[0]) && isa( $_[0], 'HASH') ? %{$_[0]} + : map { $_ => $_ } @_; + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + while( my( $alias, $exp)= each %exp_to_alias ) + { if( $elt_class->can( $alias) && !$accessor{$alias}) + { _croak( "attempt to redefine existing method $alias using elt_accessors"); } + + if( !$accessor{$alias}) + { *{"$elt_class\::$alias"}= + sub + { my $elt= shift; + return wantarray ? 
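# A sketch of att_accessors() defined above: it generates per-attribute accessors on
# the element class (element and attribute names here are made up).
use XML::Twig;
my $at= XML::Twig->new;
$at->parse( '<doc><item id="i1" price="9.99"/></doc>');
$at->att_accessors( qw( id price));
my $item= $at->first_elt( 'item');
print $item->id, " costs ", $item->price, "\n";
$item->price( '10.99');               # calling with a value sets the attribute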
$elt->children( $exp) : $elt->first_child( $exp); + }; + $accessor{$alias}=1; + } + } + return $twig_or_class; + } +} + +{ my %accessor; # memorize accessor names so re-creating them won't trigger an error +sub field_accessors + { + my $twig_or_class= shift; + my $elt_class= ref $twig_or_class ? $twig_or_class->{twig_elt_class} + : 'XML::Twig::Elt' + ; + my %exp_to_alias= ref( $_[0]) && isa( $_[0], 'HASH') ? %{$_[0]} + : map { $_ => $_ } @_; + + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + while( my( $alias, $exp)= each %exp_to_alias ) + { if( $elt_class->can( $alias) && !$accessor{$alias}) + { _croak( "attempt to redefine existing method $exp using field_accessors"); } + if( !$accessor{$alias}) + { *{"$elt_class\::$alias"}= + sub + { my $elt= shift; + $elt->field( $exp) + }; + $accessor{$alias}=1; + } + } + return $twig_or_class; + } +} + +sub first_elt + { my( $t, $cond)= @_; + my $root= $t->root || return undef; + return $root if( $root->passes( $cond)); + return $root->next_elt( $cond); + } + +sub last_elt + { my( $t, $cond)= @_; + my $root= $t->root || return undef; + return $root->last_descendant( $cond); + } + +sub next_n_elt + { my( $t, $offset, $cond)= @_; + $offset -- if( $t->root->matches( $cond) ); + return $t->root->next_n_elt( $offset, $cond); + } + +sub get_xpath + { my $twig= shift; + if( isa( $_[0], 'ARRAY')) + { my $elt_array= shift; + return _unique_elts( map { $_->get_xpath( @_) } @$elt_array); + } + else + { return $twig->root->get_xpath( @_); } + } + +# get a list of elts and return a sorted list of unique elts +sub _unique_elts + { my @sorted= sort { $a ->cmp( $b) } @_; + my @unique; + while( my $current= shift @sorted) + { push @unique, $current unless( @unique && ($unique[-1] == $current)); } + return @unique; + } + +sub findvalue + { my $twig= shift; + if( isa( $_[0], 'ARRAY')) + { my $elt_array= shift; + return join( '', map { $_->findvalue( @_) } @$elt_array); + } + else + { return $twig->root->findvalue( @_); } + } + +sub findvalues + { my $twig= shift; + if( isa( $_[0], 'ARRAY')) + { my $elt_array= shift; + return map { $_->findvalues( @_) } @$elt_array; + } + else + { return $twig->root->findvalues( @_); } + } + +sub set_id_seed + { my $t= shift; + XML::Twig::Elt->set_id_seed( @_); + return $t; + } + +# return an array ref to an index, or undef +sub index + { my( $twig, $name, $index)= @_; + return defined( $index) ? 
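# A sketch of the queries routed through get_xpath()/findvalue() above; XML::Twig
# supports a subset of XPath, including attribute tests like the one below.
use XML::Twig;
my $xt= XML::Twig->new;
$xt->parse( '<doc><book lang="en"><title>T1</title></book><book lang="fr"><title>T2</title></book></doc>');
my @en_titles= $xt->get_xpath( '//book[@lang="en"]/title');
print $xt->findvalue( '//book[@lang="en"]/title'), "\n";   # text of the matches, concatenated
print join( '|', $xt->findvalues( '//title')), "\n";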
$twig->{_twig_index}->{$name}->[$index] : $twig->{_twig_index}->{$name}; + } + +# return a list with just the root +# if a condition is given then return an empty list unless the root matches +sub children + { my( $t, $cond)= @_; + my $root= $t->root; + unless( $cond && !($root->passes( $cond)) ) + { return ($root); } + else + { return (); } + } + +sub _children { return ($_[0]->root); } + +# weird, but here for completude +# used to solve (non-sensical) /doc[1] XPath queries +sub child + { my $t= shift; + my $nb= shift; + return ($t->children( @_))[$nb]; + } + +sub descendants + { my( $t, $cond)= @_; + my $root= $t->root; + if( $root->passes( $cond) ) + { return ($root, $root->descendants( $cond)); } + else + { return ( $root->descendants( $cond)); } + } + +sub simplify { my $t= shift; $t->root->simplify( @_); } +sub subs_text { my $t= shift; $t->root->subs_text( @_); } +sub trim { my $t= shift; $t->root->trim( @_); } + + +sub set_keep_encoding + { my( $t, $keep)= @_; + $t->{twig_keep_encoding}= $keep; + $t->{NoExpand}= $keep; + return XML::Twig::Elt::set_keep_encoding( $keep); + } + +sub set_expand_external_entities + { return XML::Twig::Elt::set_expand_external_entities( @_); } + +sub escape_gt + { my $t= shift; $t->{twig_escape_gt}= 1; return XML::Twig::Elt::escape_gt( @_); } + +sub do_not_escape_gt + { my $t= shift; $t->{twig_escape_gt}= 0; return XML::Twig::Elt::do_not_escape_gt( @_); } + +sub elt_id + { return $_[0]->{twig_id_list}->{$_[1]}; } + +# change it in ALL twigs at the moment +sub change_gi + { my( $twig, $old_gi, $new_gi)= @_; + my $index; + return unless($index= $XML::Twig::gi2index{$old_gi}); + $XML::Twig::index2gi[$index]= $new_gi; + delete $XML::Twig::gi2index{$old_gi}; + $XML::Twig::gi2index{$new_gi}= $index; + return $twig; + } + + +# builds the DTD from the stored (possibly updated) data +sub dtd_text + { my $t= shift; + my $dtd= $t->{twig_dtd}; + my $doctype= $t->{twig_doctype} or return ''; + my $string= "{name}; + + $string .= " [\n"; + + foreach my $gi (@{$dtd->{elt_list}}) + { $string.= "{model}->{$gi}.">\n" ; + if( $dtd->{att}->{$gi}) + { my $attlist= $dtd->{att}->{$gi}; + $string.= "{$att}->{fixed}) + { $string.= " $att $attlist->{$att}->{type} #FIXED $attlist->{$att}->{default}"; } + else + { $string.= " $att $attlist->{$att}->{type} $attlist->{$att}->{default}"; } + $string.= "\n"; + } + $string.= ">\n"; + } + } + $string.= $t->entity_list->text if( $t->entity_list); + $string.= "\n]>\n"; + return $string; + } + +# prints the DTD from the stored (possibly updated) data +sub dtd_print + { my $t= shift; + my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : undef; + if( $fh) { print $fh $t->dtd_text; } + else { print $t->dtd_text; } + return $t; + } + +# build the subs that call directly expat +BEGIN + { my @expat_methods= qw( depth in_element within_element context + current_line current_column current_byte + recognized_string original_string + xpcroak xpcarp + base current_element element_index + xml_escape + position_in_context); + foreach my $method (@expat_methods) + { + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + *{$method}= sub { my $t= shift; + _croak( "calling $method after parsing is finished") unless( $t->{twig_parsing}); + return $t->{twig_parser}->$method(@_); + }; + } + } + +sub path + { my( $t, $gi)= @_; + if( $t->{twig_map_xmlns}) + { return "/" . join( "/", map { $t->_replace_prefix( $_)} ($t->{twig_parser}->context, $gi)); } + else + { return "/" . 
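# A sketch of elt_id() and change_gi() defined above; note the comment in the code:
# change_gi currently renames the tag in all twigs, not just this one.
use XML::Twig;
my $it= XML::Twig->new;
$it->parse( '<doc><p id="intro">hi</p></doc>');
my $intro= $it->elt_id( 'intro');     # found through the id index kept while parsing
$it->change_gi( 'p', 'para');
$it->print;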
join( "/", ($t->{twig_parser}->context, $gi)); } + } + +sub finish + { my $t= shift; + return $t->{twig_parser}->finish; + } + +# just finish the parse by printing the rest of the document +sub finish_print + { my( $t, $fh)= @_; + my $old_fh; + unless( defined $fh) + { $t->_set_fh_to_twig_output_fh(); } + elsif( defined $fh) + { $old_fh= select $fh; + $t->{twig_original_selected_fh}= $old_fh if( $old_fh); + } + + my $p=$t->{twig_parser}; + if( $t->{twig_keep_encoding}) + { $p->setHandlers( %twig_handlers_finish_print); } + else + { $p->setHandlers( %twig_handlers_finish_print_original); } + return $t; + } + +sub set_remove_cdata { return XML::Twig::Elt::set_remove_cdata( @_); } + +sub output_filter { return XML::Twig::Elt::output_filter( @_); } +sub set_output_filter { return XML::Twig::Elt::set_output_filter( @_); } + +sub output_text_filter { return XML::Twig::Elt::output_text_filter( @_); } +sub set_output_text_filter { return XML::Twig::Elt::set_output_text_filter( @_); } + +sub set_input_filter + { my( $t, $input_filter)= @_; + my $old_filter= $t->{twig_input_filter}; + if( !$input_filter || isa( $input_filter, 'CODE') ) + { $t->{twig_input_filter}= $input_filter; } + elsif( $input_filter eq 'latin1') + { $t->{twig_input_filter}= latin1(); } + elsif( $filter{$input_filter}) + { $t->{twig_input_filter}= $filter{$input_filter}; } + else + { _croak( "invalid input filter: $input_filter"); } + + return $old_filter; + } + +sub set_empty_tag_style + { return XML::Twig::Elt::set_empty_tag_style( @_); } + +sub set_pretty_print + { return XML::Twig::Elt::set_pretty_print( @_); } + +sub set_quote + { return XML::Twig::Elt::set_quote( @_); } + +sub set_indent + { return XML::Twig::Elt::set_indent( @_); } + +sub set_keep_atts_order + { shift; return XML::Twig::Elt::set_keep_atts_order( @_); } + +sub keep_atts_order + { return XML::Twig::Elt::keep_atts_order( @_); } + +sub set_do_not_escape_amp_in_atts + { return XML::Twig::Elt::set_do_not_escape_amp_in_atts( @_); } + +# save and restore package globals (the ones in XML::Twig::Elt) +# should probably return the XML::Twig object itself, but instead +# returns the state (as a hashref) for backward compatibility +sub save_global_state + { my $t= shift; + return $t->{twig_saved_state}= XML::Twig::Elt::global_state(); + } + +sub restore_global_state + { my $t= shift; + XML::Twig::Elt::set_global_state( $t->{twig_saved_state}); + } + +sub global_state + { return XML::Twig::Elt::global_state(); } + +sub set_global_state + { return XML::Twig::Elt::set_global_state( $_[1]); } + +sub dispose + { my $t= shift; + $t->DESTROY; + return; + } + +sub DESTROY + { my $t= shift; + if( $t->{twig_root} && isa( $t->{twig_root}, 'XML::Twig::Elt')) + { $t->{twig_root}->delete } + + # added to break circular references + undef $t->{twig}; + undef $t->{twig_root}->{twig} if( $t->{twig_root}); + undef $t->{twig_parser}; + + undef %$t;# prevents memory leaks (especially when using mod_perl) + undef $t; + } + + +# +# non standard handlers +# + +# kludge: expat 1.95.2 calls both Default AND Doctype handlers +# so if the default handler finds 'recognized_string(); + if( $string eq 'setHandlers( Default => undef); + $p->setHandlers( Entity => undef); + $expat_1_95_2=1; + } + else + { print $string; } + + return; + } + + +sub _twig_print + { # warn " in _twig_print...\n"; # DEBUG handler + my $p= shift; + if( $expat_1_95_2 && ($p->recognized_string eq '[') && !$p->{twig}->{expat_1_95_2_seen_bracket}) + { # otherwise the opening square bracket of the doctype gets printed twice + 
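# A sketch of the input/output knobs wrapped above (they mostly delegate to
# XML::Twig::Elt); styles other than the ones shown also exist.
use XML::Twig;
my $kt= XML::Twig->new;
my $old_filter= $kt->set_input_filter( 'latin1');   # or any code ref filtering raw parser strings
$kt->parse( '<doc><br/></doc>');
$kt->set_pretty_print( 'indented');                 # or 'none', 'nice', 'record', ...
$kt->set_empty_tag_style( 'html');                  # '<br />'; also 'normal' and 'expand'
$kt->print;
$kt->dispose;                                       # break circular refs when done with the twig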
$p->{twig}->{expat_1_95_2_seen_bracket}=1; + } + else + { if( $p->{twig}->{twig_right_after_root}) + { my $s= $p->recognized_string(); print $s if $s=~ m{\S}; } + else + { print $p->recognized_string(); } + } + return; + } +# recognized_string does not seem to work for entities, go figure! +# so this handler is used to print them anyway +sub _twig_print_entity + { # warn " in _twig_print_entity...\n"; # DEBUG handler + my $p= shift; + XML::Twig::Entity->new( @_)->print; + } + +# kludge: expat 1.95.2 calls both Default AND Doctype handlers +# so if the default handler finds 'original_string(); + if( $string eq 'setHandlers( Default => undef); + $p->setHandlers( Entity => undef); + $expat_1_95_2=1; + } + else + { print $string; } + + return; + } + +sub _twig_print_original + { # warn " in _twig_print_original...\n"; # DEBUG handler + my $p= shift; + print $p->original_string(); + return; + } + + +sub _twig_print_original_doctype + { # warn " in _twig_print_original_doctype...\n"; # DEBUG handler + + my( $p, $name, $sysid, $pubid, $internal)= @_; + if( $name) + { # with recent versions of XML::Parser original_string does not work, + # hence we need to rebuild the doctype declaration + my $doctype=''; + $doctype .= qq{} unless( $internal || $expat_1_95_2); + $p->{twig}->{twig_doctype}->{has_internal}=$internal; + print $doctype; + } + $p->setHandlers( Default => \&_twig_print_original); + return; + } + +sub _twig_print_doctype + { # warn " in _twig_print_doctype...\n"; # DEBUG handler + my( $p, $name, $sysid, $pubid, $internal)= @_; + if( $name) + { # with recent versions of XML::Parser original_string does not work, + # hence we need to rebuild the doctype declaration + my $doctype=''; + $doctype .= qq{} unless( $internal || $expat_1_95_2); + $p->{twig}->{twig_doctype}->{has_internal}=$internal; + print $doctype; + } + $p->setHandlers( Default => \&_twig_print); + return; + } + + +sub _twig_print_original_default + { # warn " in _twig_print_original_default...\n"; # DEBUG handler + my $p= shift; + print $p->original_string(); + return; + } + +# account for the case where the element is empty +sub _twig_print_end_original + { # warn " in _twig_print_end_original...\n"; # DEBUG handler + my $p= shift; + print $p->original_string(); + return; + } + +sub _twig_start_check_roots + { # warn " in _twig_start_check_roots...\n"; # DEBUG handler + my $p= shift; + my $gi= shift; + + my $t= $p->{twig}; + + my $fh= $t->{twig_output_fh} || select() || \*STDOUT; + + my $ns_decl; + unless( $p->depth == 0) + { if( $t->{twig_map_xmlns}) { $ns_decl= _replace_ns( $t, \$gi, \@_); } + } + + my $context= { $ST_TAG => $gi, @_}; + $context->{$ST_NS}= $ns_decl if $ns_decl; + push @{$t->{_twig_context_stack}}, $context; + my %att= @_; + + if( _handler( $t, $t->{twig_roots}, $gi)) + { $p->setHandlers( %twig_handlers); # restore regular handlers + $t->{twig_root_depth}= $p->depth; + pop @{$t->{_twig_context_stack}}; # will be pushed back in _twig_start + _twig_start( $p, $gi, @_); + return; + } + + # $tag will always be true if it needs to be printed (the tag string is never empty) + my $tag= $t->{twig_default_print} ? $t->{twig_keep_encoding} ? 
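# A sketch of the filter mode implemented by _twig_start_check_roots() above: only
# the listed roots are built, everything else is echoed verbatim (tag and file name
# are made up).
use XML::Twig;
my $ft= XML::Twig->new(
    twig_roots => { title => sub { my( $t, $title)= @_;
                                   $title->set_text( uc $title->text);
                                   $title->print;    # the handler prints its own root
                                 } },
    twig_print_outside_roots => 1,
  );
$ft->parsefile( 'doc.xml');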
$p->original_string + : $p->recognized_string + : ''; + + if( $p->depth == 0) + { + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + print {$fh} $tag if( $tag); + pop @{$t->{_twig_context_stack}}; # will be pushed back in _twig_start + _twig_start( $p, $gi, @_); + $t->root->_set_flushed; # or the root start tag gets output the first time we flush + } + elsif( $t->{twig_starttag_handlers}) + { # look for start tag handlers + + my @handlers= _handler( $t, $t->{twig_starttag_handlers}, $gi); + my $last_handler_res; + foreach my $handler ( @handlers) + { $last_handler_res= $handler->($t, $gi, %att); + last unless $last_handler_res; + } + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + print {$fh} $tag if( $tag && (!@handlers || $last_handler_res)); + } + else + { + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + print {$fh} $tag if( $tag); + } + return; + } + +sub _twig_end_check_roots + { # warn " in _twig_end_check_roots...\n"; # DEBUG handler + + my( $p, $gi, %att)= @_; + my $t= $p->{twig}; + # $tag can be empty (), hence the undef and the tests for defined + my $tag= $t->{twig_default_print} ? $t->{twig_keep_encoding} ? $p->original_string + : $p->recognized_string + : undef; + my $fh= $t->{twig_output_fh} || select() || \*STDOUT; + + if( $t->{twig_endtag_handlers}) + { # look for end tag handlers + my @handlers= _handler( $t, $t->{twig_endtag_handlers}, $gi); + my $last_handler_res=1; + foreach my $handler ( @handlers) + { $last_handler_res= $handler->($t, $gi) || last; } + #if( ! $last_handler_res) + # { pop @{$t->{_twig_context_stack}}; warn "tested"; + # return; + # } + } + { + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + print {$fh} $tag if( defined $tag); + } + if( $p->depth == 0) + { + _twig_end( $p, $gi); + $t->root->{end_tag_flushed}=1; + } + + pop @{$t->{_twig_context_stack}}; + return; + } + +sub _twig_pi_check_roots + { # warn " in _twig_pi_check_roots...\n"; # DEBUG handler + my( $p, $target, $data)= @_; + my $t= $p->{twig}; + my $pi= $t->{twig_default_print} ? $t->{twig_keep_encoding} ? $p->original_string + : $p->recognized_string + : undef; + my $fh= $t->{twig_output_fh} || select() || \*STDOUT; + + if( my $handler= $t->{twig_handlers}->{pi_handlers}->{$target} + || $t->{twig_handlers}->{pi_handlers}->{''} + ) + { # if handler is called on pi, then it needs to be processed as a regular node + my @flags= qw( twig_process_pi twig_keep_pi); + my @save= @{$t}{@flags}; # save pi related flags + @{$t}{@flags}= (1, 0); # override them, pi needs to be processed + _twig_pi( @_); # call handler on the pi + @{$t}{@flags}= @save;; # restore flag + } + else + { + ## no critic (TestingAndDebugging::ProhibitNoStrict); + no strict 'refs'; + print {$fh} $pi if( defined( $pi)); + } + return; + } + + +sub _output_ignored + { my( $t, $p)= @_; + my $action= $t->{twig_ignore_action}; + + my $get_string= $t->{twig_keep_encoding} ? 
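# A sketch of start_tag_handlers together with the expat pass-through methods
# (current_line etc.) wired up earlier; the tag name is made up.
use XML::Twig;
my $lt= XML::Twig->new(
    start_tag_handlers => { record => sub { my( $t, $elt)= @_;
                                            warn "record starts at line ", $t->current_line, "\n";
                                            1;        # true: let further handlers run
                                          } },
  );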
'original_string' : 'recognized_string'; + + if( $action eq 'print' ) { print $p->$get_string; } + else + { my $string_ref; + if( $action eq 'string') + { $string_ref= \$t->{twig_buffered_string}; } + elsif( ref( $action) && ref( $action) eq 'SCALAR') + { $string_ref= $action; } + else + { _croak( "wrong ignore action: $action"); } + + $$string_ref .= $p->$get_string; + } + } + + + +sub _twig_ignore_start + { # warn " in _twig_ignore_start...\n"; # DEBUG handler + + my( $p, $gi)= @_; + my $t= $p->{twig}; + $t->{twig_ignore_level}++; + my $action= $t->{twig_ignore_action}; + + $t->_output_ignored( $p) unless $action eq 'discard'; + return; + } + +sub _twig_ignore_end + { # warn " in _twig_ignore_end...\n"; # DEBUG handler + + my( $p, $gi)= @_; + my $t= $p->{twig}; + + my $action= $t->{twig_ignore_action}; + $t->_output_ignored( $p) unless $action eq 'discard'; + + $t->{twig_ignore_level}--; + + if( ! $t->{twig_ignore_level}) + { + $t->{twig_current} = $t->{twig_ignore_elt}; + $t->{twig_current}->set_twig_current; + + $t->{twig_ignore_elt}->cut; # there could possibly be a memory leak here (delete would avoid it, + # but could also delete elements that should not be deleted) + + # restore the saved stack to the current level + splice( @{$t->{_twig_context_stack}}, $p->depth+ 1 ); + #warn "stack: ", _dump_stack( $t->{_twig_context_stack}), "\n"; + + $p->setHandlers( @{$t->{twig_saved_handlers}}); + # test for handlers + if( $t->{twig_endtag_handlers}) + { # look for end tag handlers + my @handlers= _handler( $t, $t->{twig_endtag_handlers}, $gi); + my $last_handler_res=1; + foreach my $handler ( @handlers) + { $last_handler_res= $handler->($t, $gi) || last; } + } + pop @{$t->{_twig_context_stack}}; + }; + return; + } + +#sub _dump_stack { my( $stack)= @_; return join( ":", map { $_->{$ST_TAG} } @$stack); } + +sub ignore + { my( $t, $elt, $action)= @_; + my $current= $t->{twig_current}; + + if( ! ($elt && ref( $elt) && isa( $elt, 'XML::Twig::Elt'))) { $elt= $current; } + + #warn "ignore: current = ", $current->tag, ", elt = ", $elt->tag, ")\n"; + + # we need the ($elt == $current->{last_child}) test because the current element is set to the + # parent _before_ handlers are called (and I can't figure out how to fix this) + unless( ($elt == $current) || ($current->{last_child} && ($elt == $current->{last_child})) || $current->in( $elt)) + { _croak( "element to be ignored must be ancestor of current element"); } + + $t->{twig_ignore_level}= $current == $elt ? 1 : $t->_level_in_stack( $current) - $t->_level_in_stack($elt) + 1; + #warn "twig_ignore_level: $t->{twig_ignore_level} (current: ", $current->tag, ", elt: ", $elt->tag, ")\n"; + $t->{twig_ignore_elt} = $elt; # save it, so we can delete it later + + $action ||= 'discard'; + if( !($action eq 'print' || $action eq 'string' || ( ref( $action) && ref( $action) eq 'SCALAR'))) + { $action= 'discard'; } + + $t->{twig_ignore_action}= $action; + + my $p= $t->{twig_parser}; + my @saved_handlers= $p->setHandlers( %twig_handlers_ignore); # set handlers + + my $get_string= $t->{twig_keep_encoding} ? 'original_string' : 'recognized_string'; + + my $default_handler; + + if( $action ne 'discard') + { if( $action eq 'print') + { $p->setHandlers( Default => sub { print $_[0]->$get_string; }); } + else + { my $string_ref; + if( $action eq 'string') + { if( ! 
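# A sketch of the ignore machinery above (_twig_ignore_start/_twig_ignore_end):
# elements listed in ignore_elts are skipped while parsing; 'discard' drops them,
# 'print' and 'string' are the other actions handled above. The tag name is made up.
use XML::Twig;
my $sk= XML::Twig->new(
    ignore_elts   => { huge_blob => 'discard' },
    twig_handlers => { record    => sub { $_[0]->purge; } },
  );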
exists $t->{twig_buffered_string}) { $t->{twig_buffered_string}=''; } + $string_ref= \$t->{twig_buffered_string}; + } + elsif( ref( $action) && ref( $action) eq 'SCALAR') + { $string_ref= $action; } + + $p->setHandlers( Default => sub { $$string_ref .= $_[0]->$get_string; }); + } + $t->_output_ignored( $p, $action); + } + + + $t->{twig_saved_handlers}= \@saved_handlers; # save current handlers + } + +sub _level_in_stack + { my( $t, $elt)= @_; + my $level=1; + foreach my $elt_in_stack ( @{$t->{_twig_context_stack}} ) + { if( $elt_in_stack->{$ST_ELT} && ($elt == $elt_in_stack->{$ST_ELT})) { return $level } + $level++; + } + } + + + +# select $t->{twig_output_fh} and store the current selected fh +sub _set_fh_to_twig_output_fh + { my $t= shift; + my $output_fh= $t->{twig_output_fh}; + if( $output_fh && !$t->{twig_output_fh_selected}) + { # there is an output fh + $t->{twig_selected_fh}= select(); # store the currently selected fh + $t->{twig_output_fh_selected}=1; + select $output_fh; # select the output fh for the twig + } + } + +# select the fh that was stored in $t->{twig_selected_fh} +# (before $t->{twig_output_fh} was selected) +sub _set_fh_to_selected_fh + { my $t= shift; + return unless( $t->{twig_output_fh}); + my $selected_fh= $t->{twig_selected_fh}; + $t->{twig_output_fh_selected}=0; + select $selected_fh; + return; + } + + +sub encoding + { return $_[0]->{twig_xmldecl}->{encoding} if( $_[0]->{twig_xmldecl}); } + +sub set_encoding + { my( $t, $encoding)= @_; + $t->{twig_xmldecl} ||={}; + $t->set_xml_version( "1.0") unless( $t->xml_version); + $t->{twig_xmldecl}->{encoding}= $encoding; + return $t; + } + +sub output_encoding + { return $_[0]->{output_encoding}; } + +sub set_output_encoding + { my( $t, $encoding)= @_; + my $output_filter= $t->output_filter || ''; + + if( ($encoding && $encoding !~ m{^utf-?8$}i) || $t->{twig_keep_encoding} || $output_filter) + { $t->set_output_filter( _encoding_filter( $encoding || '')); } + + $t->{output_encoding}= $encoding; + return $t; + } + +sub xml_version + { return $_[0]->{twig_xmldecl}->{version} if( $_[0]->{twig_xmldecl}); } + +sub set_xml_version + { my( $t, $version)= @_; + $t->{twig_xmldecl} ||={}; + $t->{twig_xmldecl}->{version}= $version; + return $t; + } + +sub standalone + { return $_[0]->{twig_xmldecl}->{standalone} if( $_[0]->{twig_xmldecl}); } + +sub set_standalone + { my( $t, $standalone)= @_; + $t->{twig_xmldecl} ||={}; + $t->set_xml_version( "1.0") unless( $t->xml_version); + $t->{twig_xmldecl}->{standalone}= $standalone; + return $t; + } + + +# SAX methods + +sub toSAX1 + { _croak( "cannot use toSAX1 while parsing (use flush_toSAX1)") if (defined $_[0]->{twig_parser}); + shift(@_)->_toSAX(@_, \&XML::Twig::Elt::_start_tag_data_SAX1, + \&XML::Twig::Elt::_end_tag_data_SAX1 + ); + } + +sub toSAX2 + { _croak( "cannot use toSAX2 while parsing (use flush_toSAX2)") if (defined $_[0]->{twig_parser}); + shift(@_)->_toSAX(@_, \&XML::Twig::Elt::_start_tag_data_SAX2, + \&XML::Twig::Elt::_end_tag_data_SAX2 + ); + } + + +sub _toSAX + { my( $t, $handler, $start_tag_data, $end_tag_data) = @_; + + if( my $start_document = $handler->can( 'start_document')) + { $start_document->( $handler); } + + $t->_prolog_toSAX( $handler); + + if( $t->root) { $t->root->_toSAX( $handler, $start_tag_data, $end_tag_data) ; } + if( my $end_document = $handler->can( 'end_document')) + { $end_document->( $handler); } + } + + +sub flush_toSAX1 + { shift(@_)->_flush_toSAX(@_, \&XML::Twig::Elt::_start_tag_data_SAX1, + \&XML::Twig::Elt::_end_tag_data_SAX1 + ); + } + +sub 
flush_toSAX2 + { shift(@_)->_flush_toSAX(@_, \&XML::Twig::Elt::_start_tag_data_SAX2, + \&XML::Twig::Elt::_end_tag_data_SAX2 + ); + } + +sub _flush_toSAX + { my( $t, $handler, $start_tag_data, $end_tag_data)= @_; + + # the "real" last element processed, as _twig_end has closed it + my $last_elt; + if( $t->{twig_current}) + { $last_elt= $t->{twig_current}->_last_child; } + else + { $last_elt= $t->{twig_root}; } + + my $elt= $t->{twig_root}; + unless( $elt->_flushed) + { # init unless already done (ie root has been flushed) + if( my $start_document = $handler->can( 'start_document')) + { $start_document->( $handler); } + # flush the DTD + $t->_prolog_toSAX( $handler) + } + + while( $elt) + { my $next_elt; + if( $last_elt && $last_elt->in( $elt)) + { + unless( $elt->_flushed) + { # just output the front tag + if( my $start_element = $handler->can( 'start_element')) + { if( my $tag_data= $start_tag_data->( $elt)) + { $start_element->( $handler, $tag_data); } + } + $elt->_set_flushed; + } + $next_elt= $elt->{first_child}; + } + else + { # an element before the last one or the last one, + $next_elt= $elt->{next_sibling}; + $elt->_toSAX( $handler, $start_tag_data, $end_tag_data); + $elt->delete; + last if( $last_elt && ($elt == $last_elt)); + } + $elt= $next_elt; + } + if( !$t->{twig_parsing}) + { if( my $end_document = $handler->can( 'end_document')) + { $end_document->( $handler); } + } + } + + +sub _prolog_toSAX + { my( $t, $handler)= @_; + $t->_xmldecl_toSAX( $handler); + $t->_DTD_toSAX( $handler); + } + +sub _xmldecl_toSAX + { my( $t, $handler)= @_; + my $decl= $t->{twig_xmldecl}; + my $data= { Version => $decl->{version}, + Encoding => $decl->{encoding}, + Standalone => $decl->{standalone}, + }; + if( my $xml_decl= $handler->can( 'xml_decl')) + { $xml_decl->( $handler, $data); } + } + +sub _DTD_toSAX + { my( $t, $handler)= @_; + my $doctype= $t->{twig_doctype}; + return unless( $doctype); + my $data= { Name => $doctype->{name}, + PublicId => $doctype->{pub}, + SystemId => $doctype->{sysid}, + }; + + if( my $start_dtd= $handler->can( 'start_dtd')) + { $start_dtd->( $handler, $data); } + + # I should call code to export the internal subset here + + if( my $end_dtd= $handler->can( 'end_dtd')) + { $end_dtd->( $handler); } + } + +# input/output filters + +sub latin1 + { local $SIG{__DIE__}; + if( _use( 'Encode')) + { return encode_convert( 'ISO-8859-15'); } + elsif( _use( 'Text::Iconv')) + { return iconv_convert( 'ISO-8859-15'); } + elsif( _use( 'Unicode::Map8') && _use( 'Unicode::String')) + { return unicode_convert( 'ISO-8859-15'); } + else + { return \&regexp2latin1; } + } + +sub _encoding_filter + { + { local $SIG{__DIE__}; + my $encoding= $_[1] || $_[0]; + if( _use( 'Encode')) + { my $sub= encode_convert( $encoding); + return $sub; + } + elsif( _use( 'Text::Iconv')) + { return iconv_convert( $encoding); } + elsif( _use( 'Unicode::Map8') && _use( 'Unicode::String')) + { return unicode_convert( $encoding); } + } + _croak( "Encode, Text::Iconv or Unicode::Map8 and Unicode::String need to be installed in order to use encoding options"); + } + +# shamelessly lifted from XML::TyePYX (works only with XML::Parse 2.27) +sub regexp2latin1 + { my $text=shift; + $text=~s{([\xc0-\xc3])(.)}{ my $hi = ord($1); + my $lo = ord($2); + chr((($hi & 0x03) <<6) | ($lo & 0x3F)) + }ge; + return $text; + } + + +sub html_encode + { _use( 'HTML::Entities') or croak "cannot use html_encode: missing HTML::Entities"; + return HTML::Entities::encode_entities($_[0] ); + } + +sub safe_encode + { my $str= shift; + if( 
$perl_version < 5.008) + { # the no utf8 makes the regexp work in 5.6 + no utf8; # = perl 5.6 + $str =~ s{([\xC0-\xDF].|[\xE0-\xEF]..|[\xF0-\xFF]...)} + {_XmlUtf8Decode($1)}egs; + } + else + { $str= encode( ascii => $str, $FB_HTMLCREF); } + return $str; + } + +sub safe_encode_hex + { my $str= shift; + if( $perl_version < 5.008) + { # the no utf8 makes the regexp work in 5.6 + no utf8; # = perl 5.6 + $str =~ s{([\xC0-\xDF].|[\xE0-\xEF]..|[\xF0-\xFF]...)} + {_XmlUtf8Decode($1, 1)}egs; + } + else + { $str= encode( ascii => $str, $FB_XMLCREF); } + return $str; + } + +# this one shamelessly lifted from XML::DOM +# does NOT work on 5.8.0 +sub _XmlUtf8Decode + { my ($str, $hex) = @_; + my $len = length ($str); + my $n; + + if ($len == 2) + { my @n = unpack "C2", $str; + $n = (($n[0] & 0x3f) << 6) + ($n[1] & 0x3f); + } + elsif ($len == 3) + { my @n = unpack "C3", $str; + $n = (($n[0] & 0x1f) << 12) + (($n[1] & 0x3f) << 6) + ($n[2] & 0x3f); + } + elsif ($len == 4) + { my @n = unpack "C4", $str; + $n = (($n[0] & 0x0f) << 18) + (($n[1] & 0x3f) << 12) + + (($n[2] & 0x3f) << 6) + ($n[3] & 0x3f); + } + elsif ($len == 1) # just to be complete... + { $n = ord ($str); } + else + { croak "bad value [$str] for _XmlUtf8Decode"; } + + my $char= $hex ? sprintf ("&#x%x;", $n) : "&#$n;"; + return $char; + } + + +sub unicode_convert + { my $enc= $_[1] ? $_[1] : $_[0]; # so the method can be called on the twig or directly + _use( 'Unicode::Map8') or croak "Unicode::Map8 not available, needed for encoding filter: $!"; + _use( 'Unicode::String') or croak "Unicode::String not available, needed for encoding filter: $!"; + import Unicode::String qw(utf8); + my $sub= eval qq{ { $NO_WARNINGS; + my \$cnv; + BEGIN { \$cnv= Unicode::Map8->new(\$enc) + or croak "Can't create converter to \$enc"; + } + sub { return \$cnv->to8 (utf8(\$_[0])->ucs2); } + } + }; + unless( $sub) { croak $@; } + return $sub; + } + +sub iconv_convert + { my $enc= $_[1] ? $_[1] : $_[0]; # so the method can be called on the twig or directly + _use( 'Text::Iconv') or croak "Text::Iconv not available, needed for encoding filter: $!"; + my $sub= eval qq{ { $NO_WARNINGS; + my \$cnv; + BEGIN { \$cnv = Text::Iconv->new( 'utf8', \$enc) + or croak "Can't create iconv converter to \$enc"; + } + sub { return \$cnv->convert( \$_[0]); } + } + }; + unless( $sub) + { if( $@=~ m{^Unsupported conversion: Invalid argument}) + { croak "Unsupported encoding: $enc"; } + else + { croak $@; } + } + + return $sub; + } + +sub encode_convert + { my $enc= $_[1] ? $_[1] : $_[0]; # so the method can be called on the twig or directly + my $sub= eval qq{sub { $NO_WARNINGS; return encode( "$enc", \$_[0]); } }; + croak "can't create Encode-based filter: $@" unless( $sub); + return $sub; + } + + +# XML::XPath compatibility +sub getRootNode { return $_[0]; } +sub getParentNode { return undef; } +sub getChildNodes { my @children= ($_[0]->root); return wantarray ? @children : \@children; } + +sub _weakrefs { return $weakrefs; } +sub _set_weakrefs { $weakrefs=shift() || 0; XML::Twig::Elt::set_destroy()if ! 
$weakrefs; } # for testing purposes + +sub _dump + { my $t= shift; + my $dump=''; + + $dump="document\n"; # should dump twig level data here + if( $t->root) { $dump .= $t->root->_dump( @_); } + + return $dump; + + } + + +1; + +###################################################################### +package XML::Twig::Entity_list; +###################################################################### + +*isa= *UNIVERSAL::isa; + +sub new + { my $class = shift; + my $self={ entities => {}, updated => 0}; + + bless $self, $class; + return $self; + + } + +sub add_new_ent + { my $ent_list= shift; + my $ent= XML::Twig::Entity->new( @_); + $ent_list->add( $ent); + return $ent_list; + } + +sub _add_list + { my( $ent_list, $to_add)= @_; + my $ents_to_add= $to_add->{entities}; + return $ent_list unless( $ents_to_add && %$ents_to_add); + @{$ent_list->{entities}}{keys %$ents_to_add}= values %$ents_to_add; + $ent_list->{updated}=1; + return $ent_list; + } + +sub add + { my( $ent_list, $ent)= @_; + $ent_list->{entities}->{$ent->{name}}= $ent; + $ent_list->{updated}=1; + return $ent_list; + } + +sub ent + { my( $ent_list, $ent_name)= @_; + return $ent_list->{entities}->{$ent_name}; + } + +# can be called with an entity or with an entity name +sub delete + { my $ent_list= shift; + if( isa( ref $_[0], 'XML::Twig::Entity')) + { # the second arg is an entity + my $ent= shift; + delete $ent_list->{entities}->{$ent->{name}}; + } + else + { # the second arg was not entity, must be a string then + my $name= shift; + delete $ent_list->{entities}->{$name}; + } + $ent_list->{updated}=1; + return $ent_list; + } + +sub print + { my ($ent_list, $fh)= @_; + my $old_select= defined $fh ? select $fh : undef; + + foreach my $ent_name ( sort keys %{$ent_list->{entities}}) + { my $ent= $ent_list->{entities}->{$ent_name}; + # we have to test what the entity is or un-defined entities can creep in + if( isa( $ent, 'XML::Twig::Entity')) { $ent->print(); } + } + select $old_select if( defined $old_select); + return $ent_list; + } + +sub text + { my ($ent_list)= @_; + return join "\n", map { $ent_list->{entities}->{$_}->text} sort keys %{$ent_list->{entities}}; + } + +# return the list of entity names +sub entity_names + { my $ent_list= shift; + return (sort keys %{$ent_list->{entities}}) ; + } + + +sub list + { my ($ent_list)= @_; + return map { $ent_list->{entities}->{$_} } sort keys %{$ent_list->{entities}}; + } + +1; + +###################################################################### +package XML::Twig::Entity; +###################################################################### + +#*isa= *UNIVERSAL::isa; + +sub new + { my( $class, $name, $val, $sysid, $pubid, $ndata, $param)= @_; + $class= ref( $class) || $class; + + my $self={}; + + $self->{name} = $name; + $self->{val} = $val if( defined $val ); + $self->{sysid} = $sysid if( defined $sysid); + $self->{pubid} = $pubid if( defined $pubid); + $self->{ndata} = $ndata if( defined $ndata); + $self->{param} = $param if( defined $param); + + bless $self, $class; + return $self; + } + + +sub name { return $_[0]->{name}; } +sub val { return $_[0]->{val}; } +sub sysid { return defined( $_[0]->{sysid}) ? $_[0]->{sysid} : ''; } +sub pubid { return defined( $_[0]->{pubid}) ? $_[0]->{pubid} : ''; } +sub ndata { return defined( $_[0]->{ndata}) ? $_[0]->{ndata} : ''; } +sub param { return defined( $_[0]->{param}) ? $_[0]->{param} : ''; } + + +sub print + { my ($ent, $fh)= @_; + my $text= $ent->text; + if( $fh) { print $fh $text . "\n"; } + else { print $text . 
"\n"; } + } + +sub sprint + { my ($ent)= @_; + return $ent->text; + } + +sub text + { my ($ent)= @_; + #warn "text called: '", $ent->_dump, "'\n"; + return '' if( !$ent->{name}); + my @tokens; + push @tokens, '<!ENTITY'; + push @tokens, '%' if( $ent->{param}); + push @tokens, $ent->{name}; + + if( defined $ent->{val} && !defined( $ent->{sysid}) && !defined($ent->{pubid}) ) + { push @tokens, _quoted_val( $ent->{val}); + } + elsif( defined $ent->{sysid}) + { push @tokens, 'PUBLIC', _quoted_val( $ent->{pubid}) if( $ent->{pubid}); + push @tokens, 'SYSTEM' unless( $ent->{pubid}); + push @tokens, _quoted_val( $ent->{sysid}); + push @tokens, 'NDATA', $ent->{ndata} if( $ent->{ndata}); + } + return join( ' ', @tokens) . '>'; + } + +sub _quoted_val + { my $q= $_[0]=~ m{"} ? q{'} : q{"}; + return qq{$q$_[0]$q}; + } + +sub _dump + { my( $ent)= @_; return join( " - ", map { "$_ => '$ent->{$_}'" } grep { defined $ent->{$_} } sort keys %$ent); } + +1; + +###################################################################### +package XML::Twig::Elt; +###################################################################### + +use Carp; +*isa= *UNIVERSAL::isa; + +my $CDATA_START = "<![CDATA["; +my $CDATA_END = "]]>"; +my $PI_START = "<?"; +my $PI_END = "?>"; +my $COMMENT_START = "<!--"; +my $COMMENT_END = "-->"; + +my $XMLNS_URI = 'http://www.w3.org/2000/xmlns/'; + + +BEGIN + { # set some aliases for methods + *tag = *gi; + *name = *gi; + *set_tag = *set_gi; + *set_name = *set_gi; + *find_nodes = *get_xpath; # as in XML::DOM + *findnodes = *get_xpath; # as in XML::LibXML + *field = *first_child_text; + *trimmed_field = *first_child_trimmed_text; + *is_field = *contains_only_text; + *is = *passes; + *matches = *passes; + *has_child = *first_child; + *has_children = *first_child; + *all_children_pass = *all_children_are; + *all_children_match= *all_children_are; + *getElementsByTagName= *descendants; + *find_by_tag_name= *descendants_or_self; + *unwrap = *erase; + *inner_xml = *xml_string; + *outer_xml = *sprint; + *add_class = *add_to_class; + + *first_child_is = *first_child_matches; + *last_child_is = *last_child_matches; + *next_sibling_is = *next_sibling_matches; + *prev_sibling_is = *prev_sibling_matches; + *next_elt_is = *next_elt_matches; + *prev_elt_is = *prev_elt_matches; + *parent_is = *parent_matches; + *child_is = *child_matches; + *inherited_att = *inherit_att; + + *sort_children_by_value= *sort_children_on_value; + + *has_atts= *att_nb; + + # imports from XML::Twig + *_is_fh= *XML::Twig::_is_fh; + + # XML::XPath compatibility + *string_value = *text; + *toString = *sprint; + *getName = *gi; + *getRootNode = *twig; + *getNextSibling = *_next_sibling; + *getPreviousSibling = *_prev_sibling; + *isElementNode = *is_elt; + *isTextNode = *is_text; + *isPI = *is_pi; + *isPINode = *is_pi; + *isProcessingInstructionNode= *is_pi; + *isComment = *is_comment; + *isCommentNode = *is_comment; + *getTarget = *target; + *getFirstChild = *_first_child; + *getLastChild = *_last_child; + + # try using weak references + # test whether we can use weak references + { local $SIG{__DIE__}; + if( eval 'require Scalar::Util' && defined( &Scalar::Util::weaken) ) + { import Scalar::Util qw(weaken); } + elsif( eval 'require WeakRef') + { import WeakRef; } + } +} + + +# can be called as XML::Twig::Elt->new( [[$gi, $atts, [@content]]) +# - gi is an optional gi given to the element +# - $atts is a hashref to attributes for the element +# - @content is an optional list of text and elements that will +# be inserted under the element +sub new + { my $class= shift; + $class= ref $class || $class; + my $elt = {}; + bless ($elt, $class); + + return $elt 
unless @_; + + if( @_ == 1 && $_[0]=~ m{^\s*<}) { return $class->parse( @_); } + + # if a gi is passed then use it + my $gi= shift; + $elt->{gi}=$XML::Twig::gi2index{$gi} or $elt->set_gi( $gi); + + + my $atts= ref $_[0] eq 'HASH' ? shift : undef; + + if( $atts && defined $atts->{$CDATA}) + { delete $atts->{$CDATA}; + + my $cdata= $class->new( $CDATA => @_); + return $class->new( $gi, $atts, $cdata); + } + + if( $gi eq $PCDATA) + { if( grep { ref $_ } @_) { croak "element $PCDATA can only be created from text"; } + $elt->_set_pcdata( join( '', @_)); + } + elsif( $gi eq $ENT) + { $elt->{ent}= shift; } + elsif( $gi eq $CDATA) + { if( grep { ref $_ } @_) { croak "element $CDATA can only be created from text"; } + $elt->_set_cdata( join( '', @_)); + } + elsif( $gi eq $COMMENT) + { if( grep { ref $_ } @_) { croak "element $COMMENT can only be created from text"; } + $elt->_set_comment( join( '', @_)); + } + elsif( $gi eq $PI) + { if( grep { ref $_ } @_) { croak "element $PI can only be created from text"; } + $elt->_set_pi( shift, join( '', @_)); + } + else + { # the rest of the arguments are the content of the element + if( @_) + { $elt->set_content( @_); } + else + { $elt->{empty}= 1; } + } + + if( $atts) + { # the attribute hash can be used to pass the asis status + if( defined $atts->{$ASIS}) { $elt->set_asis( $atts->{$ASIS} ); delete $atts->{$ASIS}; } + if( defined $atts->{$EMPTY}) { $elt->{empty}= $atts->{$EMPTY}; delete $atts->{$EMPTY}; } + if( keys %$atts) { $elt->set_atts( $atts); } + $elt->_set_id( $atts->{$ID}) if( $atts->{$ID}); + } + + return $elt; + } + +# optimized version of $elt->new( PCDATA, $text); +sub _new_pcdata + { my $class= $_[0]; + $class= ref $class || $class; + my $elt = {}; + bless $elt, $class; + $elt->{gi}=$XML::Twig::gi2index{$PCDATA} or $elt->set_gi( $PCDATA); + $elt->_set_pcdata( $_[1]); + return $elt; + } + +# this function creates an XML::Twig::Elt from a string +# it is quite clumsy at the moment, as it just creates a +# new twig then returns its root +# there might also be memory leaks there +# additional arguments are passed to new XML::Twig +sub parse + { my $class= shift; + if( ref( $class)) { $class= ref( $class); } + my $string= shift; + my %args= @_; + my $t= XML::Twig->new(%args); + $t->parse( $string); + my $elt= $t->root; + # clean-up the node + delete $elt->{twig}; # get rid of the twig data + delete $elt->{twig_current}; # better get rid of this too + if( $t->{twig_id_list}) { $elt->{twig_id_list}= $t->{twig_id_list}; } + $elt->cut; + undef $t->{twig_root}; + return $elt; + } + +sub set_inner_xml + { my( $elt, $xml, @args)= @_; + my $new_elt= $elt->parse( "<dummy>$xml</dummy>", @args); + $elt->cut_children; + $new_elt->paste_first_child( $elt); + $new_elt->erase; + return $elt; + } + +sub set_outer_xml + { my( $elt, $xml, @args)= @_; + my $new_elt= $elt->parse( "<dummy>$xml</dummy>", @args); + $elt->cut_children; + $new_elt->replace( $elt); + $new_elt->erase; + return $new_elt; + } + + +sub set_inner_html + { my( $elt, $html)= @_; + my $t= XML::Twig->new->parse_html( "<html>$html</html>"); + my $new_elt= $t->root; + if( $elt->tag eq 'head') + { $new_elt->first_child( 'head')->unwrap; + $new_elt->first_child( 'body')->cut; + } + elsif( $elt->tag ne 'html') + { $new_elt->first_child( 'head')->cut; + $new_elt->first_child( 'body')->unwrap; + } + $new_elt->cut; + $elt->cut_children; + $new_elt->paste_first_child( $elt); + $new_elt->erase; + return $elt; + } + +sub set_gi + { my ($elt, $gi)= @_; + unless( defined $XML::Twig::gi2index{$gi}) + { # new gi, create entries in %gi2index and @index2gi 
+ push @XML::Twig::index2gi, $gi; + $XML::Twig::gi2index{$gi}= $#XML::Twig::index2gi; + } + $elt->{gi}= $XML::Twig::gi2index{$gi}; + return $elt; + } + +sub gi { return $XML::Twig::index2gi[$_[0]->{gi}]; } + +sub local_name + { my $elt= shift; + return _local_name( $XML::Twig::index2gi[$elt->{'gi'}]); + } + +sub ns_prefix + { my $elt= shift; + return _ns_prefix( $XML::Twig::index2gi[$elt->{'gi'}]); + } + +# namespace prefix for any qname (can be used for elements or attributes) +sub _ns_prefix + { my $qname= shift; + if( $qname=~ m{^([^:]*):}) + { return $1; } + else + { return( ''); } # should it be '' ? + } + +# local name for any qname (can be used for elements or attributes) +sub _local_name + { my $qname= shift; + (my $local= $qname)=~ s{^[^:]*:}{}; + return $local; + } + +#sub get_namespace +sub namespace ## no critic (Subroutines::ProhibitNestedSubs); + { my $elt= shift; + my $prefix= defined $_[0] ? shift() : $elt->ns_prefix; + my $ns_att= $prefix ? "xmlns:$prefix" : "xmlns"; + my $expanded= $DEFAULT_NS{$prefix} || $elt->_inherit_att_through_cut( $ns_att) || ''; + return $expanded; + } + +sub declare_missing_ns ## no critic (Subroutines::ProhibitNestedSubs); + { my $root= shift; + my %missing_prefix; + my $map= $root->_current_ns_prefix_map; + + foreach my $prefix (keys %$map) + { my $prefix_att= $prefix eq '#default' ? 'xmlns' : "xmlns:$prefix"; + if( ! $root->{'att'}->{$prefix_att}) + { $root->set_att( $prefix_att => $map->{$prefix}); } + } + return $root; + } + +sub _current_ns_prefix_map + { my( $elt)= shift; + my $map; + while( $elt) + { foreach my $att ($elt->att_names) + { my $prefix= $att eq 'xmlns' ? '#default' + : $att=~ m{^xmlns:(.*)$} ? $1 + : next + ; + if( ! exists $map->{$prefix}) { $map->{$prefix}= $elt->{'att'}->{$att}; } + } + $elt= $elt->{parent} || ($elt->{former} && $elt->{former}->{parent}); + } + return $map; + } + +sub set_ns_decl + { my( $elt, $uri, $prefix)= @_; + my $ns_att= $prefix ? "xmlns:$prefix" : 'xmlns'; + $elt->set_att( $ns_att => $uri); + return $elt; + } + +sub set_ns_as_default + { my( $root, $uri)= @_; + my @ns_decl_to_remove; + foreach my $elt ($root->descendants_or_self) + { if( $elt->_ns_prefix && $elt->namespace eq $uri) + { $elt->set_tag( $elt->local_name); } + # store any namespace declaration for that uri + foreach my $ns_decl (grep { $_=~ m{xmlns(:|$)} && $elt->{'att'}->{$_} eq $uri } $elt->att_names) + { push @ns_decl_to_remove, [$elt, $ns_decl]; } + } + $root->set_ns_decl( $uri); + # now remove the ns declarations (if done earlier then descendants of an element with the ns declaration + # are not considered being in the namespace + foreach my $ns_decl_to_remove ( @ns_decl_to_remove) + { my( $elt, $ns_decl)= @$ns_decl_to_remove; + $elt->del_att( $ns_decl); + } + + return $root; + } + + + +# return #ELT for an element and #PCDATA... 
for others +sub get_type + { my $gi_nb= $_[0]->{gi}; # the number, not the string + return $ELT if( $gi_nb >= $XML::Twig::SPECIAL_GI); + return $_[0]->gi; + } + +# return the gi if it's a "real" element, 0 otherwise +sub is_elt + { if( $_[0]->{gi} >= $XML::Twig::SPECIAL_GI) + { return $_[0]->gi; } + else + { return 0; } + } + + +sub is_pcdata + { my $elt= shift; + return (exists $elt->{'pcdata'}); + } + +sub is_cdata + { my $elt= shift; + return (exists $elt->{'cdata'}); + } + +sub is_pi + { my $elt= shift; + return (exists $elt->{'target'}); + } + +sub is_comment + { my $elt= shift; + return (exists $elt->{'comment'}); + } + +sub is_ent + { my $elt= shift; + return (exists $elt->{ent} || $elt->{ent_name}); + } + + +sub is_text + { my $elt= shift; + return (exists( $elt->{'pcdata'}) || (exists $elt->{'cdata'})); + } + +sub is_empty + { return $_[0]->{empty} || 0; } + +sub set_empty + { $_[0]->{empty}= defined( $_[1]) ? $_[1] : 1; return $_[0]; } + +sub set_not_empty + { delete $_[0]->{empty} if( $_[0]->{'empty'}); return $_[0]; } + + +sub set_asis + { my $elt=shift; + + foreach my $descendant ($elt, $elt->_descendants ) + { $descendant->{asis}= 1; + if( (exists $descendant->{'cdata'})) + { $descendant->{gi}=$XML::Twig::gi2index{$PCDATA} or $descendant->set_gi( $PCDATA); + $descendant->_set_pcdata( $descendant->{cdata}); + } + + } + return $elt; + } + +sub set_not_asis + { my $elt=shift; + foreach my $descendant ($elt, $elt->descendants) + { delete $descendant->{asis} if $descendant->{asis};} + return $elt; + } + +sub is_asis + { return $_[0]->{asis}; } + +sub closed + { my $elt= shift; + my $t= $elt->twig || return; + my $curr_elt= $t->{twig_current}; + return 1 unless( $curr_elt); + return $curr_elt->in( $elt); + } + +sub set_pcdata + { my( $elt, $pcdata)= @_; + + if( $elt->{extra_data_in_pcdata}) + { _try_moving_extra_data( $elt, $pcdata); + } + $elt->{pcdata}= $pcdata; + return $elt; + } + +sub _extra_data_in_pcdata { return $_[0]->{extra_data_in_pcdata}; } +sub _set_extra_data_in_pcdata { $_[0]->{extra_data_in_pcdata}= $_[1]; return $_[0]; } +sub _del_extra_data_in_pcdata { delete $_[0]->{extra_data_in_pcdata}; return $_[0]; } +sub _unshift_extra_data_in_pcdata + { my $e= shift; + $e->{extra_data_in_pcdata}||=[]; + unshift @{$e->{extra_data_in_pcdata}}, { text => shift(), offset => shift() }; + } +sub _push_extra_data_in_pcdata + { my $e= shift; + $e->{extra_data_in_pcdata}||=[]; + push @{$e->{extra_data_in_pcdata}}, { text => shift(), offset => shift() }; + } + +sub _extra_data_before_end_tag { return $_[0]->{extra_data_before_end_tag} || ''; } +sub _set_extra_data_before_end_tag { $_[0]->{extra_data_before_end_tag}= $_[1]; return $_[0]} +sub _del_extra_data_before_end_tag { delete $_[0]->{extra_data_before_end_tag}; return $_[0]} +sub _prefix_extra_data_before_end_tag + { my( $elt, $data)= @_; + if($elt->{extra_data_before_end_tag}) + { $elt->{extra_data_before_end_tag}= $data . $elt->{extra_data_before_end_tag}; } + else + { $elt->{extra_data_before_end_tag}= $data; } + return $elt; + } + +# internal, in cases where we know there is no extra_data (inlined anyway!) 
+sub _set_pcdata { $_[0]->{pcdata}= $_[1]; } + +# try to figure out if we can keep the extra_data around +sub _try_moving_extra_data + { my( $elt, $modified)=@_; + my $initial= $elt->{pcdata}; + my $cpis= $elt->{extra_data_in_pcdata}; + + if( (my $offset= index( $modified, $initial)) != -1) + { # text has been added + foreach (@$cpis) { $_->{offset}+= $offset; } + } + elsif( ($offset= index( $initial, $modified)) != -1) + { # text has been cut + my $len= length( $modified); + foreach my $cpi (@$cpis) { $cpi->{offset} -= $offset; } + $elt->_set_extra_data_in_pcdata( [ grep { $_->{offset} >= 0 && $_->{offset} < $len } @$cpis ]); + } + else + { _match_extra_data_words( $elt, $initial, $modified) + || _match_extra_data_chars( $elt, $initial, $modified) + || $elt->_del_extra_data_in_pcdata; + } + } + +sub _match_extra_data_words + { my( $elt, $initial, $modified)= @_; + my @initial= split /\b/, $initial; + my @modified= split /\b/, $modified; + + return _match_extra_data( $elt, length( $initial), \@initial, \@modified); + } + +sub _match_extra_data_chars + { my( $elt, $initial, $modified)= @_; + my @initial= split //, $initial; + my @modified= split //, $modified; + + return _match_extra_data( $elt, length( $initial), \@initial, \@modified); + } + +sub _match_extra_data + { my( $elt, $length, $initial, $modified)= @_; + + my $cpis= $elt->{extra_data_in_pcdata}; + + if( @$initial <= @$modified) + { + my( $ok, $positions, $offsets)= _pos_offset( $initial, $modified); + if( $ok) + { my $offset=0; + my $pos= shift @$positions; + foreach my $cpi (@$cpis) + { while( $cpi->{offset} >= $pos) + { $offset= shift @$offsets; + $pos= shift @$positions || $length +1; + } + $cpi->{offset} += $offset; + } + return 1; + } + } + else + { my( $ok, $positions, $offsets)= _pos_offset( $modified, $initial); + if( $ok) + { #print STDERR "pos: ", join( ':', @$positions), "\n", + # "offset: ", join( ':', @$offsets), "\n"; + my $offset=0; + my $pos= shift @$positions; + my $prev_pos= 0; + + foreach my $cpi (@$cpis) + { while( $cpi->{offset} >= $pos) + { $offset= shift @$offsets; + $prev_pos= $pos; + $pos= shift @$positions || $length +1; + } + $cpi->{offset} -= $offset; + if( $cpi->{offset} < $prev_pos) { delete $cpi->{text}; } + } + $elt->_set_extra_data_in_pcdata( [ grep { exists $_->{text} } @$cpis ]); + return 1; + } + } + return 0; + } + + +sub _pos_offset + { my( $short, $long)= @_; + my( @pos, @offset); + my( $s_length, $l_length)=(0,0); + while (@$short) + { my $s_word= shift @$short; + my $l_word= shift @$long; + if( $s_word ne $l_word) + { while( @$long && $s_word ne $l_word) + { $l_length += length( $l_word); + $l_word= shift @$long; + } + if( !@$long && $s_word ne $l_word) { return 0; } + push @pos, $s_length; + push @offset, $l_length - $s_length; + } + my $length= length( $s_word); + $s_length += $length; + $l_length += $length; + } + return( 1, \@pos, \@offset); + } + +sub append_pcdata + { $_[0]->{'pcdata'}.= $_[1]; + return $_[0]; + } + +sub pcdata { return $_[0]->{pcdata}; } + + +sub append_extra_data + { $_[0]->{extra_data}.= $_[1]; + return $_[0]; + } + +sub set_extra_data + { $_[0]->{extra_data}= $_[1]; + return $_[0]; + } +sub extra_data { return $_[0]->{extra_data} || ''; } + +sub set_target + { my( $elt, $target)= @_; + $elt->{target}= $target; + return $elt; + } +sub target { return $_[0]->{target}; } + +sub set_data + { $_[0]->{'data'}= $_[1]; + return $_[0]; + } +sub data { return $_[0]->{data}; } + +sub set_pi + { my $elt= shift; + unless( $elt->{gi} == $XML::Twig::gi2index{$PI}) + { 
$elt->cut_children; + $elt->{gi}=$XML::Twig::gi2index{$PI} or $elt->set_gi( $PI); + } + return $elt->_set_pi( @_); + } + +sub _set_pi + { $_[0]->set_target( $_[1]); + $_[0]->{data}= $_[2]; + return $_[0]; + } + +sub pi_string { my $string= $PI_START . $_[0]->{target}; + my $data= $_[0]->{data}; + if( defined( $data) && $data ne '') { $string .= " $data"; } + $string .= $PI_END ; + return $string; + } + +sub set_comment + { my $elt= shift; + unless( $elt->{gi} == $XML::Twig::gi2index{$COMMENT}) + { $elt->cut_children; + $elt->{gi}=$XML::Twig::gi2index{$COMMENT} or $elt->set_gi( $COMMENT); + } + return $elt->_set_comment( @_); + } + +sub _set_comment { $_[0]->{comment}= $_[1]; return $_[0]; } +sub comment { return $_[0]->{comment}; } +sub comment_string { return $COMMENT_START . _comment_escaped_string( $_[0]->{comment}) . $COMMENT_END; } +# comments cannot start or end with +sub _comment_escaped_string + { my( $c)= @_; + $c=~ s{^-}{ -}; + $c=~ s{-$}{- }; + $c=~ s{--}{- -}g; + return $c; + } + +sub set_ent { $_[0]->{ent}= $_[1]; return $_[0]; } +sub ent { return $_[0]->{ent}; } +sub ent_name { return substr( $_[0]->{ent}, 1, -1);} + +sub set_cdata + { my $elt= shift; + unless( $elt->{gi} == $XML::Twig::gi2index{$CDATA}) + { $elt->cut_children; + $elt->insert_new_elt( first_child => $CDATA, @_); + return $elt; + } + return $elt->_set_cdata( @_); + } + +sub _set_cdata + { $_[0]->{cdata}= $_[1]; + return $_[0]; + } + +sub append_cdata + { $_[0]->{cdata}.= $_[1]; + return $_[0]; + } +sub cdata { return $_[0]->{cdata}; } + + +sub contains_only_text + { my $elt= shift; + return 0 unless $elt->is_elt; + foreach my $child ($elt->_children) + { return 0 if $child->is_elt; } + return $elt; + } + +sub contains_only + { my( $elt, $exp)= @_; + my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; + foreach my $child (@children) + { return 0 unless $child->is( $exp); } + return @children || 1; + } + +sub contains_a_single + { my( $elt, $exp)= @_; + my $child= $elt->{first_child} or return 0; + return 0 unless $child->passes( $exp); + return 0 if( $child->{next_sibling}); + return $child; + } + + +sub root + { my $elt= shift; + while( $elt->{parent}) { $elt= $elt->{parent}; } + return $elt; + } + +sub _root_through_cut + { my $elt= shift; + while( $elt->{parent} || ($elt->{former} && $elt->{former}->{parent})) { $elt= $elt->{parent} || ($elt->{former} && $elt->{former}->{parent}); } + return $elt; + } + +sub twig + { my $elt= shift; + my $root= $elt->root; + return $root->{twig}; + } + +sub _twig_through_cut + { my $elt= shift; + my $root= $elt->_root_through_cut; + return $root->{twig}; + } + + +# used for navigation +# returns undef or the element, depending on whether $elt passes $cond +# $cond can be +# - empty: the element passes the condition +# - ELT ('#ELT'): the element passes the condition if it is a "real" element +# - TEXT ('#TEXT'): the element passes if it is a CDATA or PCDATA element +# - a string with an XPath condition (only a subset of XPath is actually +# supported). +# - a regexp: the element passes if its gi matches the regexp +# - a code ref: the element passes if the code, applied on the element, +# returns true + +my %cond_cache; # expression => coderef + +sub reset_cond_cache { %cond_cache=(); } + +{ + sub _install_cond + { my $cond= shift; + my $test; + my $init=''; + + my $original_cond= $cond; + + my $not= ($cond=~ s{^\s*!}{}) ? '!' 
: ''; + + if( ref $cond eq 'CODE') { return $cond; } + + if( ref $cond eq 'Regexp') + { $test = qq{(\$_[0]->gi=~ /$cond/)}; } + else + { my @tests; + while( $cond) + { + # the condition is a string + if( $cond=~ s{$ELT$SEP}{}) + { push @tests, qq{\$_[0]->is_elt}; } + elsif( $cond=~ s{$TEXT$SEP}{}) + { push @tests, qq{\$_[0]->is_text}; } + elsif( $cond=~ s{^\s*($REG_TAG_PART)$SEP}{}) + { push @tests, _gi_test( $1); } + elsif( $cond=~ s{^\s*($REG_REGEXP)$SEP}{}) + { # /regexp/ + push @tests, qq{ \$_[0]->gi=~ $1 }; + } + elsif( $cond=~ s{^\s*($REG_TAG_PART)?\s* # $1 + \[\s*(-?)\s*(\d+)\s*\] # [$2] + $SEP}{}xo + ) + { my( $gi, $neg, $index)= ($1, $2, $3); + my $siblings= $neg ? q{$_[0]->_next_siblings} : q{$_[0]->_prev_siblings}; + if( $gi && ($gi ne '*')) + #{ $test= qq{((\$_[0]->gi eq "$gi") && (scalar( grep { \$_->gi eq "$gi" } $siblings) + 1 == $index))}; } + { push @tests, _and( _gi_test( $gi), qq{ (scalar( grep { \$_->gi eq "$gi" } $siblings) + 1 == $index)}); } + else + { push @tests, qq{(scalar( $siblings) + 1 == $index)}; } + } + elsif( $cond=~ s{^\s*($REG_TAG_PART?)\s*($REG_PREDICATE)$SEP}{}) + { my( $gi, $predicate)= ( $1, $2); + push @tests, _and( _gi_test( $gi), _parse_predicate_in_step( $predicate)); + } + elsif( $cond=~ s{^\s*($REG_NAKED_PREDICATE)$SEP}{}) + { push @tests, _parse_predicate_in_step( $1); } + else + { croak "wrong navigation condition '$original_cond' ($@)"; } + } + $test= @tests > 1 ? '(' . join( '||', map { "($_)" } @tests) . ')' : $tests[0]; + } + + #warn "init: '$init' - test: '$test'\n"; + + my $sub= qq{sub { $NO_WARNINGS; $init; return $not($test) ? \$_[0] : undef; } }; + my $s= eval $sub; + #warn "cond: $cond\n$sub\n"; + if( $@) + { croak "wrong navigation condition '$original_cond' ($@);" } + return $s; + } + + sub _gi_test + { my( $full_gi)= @_; + + # optimize if the gi exists, including the case where the gi includes a dot + my $index= $XML::Twig::gi2index{$full_gi}; + if( $index) { return qq{\$_[0]->{gi} == $index}; } + + my( $gi, $class, $id)= $full_gi=~ m{^(.*?)(?:[.]([^.]*)|[#](.*))?$}; + + my $gi_test=''; + if( $gi && $gi ne '*' ) + { # 2 options, depending on whether the gi exists in gi2index + # start optimization + my $index= $XML::Twig::gi2index{$gi}; + if( $index) + { # the gi exists, use its index as a faster shortcut + $gi_test = qq{\$_[0]->{gi} == $index}; + } + else + # end optimization + { # it does not exist (but might be created later), compare the strings + $gi_test = qq{ \$_[0]->gi eq "$gi"}; + } + } + else + { $gi_test= 1; } + + my $class_test=''; + #warn "class: '$class'"; + if( $class) + { $class_test = qq{ defined( \$_[0]->{att}->{class}) && \$_[0]->{att}->{class}=~ m{\\b$class\\b} }; } + + my $id_test=''; + #warn "id: '$id'"; + if( $id) + { $id_test = qq{ defined( \$_[0]->{att}->{$ID}) && \$_[0]->{att}->{$ID} eq '$id' }; } + + + #warn "gi_test: '$gi_test' - class_test: '$class_test' returning ", _and( $gi_test, $class_test); + return _and( $gi_test, $class_test, $id_test); + } + + + # input: the original predicate + sub _parse_predicate_in_step + { my $cond= shift; + my %PERL_ALPHA_TEST= ( '=' => ' eq ', '!=' => ' ne ', '>' => ' gt ', '>=' => ' ge ', '<' => ' lt ', '<=' => ' le '); + + $cond=~ s{^\s*\[\s*}{}; + $cond=~ s{\s*\]\s*$}{}; + $cond=~ s{( ($REG_STRING|$REG_REGEXP) # strings or regexps + |\@($REG_TAG_NAME)(?=\s*(?:[><=!]|!~|=~)) # @att (followed by a comparison operator) + |\@($REG_TAG_NAME) # @att (not followed by a comparison operator) + |=~|!~ # matching operators + |([><]=?|=|!=)(?=\s*[\d+-]) # test before a number + 
|([><]=?|=|!=) # test, other cases + |($REG_FUNCTION) # no arg functions + # this bit is a mess, but it is the only solution with this half-baked parser + |((?:string|text)\(\s*$REG_TAG_NAME\s*\)\s*$REG_MATCH\s*$REG_REGEXP) # string( child) =~ /regexp/ + |((?:string|text)\(\s*$REG_TAG_NAME\s*\)\s*!?=\s*$REG_VALUE) # string( child) = "value" (or !=) + |((?:string|text)\(\s*$REG_TAG_NAME\s*\)\s*[<>]=?\s*$REG_VALUE) # string( child) > "value" + |(and|or) + )} + { my( $token, $string, $att, $bare_att, $num_test, $alpha_test, $func, $string_regexp, $string_eq, $string_test, $and_or) + = ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11); + + if( defined $string) { $token } + elsif( $att) { "( \$_[0]->{att} && exists( \$_[0]->{att}->{'$att'}) && \$_[0]->{att}->{'$att'})"; } + elsif( $bare_att) { "(\$_[0]->{att} && defined( \$_[0]->{att}->{'$bare_att'}))"; } + elsif( $num_test && ($num_test eq '=') ) { "==" } # others tests are unchanged + elsif( $alpha_test) { $PERL_ALPHA_TEST{$alpha_test} } + elsif( $func && $func=~ m{^(?:string|text)}) + { "\$_[0]->text"; } + elsif( $string_regexp && $string_regexp =~ m{(?:string|text)\(\s*($REG_TAG_NAME)\s*\)\s*($REG_MATCH)\s*($REG_REGEXP)}) + { "(XML::Twig::_first_n { (\$_->gi eq '$1') && (\$_->text $2 $3) } 1, \$_[0]->_children)"; } + elsif( $string_eq && $string_eq =~ m{(?:string|text)\(\s*($REG_TAG_NAME)\s*\)\s*(!?=)\s*($REG_VALUE)}) + {"(XML::Twig::_first_n { (\$_->gi eq '$1') && (\$_->text $PERL_ALPHA_TEST{$2} $3) } 1, \$_[0]->_children)"; } + elsif( $string_test && $string_test =~ m{(?:string|text)\(\s*($REG_TAG_NAME)\s*\)\s*([<>]=?)\s*($REG_VALUE)}) + { "(XML::Twig::_first_n { (\$_->gi eq '$1') && (\$_->text $2 $3) } 1, \$_[0]->_children)"; } + elsif( $and_or) { $and_or eq 'and' ? '&&' : '||' ; } + else { $token; } + }gexs; + return "($cond)"; + } + + + sub _op + { my $op= shift; + if( $op eq '=') { $op= 'eq'; } + elsif( $op eq '!=') { $op= 'ne'; } + return $op; + } + + sub passes + { my( $elt, $cond)= @_; + return $elt unless $cond; + my $sub= ($cond_cache{$cond} ||= _install_cond( $cond)); + return $sub->( $elt); + } +} + +sub set_parent + { $_[0]->{parent}= $_[1]; + if( $XML::Twig::weakrefs) { weaken( $_[0]->{parent}); } + } + +sub parent + { my $elt= shift; + my $cond= shift || return $elt->{parent}; + do { $elt= $elt->{parent} || return; } until ( $elt->passes( $cond)); + return $elt; + } + +sub set_first_child + { $_[0]->{'first_child'}= $_[1]; + } + +sub first_child + { my $elt= shift; + my $cond= shift || return $elt->{first_child}; + my $child= $elt->{first_child}; + my $test_cond= ($cond_cache{$cond} ||= _install_cond( $cond)); + while( $child && !$test_cond->( $child)) + { $child= $child->{next_sibling}; } + return $child; + } + +sub _first_child { return $_[0]->{first_child}; } +sub _last_child { return $_[0]->{last_child}; } +sub _next_sibling { return $_[0]->{next_sibling}; } +sub _prev_sibling { return $_[0]->{prev_sibling}; } +sub _parent { return $_[0]->{parent}; } +sub _next_siblings { my $elt= shift; my @siblings; while( $elt= $elt->{next_sibling}) { push @siblings, $elt; } return @siblings; } +sub _prev_siblings { my $elt= shift; my @siblings; while( $elt= $elt->{prev_sibling}) { push @siblings, $elt; } return @siblings; } + +# sets a field +# arguments $record, $cond, @content +sub set_field + { my $record = shift; + my $cond = shift; + my $child= $record->first_child( $cond); + if( $child) + { $child->set_content( @_); } + else + { if( $cond=~ m{^\s*($REG_TAG_NAME)}) + { my $gi= $1; + $child= $record->insert_new_elt( last_child => 
$gi, @_); + } + else + { croak "can't create a field name from $cond"; } + } + return $child; + } + +sub set_last_child + { $_[0]->{'last_child'}= $_[1]; + if( $XML::Twig::weakrefs) { weaken( $_[0]->{'last_child'}); } + } + +sub last_child + { my $elt= shift; + my $cond= shift || return $elt->{last_child}; + my $test_cond= ($cond_cache{$cond} ||= _install_cond( $cond)); + my $child= $elt->{last_child}; + while( $child && !$test_cond->( $child) ) + { $child= $child->{prev_sibling}; } + return $child + } + + +sub set_prev_sibling + { $_[0]->{'prev_sibling'}= $_[1]; + if( $XML::Twig::weakrefs) { weaken( $_[0]->{'prev_sibling'}); } + } + +sub prev_sibling + { my $elt= shift; + my $cond= shift || return $elt->{prev_sibling}; + my $test_cond= ($cond_cache{$cond} ||= _install_cond( $cond)); + my $sibling= $elt->{prev_sibling}; + while( $sibling && !$test_cond->( $sibling) ) + { $sibling= $sibling->{prev_sibling}; } + return $sibling; + } + +sub set_next_sibling { $_[0]->{'next_sibling'}= $_[1]; } + +sub next_sibling + { my $elt= shift; + my $cond= shift || return $elt->{next_sibling}; + my $test_cond= ($cond_cache{$cond} ||= _install_cond( $cond)); + my $sibling= $elt->{next_sibling}; + while( $sibling && !$test_cond->( $sibling) ) + { $sibling= $sibling->{next_sibling}; } + return $sibling; + } + +# methods dealing with the class attribute, convenient if you work with xhtml +sub class { $_[0]->{att}->{class}; } +# lvalue version of class. separate from class to avoid problem like RT# +sub lclass + :lvalue # > perl 5.5 + { $_[0]->{att}->{class}; } + +sub set_class { my( $elt, $class)= @_; $elt->set_att( class => $class); } + +# adds a class to an element +sub add_to_class + { my( $elt, $new_class)= @_; + return $elt unless $new_class; + my $class= $elt->class; + my %class= $class ? map { $_ => 1 } split /\s+/, $class : (); + $class{$new_class}= 1; + $elt->set_class( join( ' ', sort keys %class)); + } + +sub remove_class + { my( $elt, $class_to_remove)= @_; + return $elt unless $class_to_remove; + my $class= $elt->class; + my %class= $class ? map { $_ => 1 } split /\s+/, $class : (); + delete $class{$class_to_remove}; + $elt->set_class( join( ' ', sort keys %class)); + } + +sub att_to_class { my( $elt, $att)= @_; $elt->set_class( $elt->{'att'}->{$att}); } +sub add_att_to_class { my( $elt, $att)= @_; $elt->add_to_class( $elt->{'att'}->{$att}); } +sub move_att_to_class { my( $elt, $att)= @_; $elt->add_to_class( $elt->{'att'}->{$att}); + $elt->del_att( $att); + } +sub tag_to_class { my( $elt)= @_; $elt->set_class( $elt->tag); } +sub add_tag_to_class { my( $elt)= @_; $elt->add_to_class( $elt->tag); } +sub set_tag_class { my( $elt, $new_tag)= @_; $elt->add_tag_to_class; $elt->set_tag( $new_tag); } + +sub tag_to_span + { my( $elt)= @_; + $elt->set_class( $elt->tag) unless( $elt->tag eq 'span' && $elt->class); # set class to span unless it would mean replacing it with span + $elt->set_tag( 'span'); + } + +sub tag_to_div + { my( $elt)= @_; + $elt->set_class( $elt->tag) unless( $elt->tag eq 'div' && $elt->class); # set class to div unless it would mean replacing it with div + $elt->set_tag( 'div'); + } + +sub in_class + { my( $elt, $class)= @_; + my $elt_class= $elt->class; + return unless( defined $elt_class); + return $elt->class=~ m{(?:^|\s)\Q$class\E(?:\s|$)} ? 
$elt : 0; + } + + +# get or set all attributes +# argument can be a hash or a hashref +sub set_atts + { my $elt= shift; + my %atts; + tie %atts, 'Tie::IxHash' if( keep_atts_order()); + %atts= ( (ref( $_[0] || '') eq 'HASH') || isa( $_[0] || '', 'HASH')) ? %{$_[0]} : @_; + $elt->{att}= \%atts; + if( exists $atts{$ID}) { $elt->_set_id( $atts{$ID}); } + return $elt; + } + +sub atts { return $_[0]->{att}; } +sub att_names { return (sort keys %{$_[0]->{att}}); } +sub del_atts { $_[0]->{att}={}; return $_[0]; } + +# get or set a single attribute (set works for several atts) +sub set_att + { my $elt= shift; + + if( $_[0] && ref( $_[0]) && !$_[1]) + { croak "improper call to set_att, usage is \$elt->set_att( att1 => 'val1', att2 => 'val2',...)"; } + + unless( $elt->{att}) + { $elt->{att}={}; + tie %{$elt->{att}}, 'Tie::IxHash' if( keep_atts_order()); + } + + while(@_) + { my( $att, $val)= (shift, shift); + $elt->{att}->{$att}= $val; + if( $att eq $ID) { $elt->_set_id( $val); } + } + return $elt; + } + +sub att { $_[0]->{att}->{$_[1]}; } +# lvalue version of att. separate from class to avoid problem like RT# +sub latt + :lvalue # > perl 5.5 + { $_[0]->{att}->{$_[1]}; } + +sub del_att + { my $elt= shift; + while( @_) { delete $elt->{'att'}->{shift()}; } + return $elt; + } + +sub att_exists { return exists $_[0]->{att}->{$_[1]}; } + +# delete an attribute from all descendants of an element +sub strip_att + { my( $elt, $att)= @_; + $_->del_att( $att) foreach ($elt->descendants_or_self( qq{*[\@$att]})); + return $elt; + } + +sub change_att_name + { my( $elt, $old_name, $new_name)= @_; + my $value= $elt->{'att'}->{$old_name}; + return $elt unless( defined $value); + $elt->del_att( $old_name) + ->set_att( $new_name => $value); + return $elt; + } + +sub lc_attnames + { my $elt= shift; + foreach my $att ($elt->att_names) + { if( $att ne lc $att) { $elt->change_att_name( $att, lc $att); } } + return $elt; + } + +sub set_twig_current { $_[0]->{twig_current}=1; } +sub del_twig_current { delete $_[0]->{twig_current}; } + + +# get or set the id attribute +sub set_id + { my( $elt, $id)= @_; + $elt->del_id() if( exists $elt->{att}->{$ID}); + $elt->set_att($ID, $id); + $elt->_set_id( $id); + return $elt; + } + +# only set id, does not update the attribute value +sub _set_id + { my( $elt, $id)= @_; + my $t= $elt->twig || $elt; + $t->{twig_id_list}->{$id}= $elt; + if( $XML::Twig::weakrefs) { weaken( $t->{twig_id_list}->{$id}); } + return $elt; + } + +sub id { return $_[0]->{att}->{$ID}; } + +# methods used to add ids to elements that don't have one +BEGIN +{ my $id_nb = "0001"; + my $id_seed = "twig_id_"; + + sub set_id_seed ## no critic (Subroutines::ProhibitNestedSubs); + { $id_seed= $_[1]; $id_nb=1; } + + sub add_id ## no critic (Subroutines::ProhibitNestedSubs); + { my $elt= shift; + if( defined $elt->{'att'}->{$ID}) + { return $elt->{'att'}->{$ID}; } + else + { my $id= $_[0] && ref( $_[0]) && isa( $_[0], 'CODE') ? $_[0]->( $elt) : $id_seed . $id_nb++; + $elt->set_id( $id); + return $id; + } + } +} + + + +# delete the id attribute and remove the element from the id list +sub del_id + { my $elt= shift; + if( ! 
exists $elt->{att}->{$ID}) { return $elt }; + my $id= $elt->{att}->{$ID}; + + delete $elt->{att}->{$ID}; + + my $t= shift || $elt->twig; + unless( $t) { return $elt; } + if( exists $t->{twig_id_list}->{$id}) { delete $t->{twig_id_list}->{$id}; } + + return $elt; + } + +# return the list of children +sub children + { my $elt= shift; + my @children; + my $child= $elt->first_child( @_); + while( $child) + { push @children, $child; + $child= $child->next_sibling( @_); + } + return @children; + } + +sub _children + { my $elt= shift; + my @children=(); + my $child= $elt->{first_child}; + while( $child) + { push @children, $child; + $child= $child->{next_sibling}; + } + return @children; + } + +sub children_copy + { my $elt= shift; + my @children; + my $child= $elt->first_child( @_); + while( $child) + { push @children, $child->copy; + $child= $child->next_sibling( @_); + } + return @children; + } + + +sub children_count + { my $elt= shift; + my $cond= shift; + my $count=0; + my $child= $elt->{first_child}; + while( $child) + { $count++ if( $child->passes( $cond)); + $child= $child->{next_sibling}; + } + return $count; + } + +sub children_text + { my $elt= shift; + return wantarray() ? map { $_->text} $elt->children( @_) + : join( '', map { $_->text} $elt->children( @_) ) + ; + } + +sub children_trimmed_text + { my $elt= shift; + return wantarray() ? map { $_->trimmed_text} $elt->children( @_) + : join( '', map { $_->trimmed_text} $elt->children( @_) ) + ; + } + +sub all_children_are + { my( $parent, $cond)= @_; + foreach my $child ($parent->_children) + { return 0 unless( $child->passes( $cond)); } + return $parent; + } + + +sub ancestors + { my( $elt, $cond)= @_; + my @ancestors; + while( $elt->{parent}) + { $elt= $elt->{parent}; + push @ancestors, $elt if( $elt->passes( $cond)); + } + return @ancestors; + } + +sub ancestors_or_self + { my( $elt, $cond)= @_; + my @ancestors; + while( $elt) + { push @ancestors, $elt if( $elt->passes( $cond)); + $elt= $elt->{parent}; + } + return @ancestors; + } + + +sub _ancestors + { my( $elt, $include_self)= @_; + my @ancestors= $include_self ? 
($elt) : (); + while( $elt= $elt->{parent}) { push @ancestors, $elt; } + return @ancestors; + } + + +sub inherit_att + { my $elt= shift; + my $att= shift; + my %tags= map { ($_, 1) } @_; + + do + { if( (defined $elt->{'att'}->{$att}) + && ( !%tags || $tags{$XML::Twig::index2gi[$elt->{'gi'}]}) + ) + { return $elt->{'att'}->{$att}; } + } while( $elt= $elt->{parent}); + return undef; + } + +sub _inherit_att_through_cut + { my $elt= shift; + my $att= shift; + my %tags= map { ($_, 1) } @_; + + do + { if( (defined $elt->{'att'}->{$att}) + && ( !%tags || $tags{$XML::Twig::index2gi[$elt->{'gi'}]}) + ) + { return $elt->{'att'}->{$att}; } + } while( $elt= $elt->{parent} || ($elt->{former} && $elt->{former}->{parent})); + return undef; + } + + +sub current_ns_prefixes + { my $elt= shift; + my %prefix; + $prefix{''}=1 if( $elt->namespace( '')); + while( $elt) + { my @ns= grep { !m{^xml} } map { m{^([^:]+):} } ($XML::Twig::index2gi[$elt->{'gi'}], $elt->att_names); + $prefix{$_}=1 foreach (@ns); + $elt= $elt->{parent}; + } + + return (sort keys %prefix); + } + +# kinda counter-intuitive actually: +# the next element is found by looking for the next open tag after from the +# current one, which is the first child, if it exists, or the next sibling +# or the first next sibling of an ancestor +# optional arguments are: +# - $subtree_root: a reference to an element, when the next element is not +# within $subtree_root anymore then next_elt returns undef +# - $cond: a condition, next_elt returns the next element matching the condition + +sub next_elt + { my $elt= shift; + my $subtree_root= 0; + $subtree_root= shift if( ref( $_[0]) && isa( $_[0], 'XML::Twig::Elt')); + my $cond= shift; + my $next_elt; + + my $ind; # optimization + my $test_cond; + if( $cond) # optimization + { unless( defined( $ind= $XML::Twig::gi2index{$cond}) ) # optimization + { $test_cond= ($cond_cache{$cond} ||= _install_cond( $cond)); } # optimization + } # optimization + + do + { if( $next_elt= $elt->{first_child}) + { # simplest case: the elt has a child + } + elsif( $next_elt= $elt->{next_sibling}) + { # no child but a next sibling (just check we stay within the subtree) + + # case where elt is subtree_root, is empty and has a sibling + return undef if( $subtree_root && ($elt == $subtree_root)); + + } + else + { # case where the element has no child and no next sibling: + # get the first next sibling of an ancestor, checking subtree_root + + # case where elt is subtree_root, is empty and has no sibling + return undef if( $subtree_root && ($elt == $subtree_root)); + + $next_elt= $elt->{parent}; + + until( $next_elt->{next_sibling}) + { return undef if( $subtree_root && ($subtree_root == $next_elt)); + $next_elt= $next_elt->{parent} || return undef; + } + return undef if( $subtree_root && ($subtree_root == $next_elt)); + $next_elt= $next_elt->{next_sibling}; + } + $elt= $next_elt; # just in case we need to loop + } until( ! defined $elt + || ! 
defined $cond + || (defined $ind && ($elt->{gi} eq $ind)) # optimization + || (defined $test_cond && ($test_cond->( $elt))) + ); + + return $elt; + } + +# return the next_elt within the element +# just call next_elt with the element as first and second argument +sub first_descendant { return $_[0]->next_elt( @_); } + +# get the last descendant, # then return the element found or call prev_elt with the condition +sub last_descendant + { my( $elt, $cond)= @_; + my $last_descendant= $elt->_last_descendant; + if( !$cond || $last_descendant->matches( $cond)) + { return $last_descendant; } + else + { return $last_descendant->prev_elt( $elt, $cond); } + } + +# no argument allowed here, just go down the last_child recursively +sub _last_descendant + { my $elt= shift; + while( my $child= $elt->{last_child}) { $elt= $child; } + return $elt; + } + +# counter-intuitive too: +# the previous element is found by looking +# for the first open tag backwards from the current one +# it's the last descendant of the previous sibling +# if it exists, otherwise it's simply the parent +sub prev_elt + { my $elt= shift; + my $subtree_root= 0; + if( defined $_[0] and (ref( $_[0]) && isa( $_[0], 'XML::Twig::Elt'))) + { $subtree_root= shift ; + return undef if( $elt == $subtree_root); + } + my $cond= shift; + # get prev elt + my $prev_elt; + do + { return undef if( $elt == $subtree_root); + if( $prev_elt= $elt->{prev_sibling}) + { while( $prev_elt->{last_child}) + { $prev_elt= $prev_elt->{last_child}; } + } + else + { $prev_elt= $elt->{parent} || return undef; } + $elt= $prev_elt; # in case we need to loop + } until( $elt->passes( $cond)); + + return $elt; + } + +sub _following_elt + { my( $elt)= @_; + while( $elt && !$elt->{next_sibling}) + { $elt= $elt->{parent}; } + return $elt ? $elt->{next_sibling} : undef; + } + +sub following_elt + { my( $elt, $cond)= @_; + $elt= $elt->_following_elt || return undef; + return $elt if( !$cond || $elt->matches( $cond)); + return $elt->next_elt( $cond); + } + +sub following_elts + { my( $elt, $cond)= @_; + if( !$cond) { undef $cond; } + my $following= $elt->following_elt( $cond); + if( $following) + { my @followings= $following; + while( $following= $following->next_elt( $cond)) + { push @followings, $following; } + return( @followings); + } + else + { return (); } + } + +sub _preceding_elt + { my( $elt)= @_; + while( $elt && !$elt->{prev_sibling}) + { $elt= $elt->{parent}; } + return $elt ? $elt->{prev_sibling}->_last_descendant : undef; + } + +sub preceding_elt + { my( $elt, $cond)= @_; + $elt= $elt->_preceding_elt || return undef; + return $elt if( !$cond || $elt->matches( $cond)); + return $elt->prev_elt( $cond); + } + +sub preceding_elts + { my( $elt, $cond)= @_; + if( !$cond) { undef $cond; } + my $preceding= $elt->preceding_elt( $cond); + if( $preceding) + { my @precedings= $preceding; + while( $preceding= $preceding->prev_elt( $cond)) + { push @precedings, $preceding; } + return( @precedings); + } + else + { return (); } + } + +# used in get_xpath +sub _self + { my( $elt, $cond)= @_; + return $cond ? 
$elt->matches( $cond) : $elt; + } + +sub next_n_elt + { my $elt= shift; + my $offset= shift || return undef; + foreach (1..$offset) + { $elt= $elt->next_elt( @_) || return undef; } + return $elt; + } + +# checks whether $elt is included in $ancestor, returns 1 in that case +sub in + { my ($elt, $ancestor)= @_; + if( ref( $ancestor) && isa( $ancestor, 'XML::Twig::Elt')) + { # element + while( $elt= $elt->{parent}) { return $elt if( $elt == $ancestor); } + } + else + { # condition + while( $elt= $elt->{parent}) { return $elt if( $elt->matches( $ancestor)); } + } + return 0; + } + +sub first_child_text + { my $elt= shift; + my $dest=$elt->first_child(@_) or return ''; + return $dest->text; + } + +sub fields + { my $elt= shift; + return map { $elt->field( $_) } @_; + } + +sub first_child_trimmed_text + { my $elt= shift; + my $dest=$elt->first_child(@_) or return ''; + return $dest->trimmed_text; + } + +sub first_child_matches + { my $elt= shift; + my $dest= $elt->{first_child} or return undef; + return $dest->passes( @_); + } + +sub last_child_text + { my $elt= shift; + my $dest=$elt->last_child(@_) or return ''; + return $dest->text; + } + +sub last_child_trimmed_text + { my $elt= shift; + my $dest=$elt->last_child(@_) or return ''; + return $dest->trimmed_text; + } + +sub last_child_matches + { my $elt= shift; + my $dest= $elt->{last_child} or return undef; + return $dest->passes( @_); + } + +sub child_text + { my $elt= shift; + my $dest=$elt->child(@_) or return ''; + return $dest->text; + } + +sub child_trimmed_text + { my $elt= shift; + my $dest=$elt->child(@_) or return ''; + return $dest->trimmed_text; + } + +sub child_matches + { my $elt= shift; + my $nb= shift; + my $dest= $elt->child( $nb) or return undef; + return $dest->passes( @_); + } + +sub prev_sibling_text + { my $elt= shift; + my $dest=$elt->_prev_sibling(@_) or return ''; + return $dest->text; + } + +sub prev_sibling_trimmed_text + { my $elt= shift; + my $dest=$elt->_prev_sibling(@_) or return ''; + return $dest->trimmed_text; + } + +sub prev_sibling_matches + { my $elt= shift; + my $dest= $elt->{prev_sibling} or return undef; + return $dest->passes( @_); + } + +sub next_sibling_text + { my $elt= shift; + my $dest=$elt->next_sibling(@_) or return ''; + return $dest->text; + } + +sub next_sibling_trimmed_text + { my $elt= shift; + my $dest=$elt->next_sibling(@_) or return ''; + return $dest->trimmed_text; + } + +sub next_sibling_matches + { my $elt= shift; + my $dest= $elt->{next_sibling} or return undef; + return $dest->passes( @_); + } + +sub prev_elt_text + { my $elt= shift; + my $dest=$elt->prev_elt(@_) or return ''; + return $dest->text; + } + +sub prev_elt_trimmed_text + { my $elt= shift; + my $dest=$elt->prev_elt(@_) or return ''; + return $dest->trimmed_text; + } + +sub prev_elt_matches + { my $elt= shift; + my $dest= $elt->prev_elt or return undef; + return $dest->passes( @_); + } + +sub next_elt_text + { my $elt= shift; + my $dest=$elt->next_elt(@_) or return ''; + return $dest->text; + } + +sub next_elt_trimmed_text + { my $elt= shift; + my $dest=$elt->next_elt(@_) or return ''; + return $dest->trimmed_text; + } + +sub next_elt_matches + { my $elt= shift; + my $dest= $elt->next_elt or return undef; + return $dest->passes( @_); + } + +sub parent_text + { my $elt= shift; + my $dest=$elt->parent(@_) or return ''; + return $dest->text; + } + +sub parent_trimmed_text + { my $elt= shift; + my $dest=$elt->parent(@_) or return ''; + return $dest->trimmed_text; + } + +sub parent_matches + { my $elt= shift; + my $dest= 
$elt->{parent} or return undef; + return $dest->passes( @_); + } + +sub is_first_child + { my $elt= shift; + my $parent= $elt->{parent} or return 0; + my $first_child= $parent->first_child( @_) or return 0; + return ($first_child == $elt) ? $elt : 0; + } + +sub is_last_child + { my $elt= shift; + my $parent= $elt->{parent} or return 0; + my $last_child= $parent->last_child( @_) or return 0; + return ($last_child == $elt) ? $elt : 0; + } + +# returns the depth level of the element +# if 2 parameter are used then counts the 2cd element name in the +# ancestors list +sub level + { my( $elt, $cond)= @_; + my $level=0; + my $name=shift || ''; + while( $elt= $elt->{parent}) { $level++ if( !$cond || $elt->matches( $cond)); } + return $level; + } + +# checks whether $elt has an ancestor that satisfies $cond, returns the ancestor +sub in_context + { my ($elt, $cond, $level)= @_; + $level= -1 unless( $level) ; # $level-- will never hit 0 + + while( $level) + { $elt= $elt->{parent} or return 0; + if( $elt->matches( $cond)) { return $elt; } + $level--; + } + return 0; + } + +sub _descendants + { my( $subtree_root, $include_self)= @_; + my @descendants= $include_self ? ($subtree_root) : (); + + my $elt= $subtree_root; + my $next_elt; + + MAIN: while( 1) + { if( $next_elt= $elt->{first_child}) + { # simplest case: the elt has a child + } + elsif( $next_elt= $elt->{next_sibling}) + { # no child but a next sibling (just check we stay within the subtree) + + # case where elt is subtree_root, is empty and has a sibling + last MAIN if( $elt == $subtree_root); + } + else + { # case where the element has no child and no next sibling: + # get the first next sibling of an ancestor, checking subtree_root + + # case where elt is subtree_root, is empty and has no sibling + last MAIN if( $elt == $subtree_root); + + # backtrack until we find a parent with a next sibling + $next_elt= $elt->{parent} || last; + until( $next_elt->{next_sibling}) + { last MAIN if( $subtree_root == $next_elt); + $next_elt= $next_elt->{parent} || last MAIN; + } + last MAIN if( $subtree_root == $next_elt); + $next_elt= $next_elt->{next_sibling}; + } + $elt= $next_elt || last MAIN; + push @descendants, $elt; + } + return @descendants; + } + + +sub descendants + { my( $subtree_root, $cond)= @_; + my @descendants=(); + my $elt= $subtree_root; + + # this branch is pure optimization for speed: if $cond is a gi replace it + # by the index of the gi and loop here + # start optimization + my $ind; + if( !$cond || ( defined ( $ind= $XML::Twig::gi2index{$cond})) ) + { + my $next_elt; + + while( 1) + { if( $next_elt= $elt->{first_child}) + { # simplest case: the elt has a child + } + elsif( $next_elt= $elt->{next_sibling}) + { # no child but a next sibling (just check we stay within the subtree) + + # case where elt is subtree_root, is empty and has a sibling + last if( $subtree_root && ($elt == $subtree_root)); + } + else + { # case where the element has no child and no next sibling: + # get the first next sibling of an ancestor, checking subtree_root + + # case where elt is subtree_root, is empty and has no sibling + last if( $subtree_root && ($elt == $subtree_root)); + + # backtrack until we find a parent with a next sibling + $next_elt= $elt->{parent} || last undef; + until( $next_elt->{next_sibling}) + { last if( $subtree_root && ($subtree_root == $next_elt)); + $next_elt= $next_elt->{parent} || last; + } + last if( $subtree_root && ($subtree_root == $next_elt)); + $next_elt= $next_elt->{next_sibling}; + } + $elt= $next_elt || last; + push 
@descendants, $elt if( !$cond || ($elt->{gi} eq $ind)); + } + } + else + # end optimization + { # branch for a complex condition: use the regular (slow but simple) way + while( $elt= $elt->next_elt( $subtree_root, $cond)) + { push @descendants, $elt; } + } + return @descendants; + } + + +sub descendants_or_self + { my( $elt, $cond)= @_; + my @descendants= $elt->passes( $cond) ? ($elt) : (); + push @descendants, $elt->descendants( $cond); + return @descendants; + } + +sub sibling + { my $elt= shift; + my $nb= shift; + if( $nb > 0) + { foreach( 1..$nb) + { $elt= $elt->next_sibling( @_) or return undef; } + } + elsif( $nb < 0) + { foreach( 1..(-$nb)) + { $elt= $elt->prev_sibling( @_) or return undef; } + } + else # $nb == 0 + { return $elt->passes( $_[0]); } + return $elt; + } + +sub sibling_text + { my $elt= sibling( @_); + return $elt ? $elt->text : undef; + } + + +sub child + { my $elt= shift; + my $nb= shift; + if( $nb >= 0) + { $elt= $elt->first_child( @_) or return undef; + foreach( 1..$nb) + { $elt= $elt->next_sibling( @_) or return undef; } + } + else + { $elt= $elt->last_child( @_) or return undef; + foreach( 2..(-$nb)) + { $elt= $elt->prev_sibling( @_) or return undef; } + } + return $elt; + } + +sub prev_siblings + { my $elt= shift; + my @siblings=(); + while( $elt= $elt->prev_sibling( @_)) + { unshift @siblings, $elt; } + return @siblings; + } + +sub siblings + { my $elt= shift; + return grep { $_ ne $elt } $elt->{parent}->children( @_); + } + +sub pos + { my $elt= shift; + return 0 if ($_[0] && !$elt->matches( @_)); + my $pos=1; + $pos++ while( $elt= $elt->prev_sibling( @_)); + return $pos; + } + + +sub next_siblings + { my $elt= shift; + my @siblings=(); + while( $elt= $elt->next_sibling( @_)) + { push @siblings, $elt; } + return @siblings; + } + + +# used by get_xpath: parses the xpath expression and generates a sub that performs the +# search +{ my %axis2method; + BEGIN { %axis2method= ( child => 'children', + descendant => 'descendants', + 'descendant-or-self' => 'descendants_or_self', + parent => 'parent_is', + ancestor => 'ancestors', + 'ancestor-or-self' => 'ancestors_or_self', + 'following-sibling' => 'next_siblings', + 'preceding-sibling' => 'prev_siblings', + following => 'following_elts', + preceding => 'preceding_elts', + self => '_self', + ); + } + + sub _install_xpath + { my( $xpath_exp, $type)= @_; + my $original_exp= $xpath_exp; + my $sub= 'my $elt= shift; my @results;'; + + # grab the root if expression starts with a / + if( $xpath_exp=~ s{^/}{}) + { $sub .= '@results= ($elt->twig) || croak "cannot use an XPath query starting with a / on a node not attached to a whole twig";'; } + elsif( $xpath_exp=~ s{^\./}{}) + { $sub .= '@results= ($elt);'; } + else + { $sub .= '@results= ($elt);'; } + + + #warn "xpath_exp= '$xpath_exp'\n"; + + while( $xpath_exp && + $xpath_exp=~s{^\s*(/?) + # the xxx=~/regexp/ is a pain as it includes / + (\s*(?:(?:($REG_AXIS)::)?(\*|$REG_TAG_PART|\.\.|\.)\s*)?($REG_PREDICATE_ALT*) + ) + (/|$)}{}xo) + + { my( $wildcard, $sub_exp, $axis, $gi, $predicates)= ($1, $2, $3, $4, $5); + if( $axis && ! 
$gi) + { _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp"); } + + # grab a parent + if( $sub_exp eq '..') + { _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp") if( $wildcard); + $sub .= '@results= map { $_->{parent}} @results;'; + } + # test the element itself + elsif( $sub_exp=~ m{^\.(.*)$}s) + { $sub .= "\@results= grep { \$_->matches( q{$1}) } \@results;" } + # grab children + else + { + if( !$axis) + { $axis= $wildcard ? 'descendant' : 'child'; } + if( !$gi or $gi eq '*') { $gi=''; } + my $function; + + # "special" predicates, that return just one element + if( $predicates && ($predicates =~ m{^\s*\[\s*((-\s*)?\d+)\s*\]\s*$})) + { # [] + my $offset= $1; + $offset-- if( $offset > 0); + $function= $axis eq 'descendant' ? "next_n_elt( $offset, '$gi')" + : $axis eq 'child' ? "child( $offset, '$gi')" + : _croak_and_doublecheck_xpath( $original_exp, "error [$1] not supported along axis '$axis'") + ; + $sub .= "\@results= grep { \$_ } map { \$_->$function } \@results;" + } + elsif( $predicates && ($predicates =~ m{^\s*\[\s*last\s*\(\s*\)\s*\]\s*$}) ) + { # last() + _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp, usage of // and last() not supported") if( $wildcard); + $sub .= "\@results= map { \$_->last_child( '$gi') } \@results;"; + } + else + { # follow the axis + #warn "axis: '$axis' - method: '$axis2method{$axis}' - gi: '$gi'\n"; + + my $follow_axis= " \$_->$axis2method{$axis}( '$gi')"; + my $step= $follow_axis; + + # now filter using the predicate + while( $predicates=~ s{^\s*($REG_PREDICATE_ALT)\s*}{}o) + { my $pred= $1; + $pred=~ s{^\s*\[\s*}{}; + $pred=~ s{\s*\]\s*$}{}; + my $test=""; + my $pos; + if( $pred=~ m{^(-?\s*\d+)$}) + { my $pos= $1; + if( $step=~ m{^\s*grep(.*) (\$_->\w+\(\s*'[^']*'\s*\))}) + { $step= "XML::Twig::_first_n $1 $pos, $2"; } + else + { if( $pos > 0) { $pos--; } + $step= "($step)[$pos]"; + } + #warn "number predicate '$pos' - generated step '$step'\n"; + } + else + { my $syntax_error=0; + do + { if( $pred =~ s{^string\(\s*\)\s*=\s*($REG_STRING)\s*}{}o) # string()="string" pred + { $test .= "\$_->text eq $1"; } + elsif( $pred =~ s{^string\(\s*\)\s*!=\s*($REG_STRING)\s*}{}o) # string()!="string" pred + { $test .= "\$_->text ne $1"; } + if( $pred =~ s{^string\(\s*\)\s*=\s*($REG_NUMBER)\s*}{}o) # string()= pred + { $test .= "\$_->text eq $1"; } + elsif( $pred =~ s{^string\(\s*\)\s*!=\s*($REG_NUMBER)\s*}{}o) # string()!= pred + { $test .= "\$_->text ne $1"; } + elsif( $pred =~ s{^string\(\s*\)\s*(>|<|>=|<=)\s*($REG_NUMBER)\s*}{}o) # string()!= pred + { $test .= "\$_->text $1 $2"; } + + elsif( $pred =~ s{^string\(\s*\)\s*($REG_MATCH)\s*($REG_REGEXP)\s*}{}o) # string()=~/regex/ pred + { my( $match, $regexp)= ($1, $2); + $test .= "\$_->text $match $regexp"; + } + elsif( $pred =~ s{^string\(\s*\)\s*}{}o) # string() pred + { $test .= "\$_->text"; } + elsif( $pred=~ s{^@($REG_TAG_NAME)\s*($REG_OP)\s*($REG_STRING|$REG_NUMBER)}{}o) # @att="val" pred + { my( $att, $oper, $val)= ($1, _op( $2), $3); + $test .= qq{((defined \$_->{'att'}->{"$att"}) && (\$_->{'att'}->{"$att"} $oper $val))}; + } + elsif( $pred =~ s{^@($REG_TAG_NAME)\s*($REG_MATCH)\s*($REG_REGEXP)\s*}{}o) # @att=~/regex/ pred XXX + { my( $att, $match, $regexp)= ($1, $2, $3); + $test .= qq{((defined \$_->{'att'}->{"$att"}) && (\$_->{'att'}->{"$att"} $match $regexp))};; + } + elsif( $pred=~ s{^@($REG_TAG_NAME)\s*}{}o) # @att pred + { $test .= qq{(defined \$_->{'att'}->{"$1"})}; } + elsif( $pred=~ 
s{^\s*(?:not|!)\s*@($REG_TAG_NAME)\s*}{}o) # not @att pred + { $test .= qq{((\$_->is_elt) && (not defined \$_->{'att'}->{"$1"}))}; } + elsif( $pred=~ s{^\s*([()])}{}) # ( or ) (just add to the test) + { $test .= qq{$1}; } + elsif( $pred=~ s{^\s*(and|or)\s*}{}) + { $test .= lc " $1 "; } + else + { $syntax_error=1; } + + } while( !$syntax_error && $pred); + _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp at $pred") if( $pred); + $step= " grep { $test } $step "; + } + } + #warn "step: '$step'"; + $sub .= "\@results= grep { \$_ } map { $step } \@results;"; + } + } + } + + if( $xpath_exp) + { _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp around $xpath_exp"); } + + $sub .= q{return XML::Twig::_unique_elts( @results); }; + #warn "generated: '$sub'\n"; + my $s= eval "sub { $NO_WARNINGS; $sub }"; + if( $@) + { _croak_and_doublecheck_xpath( $original_exp, "error in xpath expression $original_exp ($@);") } + return( $s); + } +} + +sub _croak_and_doublecheck_xpath + { my $xpath_expression= shift; + my $mess= join( "\n", @_); + if( $XML::Twig::XPath::VERSION || 0) + { my $check_twig= XML::Twig::XPath->new; + if( eval { $check_twig->{twig_xp}->_parse( $xpath_expression) }) + { $mess .= "\nthe expression is a valid XPath statement, and you are using XML::Twig::XPath, but" + . "\nyou are using either 'find_nodes' or 'get_xpath' where the method you likely wanted" + . "\nto use is 'findnodes', which is the only one that uses the full XPath engine\n"; + } + } + croak $mess; + } + + + +{ # extremely elaborate caching mechanism + my %xpath; # xpath_expression => subroutine_code; + sub get_xpath + { my( $elt, $xpath_exp, $offset)= @_; + my $sub= ($xpath{$xpath_exp} ||= _install_xpath( $xpath_exp)); + return $sub->( $elt) unless( defined $offset); + my @res= $sub->( $elt); + return $res[$offset]; + } +} + + +sub findvalues + { my $elt= shift; + return map { $_->text } $elt->get_xpath( @_); + } + +sub findvalue + { my $elt= shift; + return join '', map { $_->text } $elt->get_xpath( @_); + } + + +# XML::XPath compatibility +sub getElementById { return $_[0]->twig->elt_id( $_[1]); } +sub getChildNodes { my @children= do { my $elt= $_[0]; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; return wantarray ? @children : \@children; } + +sub _flushed { return $_[0]->{flushed}; } +sub _set_flushed { $_[0]->{flushed}=1; } +sub _del_flushed { delete $_[0]->{flushed}; } + +sub cut + { my $elt= shift; + my( $parent, $prev_sibling, $next_sibling); + $parent= $elt->{parent}; + my $a= $elt->{'att'}->{'a'} || 'na'; + if( ! $parent && $elt->is_elt) + { # are we cutting the root? + my $t= $elt->{twig}; + if( $t && ! 
$t->{twig_parsing}) + { delete $t->{twig_root}; + delete $elt->{twig}; + return $elt; + } # cutt`ing the root + else + { return; } # cutting an orphan, returning $elt would break backward compatibility + } + + # save the old links, that'll make it easier for some loops + foreach my $link ( qw(parent prev_sibling next_sibling) ) + { $elt->{former}->{$link}= $elt->{$link}; + if( $XML::Twig::weakrefs) { weaken( $elt->{former}->{$link}); } + } + + # if we cut the current element then its parent becomes the current elt + if( $elt->{twig_current}) + { my $twig_current= $elt->{parent}; + $elt->twig->{twig_current}= $twig_current; + $twig_current->{'twig_current'}=1; + delete $elt->{'twig_current'}; + } + + if( $parent->{first_child} && $parent->{first_child} == $elt) + { $parent->{first_child}= $elt->{next_sibling}; + # cutting can make the parent empty + if( ! $parent->{first_child}) { $parent->{empty}= 1; } + } + + if( $parent->{last_child} && $parent->{last_child} == $elt) + { $parent->{empty}=0; $parent->{last_child}=$elt->{prev_sibling}; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; + } + + if( $prev_sibling= $elt->{prev_sibling}) + { $prev_sibling->{next_sibling}= $elt->{next_sibling}; } + if( $next_sibling= $elt->{next_sibling}) + { $next_sibling->{prev_sibling}=$elt->{prev_sibling}; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; } + + + $elt->{parent}=undef; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; + $elt->{prev_sibling}=undef; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; + $elt->{next_sibling}= undef; + + # merge 2 (now) consecutive text nodes if they are of the same type + # (type can be PCDATA or CDATA) + if( $prev_sibling && $next_sibling && $prev_sibling->is_text && ( $XML::Twig::index2gi[$prev_sibling->{'gi'}] eq $XML::Twig::index2gi[$next_sibling->{'gi'}])) + { $prev_sibling->merge_text( $next_sibling); } + + return $elt; + } + + +sub former_next_sibling { return $_[0]->{former}->{next_sibling}; } +sub former_prev_sibling { return $_[0]->{former}->{prev_sibling}; } +sub former_parent { return $_[0]->{former}->{parent}; } + +sub cut_children + { my( $elt, $exp)= @_; + my @children= $elt->children( $exp); + foreach (@children) { $_->cut; } + if( ! $elt->has_children) { $elt->{empty}= 1; } + return @children; + } + +sub cut_descendants + { my( $elt, $exp)= @_; + my @descendants= $elt->descendants( $exp); + foreach ($elt->descendants( $exp)) { $_->cut; } + if( ! 
$elt->has_children) { $elt->{empty}= 1; } + return @descendants; + } + + + +sub erase + { my $elt= shift; + #you cannot erase the current element + if( $elt->{twig_current}) + { croak "trying to erase an element before it has been completely parsed"; } + unless( $elt->{parent}) + { # trying to erase the root (of a twig or of a cut/new element) + my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; + unless( @children == 1) + { croak "can only erase an element with no parent if it has a single child"; } + $elt->_move_extra_data_after_erase; + my $child= shift @children; + $child->{parent}=undef; if( $XML::Twig::weakrefs) { weaken( $child->{parent});} ; + my $twig= $elt->twig; + $twig->set_root( $child); + } + else + { # normal case + $elt->_move_extra_data_after_erase; + my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; + if( @children) + { # elt has children, move them up + + my $first_child= $elt->{first_child}; + my $prev_sibling=$elt->{prev_sibling}; + if( $prev_sibling) + { # connect first child to previous sibling + $first_child->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $first_child->{prev_sibling});} ; + $prev_sibling->{next_sibling}= $first_child; + } + else + { # elt was the first child + $elt->{parent}->set_first_child( $first_child); + } + + my $last_child= $elt->{last_child}; + my $next_sibling= $elt->{next_sibling}; + if( $next_sibling) + { # connect last child to next sibling + $last_child->{next_sibling}= $next_sibling; + $next_sibling->{prev_sibling}=$last_child; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; + } + else + { # elt was the last child + $elt->{parent}->set_last_child( $last_child); + } + # update parent for all siblings + foreach my $child (@children) + { $child->{parent}=$elt->{parent}; if( $XML::Twig::weakrefs) { weaken( $child->{parent});} ; } + + # merge consecutive text elements if need be + if( $prev_sibling && $prev_sibling->is_text && ($XML::Twig::index2gi[$first_child->{'gi'}] eq $XML::Twig::index2gi[$prev_sibling->{'gi'}]) ) + { $prev_sibling->merge_text( $first_child); } + if( $next_sibling && $next_sibling->is_text && ($XML::Twig::index2gi[$last_child->{'gi'}] eq $XML::Twig::index2gi[$next_sibling->{'gi'}]) ) + { $last_child->merge_text( $next_sibling); } + + # if parsing and have now a PCDATA text, mark so we can normalize later on if need be + if( $elt->{parent}->{twig_current} && $elt->{last_child}->is_text) { $elt->{parent}->{twig_to_be_normalized}=1; } + + # elt is not referenced any more, so it will be DESTROYed + # so we'd better break the links to its children ## FIX + undef $elt->{first_child}; + undef $elt->{last_child}; + undef $elt->{parent}; + undef $elt->{next_sibling}; + undef $elt->{prev_sibling}; + + } + { # elt had no child, delete it + $elt->delete; + } + + } + return $elt; + + } + +sub _move_extra_data_after_erase + { my( $elt)= @_; + # extra_data + if( my $extra_data= $elt->{extra_data}) + { my $target= $elt->{first_child} || $elt->{next_sibling}; + if( $target) + { + if( $target->is( $ELT)) + { $target->set_extra_data( $extra_data . 
($target->extra_data || '')); } + elsif( $target->is( $TEXT)) + { $target->_unshift_extra_data_in_pcdata( $extra_data, 0); } # TO CHECK + } + else + { my $parent= $elt->{parent}; # always exists or the erase cannot be performed + $parent->_prefix_extra_data_before_end_tag( $extra_data); + } + } + + # extra_data_before_end_tag + if( my $extra_data= $elt->{extra_data_before_end_tag}) + { if( my $target= $elt->{next_sibling}) + { if( $target->is( $ELT)) + { $target->set_extra_data( $extra_data . ($target->extra_data || '')); } + elsif( $target->is( $TEXT)) + { + $target->_unshift_extra_data_in_pcdata( $extra_data, 0); + } + } + elsif( my $parent= $elt->{parent}) + { $parent->_prefix_extra_data_before_end_tag( $extra_data); } + } + + return $elt; + + } +BEGIN + { my %method= ( before => \&paste_before, + after => \&paste_after, + first_child => \&paste_first_child, + last_child => \&paste_last_child, + within => \&paste_within, + ); + + # paste elt somewhere around ref + # pos can be first_child (default), last_child, before, after or within + sub paste ## no critic (Subroutines::ProhibitNestedSubs); + { my $elt= shift; + if( $elt->{parent}) + { croak "cannot paste an element that belongs to a tree"; } + my $pos; + my $ref; + if( ref $_[0]) + { $pos= 'first_child'; + croak "wrong argument order in paste, should be $_[1] first" if($_[1]); + } + else + { $pos= shift; } + + if( my $method= $method{$pos}) + { + unless( ref( $_[0]) && isa( $_[0], 'XML::Twig::Elt')) + { if( ! defined( $_[0])) + { croak "missing target in paste"; } + elsif( ! ref( $_[0])) + { croak "wrong target type in paste (not a reference), should be XML::Twig::Elt or a subclass"; } + else + { my $ref= ref $_[0]; + croak "wrong target type in paste: '$ref', should be XML::Twig::Elt or a subclass"; + } + } + $ref= $_[0]; + # check here so error message lists the caller file/line + if( !$ref->{parent} && ($pos=~ m{^(before|after)$}) && !(exists $elt->{'target'}) && !(exists $elt->{'comment'})) + { croak "cannot paste $1 root"; } + $elt->$method( @_); + } + else + { croak "tried to paste in wrong position '$pos', allowed positions " . + " are 'first_child', 'last_child', 'before', 'after' and " . 
+ "'within'"; + } + if( (my $ids= $elt->{twig_id_list}) && (my $t= $ref->twig) ) + { $t->{twig_id_list}||={}; + foreach my $id (keys %$ids) + { $t->{twig_id_list}->{$id}= $ids->{$id}; + if( $XML::Twig::weakrefs) { weaken( $t->{twig_id_list}->{$id}); } + } + } + return $elt; + } + + + sub paste_before + { my( $elt, $ref)= @_; + my( $parent, $prev_sibling, $next_sibling ); + + # trying to paste before an orphan (root or detached wlt) + unless( $ref->{parent}) + { if( my $t= $ref->twig) + { if( (exists $elt->{'comment'}) || (exists $elt->{'target'})) # we can still do this + { $t->_add_cpi_outside_of_root( leading_cpi => $elt); return; } + else + { croak "cannot paste before root"; } + } + else + { croak "cannot paste before an orphan element"; } + } + $parent= $ref->{parent}; + $prev_sibling= $ref->{prev_sibling}; + $next_sibling= $ref; + + $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; + if( $parent->{first_child} == $ref) { $parent->{first_child}= $elt; } + + if( $prev_sibling) { $prev_sibling->{next_sibling}= $elt; } + $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; + + $next_sibling->{prev_sibling}=$elt; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; + $elt->{next_sibling}= $ref; + return $elt; + } + + sub paste_after + { my( $elt, $ref)= @_; + my( $parent, $prev_sibling, $next_sibling ); + + # trying to paste after an orphan (root or detached wlt) + unless( $ref->{parent}) + { if( my $t= $ref->twig) + { if( (exists $elt->{'comment'}) || (exists $elt->{'target'})) # we can still do this + { $t->_add_cpi_outside_of_root( trailing_cpi => $elt); return; } + else + { croak "cannot paste after root"; } + } + else + { croak "cannot paste after an orphan element"; } + } + $parent= $ref->{parent}; + $prev_sibling= $ref; + $next_sibling= $ref->{next_sibling}; + + $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; + if( $parent->{last_child}== $ref) { $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; } + + $prev_sibling->{next_sibling}= $elt; + $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; + + if( $next_sibling) { $next_sibling->{prev_sibling}=$elt; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; } + $elt->{next_sibling}= $next_sibling; + return $elt; + + } + + sub paste_first_child + { my( $elt, $ref)= @_; + my( $parent, $prev_sibling, $next_sibling ); + $parent= $ref; + $next_sibling= $ref->{first_child}; + + $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; + $parent->{first_child}= $elt; + unless( $parent->{last_child}) { $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; } + + $elt->{prev_sibling}=undef; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; + + if( $next_sibling) { $next_sibling->{prev_sibling}=$elt; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; } + $elt->{next_sibling}= $next_sibling; + return $elt; + } + + sub paste_last_child + { my( $elt, $ref)= @_; + my( $parent, $prev_sibling, $next_sibling ); + $parent= $ref; + $prev_sibling= $ref->{last_child}; + + $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; + $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; + unless( $parent->{first_child}) { 
$parent->{first_child}= $elt; } + + $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; + if( $prev_sibling) { $prev_sibling->{next_sibling}= $elt; } + + $elt->{next_sibling}= undef; + return $elt; + } + + sub paste_within + { my( $elt, $ref, $offset)= @_; + my $text= $ref->is_text ? $ref : $ref->next_elt( $TEXT, $ref); + my $new= $text->split_at( $offset); + $elt->paste_before( $new); + return $elt; + } + } + +# load an element into a structure similar to XML::Simple's +sub simplify + { my $elt= shift; + + # normalize option names + my %options= @_; + %options= map { my ($key, $val)= ($_, $options{$_}); + $key=~ s{(\w)([A-Z])}{$1_\L$2}g; + $key => $val + } keys %options; + + # check options + my @allowed_options= qw( keyattr forcearray noattr content_key + var var_regexp variables var_attr + group_tags forcecontent + normalise_space normalize_space + ); + my %allowed_options= map { $_ => 1 } @allowed_options; + foreach my $option (keys %options) + { carp "invalid option $option\n" unless( $allowed_options{$option}); } + + $options{normalise_space} ||= $options{normalize_space} || 0; + + $options{content_key} ||= 'content'; + if( $options{content_key}=~ m{^-}) + { # need to remove the - and to activate extra folding + $options{content_key}=~ s{^-}{}; + $options{extra_folding}= 1; + } + else + { $options{extra_folding}= 0; } + + $options{forcearray} ||=0; + if( isa( $options{forcearray}, 'ARRAY')) + { my %forcearray_tags= map { $_ => 1 } @{$options{forcearray}}; + $options{forcearray_tags}= \%forcearray_tags; + $options{forcearray}= 0; + } + + $options{keyattr} ||= ['name', 'key', 'id']; + if( ref $options{keyattr} eq 'ARRAY') + { foreach my $keyattr (@{$options{keyattr}}) + { my( $prefix, $att)= ($keyattr=~ m{^([+-])?(.*)}); + $prefix ||= ''; + $options{key_for_all}->{$att}= 1; + $options{remove_key_for_all}->{$att}=1 unless( $prefix eq '+'); + $options{prefix_key_for_all}->{$att}=1 if( $prefix eq '-'); + } + } + elsif( ref $options{keyattr} eq 'HASH') + { while( my( $elt, $keyattr)= each %{$options{keyattr}}) + { my( $prefix, $att)= ($keyattr=~ m{^([+-])?(.*)}); + $prefix ||=''; + $options{key_for_elt}->{$elt}= $att; + $options{remove_key_for_elt}->{"$elt#$att"}=1 unless( $prefix); + $options{prefix_key_for_elt}->{"$elt#$att"}=1 if( $prefix eq '-'); + } + } + + + $options{var}||= $options{var_attr}; # for compat with XML::Simple + if( $options{var}) { $options{var_values}= {}; } + else { $options{var}=''; } + + if( $options{variables}) + { $options{var}||= 1; + $options{var_values}= $options{variables}; + } + + if( $options{var_regexp} and !$options{var}) + { warn "var option not used, var_regexp option ignored\n"; } + $options{var_regexp} ||= '\$\{?(\w+)\}?'; + + $elt->_simplify( \%options); + + } + +sub _simplify + { my( $elt, $options)= @_; + + my $data; + + my $gi= $XML::Twig::index2gi[$elt->{'gi'}]; + my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; + my %atts= $options->{noattr} || !$elt->{att} ? 
() : %{$elt->{att}}; + my $nb_atts= keys %atts; + my $nb_children= $elt->children_count + $nb_atts; + + my %nb_children; + foreach (@children) { $nb_children{$_->tag}++; } + foreach (keys %atts) { $nb_children{$_}++; } + + my $arrays; # tag => array where elements are stored + + + # store children + foreach my $child (@children) + { if( $child->is_text) + { # generate with a content key + my $text= $elt->_text_with_vars( $options); + if( $options->{normalise_space} >= 2) { $text= _normalize_space( $text); } + if( $options->{force_content} + || $nb_atts + || (scalar @children > 1) + ) + { $data->{$options->{content_key}}= $text; } + else + { $data= $text; } + } + else + { # element with sub-elements + my $child_gi= $XML::Twig::index2gi[$child->{'gi'}]; + + my $child_data= $child->_simplify( $options); + + # first see if we need to simplify further the child data + # simplify because of grouped tags + if( my $grouped_tag= $options->{group_tags}->{$child_gi}) + { # check that the child data is a hash with a single field + unless( (ref( $child_data) eq 'HASH') + && (keys %$child_data == 1) + && defined ( my $grouped_child_data= $child_data->{$grouped_tag}) + ) + { croak "error in grouped tag $child_gi"; } + else + { $child_data= $grouped_child_data; } + } + # simplify because of extra folding + if( $options->{extra_folding}) + { if( (ref( $child_data) eq 'HASH') + && (keys %$child_data == 1) + && defined( my $content= $child_data->{$options->{content_key}}) + ) + { $child_data= $content; } + } + + if( my $keyatt= $child->_key_attr( $options)) + { # simplify element with key + my $key= $child->{'att'}->{$keyatt}; + if( $options->{normalise_space} >= 1) { $key= _normalize_space( $key); } + $data->{$child_gi}->{$key}= $child_data; + } + elsif( $options->{forcearray} + || $options->{forcearray_tags}->{$child_gi} + || ( $nb_children{$child_gi} > 1) + ) + { # simplify element to store in an array + $data->{$child_gi} ||= []; + push @{$data->{$child_gi}}, $child_data; + } + else + { # simplify element to store as a hash field + $data->{$child_gi}= $child_data; + } + } + } + + # store atts + # TODO: deal with att that already have an element by that name + foreach my $att (keys %atts) + { # do not store if the att is a key that needs to be removed + if( $options->{remove_key_for_all}->{$att} + || $options->{remove_key_for_elt}->{"$gi#$att"} + ) + { next; } + + my $att_text= $options->{var} ? 
_replace_vars_in_text( $atts{$att}, $options) : $atts{$att} ; + if( $options->{normalise_space} >= 2) { $att_text= _normalize_space( $att_text); } + + if( $options->{prefix_key_for_all}->{$att} + || $options->{prefix_key_for_elt}->{"$gi#$att"} + ) + { # prefix the att + $data->{"-$att"}= $att_text; + } + else + { # normal case + $data->{$att}= $att_text; + } + } + + return $data; + } + +sub _key_attr + { my( $elt, $options)=@_; + return if( $options->{noattr}); + if( $options->{key_for_all}) + { foreach my $att ($elt->att_names) + { if( $options->{key_for_all}->{$att}) + { return $att; } + } + } + elsif( $options->{key_for_elt}) + { if( my $key_for_elt= $options->{key_for_elt}->{$XML::Twig::index2gi[$elt->{'gi'}]} ) + { return $key_for_elt if( defined( $elt->{'att'}->{$key_for_elt})); } + } + return; + } + +sub _text_with_vars + { my( $elt, $options)= @_; + my $text; + if( $options->{var}) + { $text= _replace_vars_in_text( $elt->text, $options); + $elt->_store_var( $options); + } + else + { $text= $elt->text; } + return $text; + } + + +sub _normalize_space + { my $text= shift; + $text=~ s{\s+}{ }sg; + $text=~ s{^\s}{}; + $text=~ s{\s$}{}; + return $text; + } + + +sub att_nb + { return 0 unless( my $atts= $_[0]->{att}); + return scalar keys %$atts; + } + +sub has_no_atts + { return 1 unless( my $atts= $_[0]->{att}); + return scalar keys %$atts ? 0 : 1; + } + +sub _replace_vars_in_text + { my( $text, $options)= @_; + + $text=~ s{($options->{var_regexp})} + { if( defined( my $value= $options->{var_values}->{$2})) + { $value } + else + { warn "unknown variable $2\n"; + $1 + } + }gex; + return $text; + } + +sub _store_var + { my( $elt, $options)= @_; + if( defined (my $var_name= $elt->{'att'}->{$options->{var}})) + { $options->{var_values}->{$var_name}= $elt->text; + } + } + + +# split a text element at a given offset +sub split_at + { my( $elt, $offset)= @_; + my $text_elt= $elt->is_text ? 
$elt : $elt->first_child( $TEXT) || return ''; + my $string= $text_elt->text; + my $left_string= substr( $string, 0, $offset); + my $right_string= substr( $string, $offset); + $text_elt->{pcdata}= (delete $text_elt->{empty} || 1) && $left_string; + my $new_elt= $elt->new( $XML::Twig::index2gi[$elt->{'gi'}], $right_string); + $new_elt->paste( after => $elt); + return $new_elt; + } + + +# split an element or its text descendants into several, in place +# all elements (new and untouched) are returned +sub split + { my $elt= shift; + my @text_chunks; + my @result; + if( $elt->is_text) { @text_chunks= ($elt); } + else { @text_chunks= $elt->descendants( $TEXT); } + foreach my $text_chunk (@text_chunks) + { push @result, $text_chunk->_split( 1, @_); } + return @result; + } + +# split an element or its text descendants into several, in place +# created elements (those which match the regexp) are returned +sub mark + { my $elt= shift; + my @text_chunks; + my @result; + if( $elt->is_text) { @text_chunks= ($elt); } + else { @text_chunks= $elt->descendants( $TEXT); } + foreach my $text_chunk (@text_chunks) + { push @result, $text_chunk->_split( 0, @_); } + return @result; + } + +# split a single text element +# return_all defines what is returned: if it is true +# only returns the elements created by matches in the split regexp +# otherwise all elements (new and untouched) are returned + + +{ + + sub _split + { my $elt= shift; + my $return_all= shift; + my $regexp= shift; + my @tags; + + while( @_) + { my $tag= shift(); + if( ref $_[0]) + { push @tags, { tag => $tag, atts => shift }; } + else + { push @tags, { tag => $tag }; } + } + + unless( @tags) { @tags= { tag => $elt->{parent}->gi }; } + + my @result; # the returned list of elements + my $text= $elt->text; + my $gi= $XML::Twig::index2gi[$elt->{'gi'}]; + + # 2 uses: if split matches then the first substring reuses $elt + # once a split has occurred then the last match needs to be put in + # a new element + my $previous_match= 0; + + while( my( $pre_match, @matches)= $text=~ /^(.*?)$regexp(.*)$/gcs) + { $text= pop @matches; + if( $previous_match) + { # match, not the first one, create a new text ($gi) element + _utf8_ify( $pre_match) if( $] < 5.010); + $elt= $elt->insert_new_elt( after => $gi, $pre_match); + push @result, $elt if( $return_all); + } + else + { # first match in $elt, re-use $elt for the first sub-string + _utf8_ify( $pre_match) if( $] < 5.010); + $elt->set_text( $pre_match); + $previous_match++; # store the fact that there was a match + push @result, $elt if( $return_all); + } + + # now deal with matches captured in the regexp + if( @matches) + { # match, with capture + my $i=0; + foreach my $match (@matches) + { # create new element, text is the match + _utf8_ify( $match) if( $] < 5.010); + my $tag = _repl_match( $tags[$i]->{tag}, @matches) || '#PCDATA'; + my $atts = \%{$tags[$i]->{atts}} || {}; + my %atts= map { _repl_match( $_, @matches) => _repl_match( $atts->{$_}, @matches) } keys %$atts; + $elt= $elt->insert_new_elt( after => $tag, \%atts, $match); + push @result, $elt; + $i= ($i + 1) % @tags; + } + } + else + { # match, no captures + my $tag = $tags[0]->{tag}; + my $atts = \%{$tags[0]->{atts}} || {}; + $elt= $elt->insert_new_elt( after => $tag, $atts); + push @result, $elt; + } + } + if( $previous_match && $text) + { # there was at least 1 match, and there is text left after the match + $elt= $elt->insert_new_elt( after => $gi, $text); + } + + push @result, $elt if( $return_all); + + return @result; # return all elements + } + 
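For orientation only (this note and the snippet below are not part of the patch itself): a minimal sketch of how the split()/mark() methods implemented above are typically used. The sample XML document and the 'num' tag name are illustrative assumptions, not taken from this diff.

    use XML::Twig;

    # hypothetical input document; goal: wrap every run of digits in a <num> element
    my $twig= XML::Twig->new;
    $twig->parse( '<p>see chapter 12 and chapter 3</p>');

    # split() rewrites the text descendants in place and returns all resulting elements;
    # mark() would instead return only the newly created <num> elements
    $twig->root->split( qr/(\d+)/, 'num');

    print $twig->sprint, "\n";  # expected: <p>see chapter <num>12</num> and chapter <num>3</num></p>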
+sub _repl_match + { my( $val, @matches)= @_; + $val=~ s{\$(\d+)}{$matches[$1-1]}g; + return $val; + } + + # evil hack needed as sometimes + my $encode_is_loaded=0; # so we only load Encode once + sub _utf8_ify + { + if( $perl_version >= 5.008 and $perl_version < 5.010 and !_keep_encoding()) + { unless( $encode_is_loaded) { require Encode; import Encode; $encode_is_loaded++; } + Encode::_utf8_on( $_[0]); # the flag should be set but is not + } + } + + +} + +{ my %replace_sub; # cache for complex expressions (expression => sub) + + sub subs_text + { my( $elt, $regexp, $replace)= @_; + + my $replacement_string; + my $is_string= _is_string( $replace); + + my @parents; + + foreach my $text_elt ($elt->descendants_or_self( $TEXT)) + { + if( $is_string) + { my $text= $text_elt->text; + $text=~ s{$regexp}{ _replace_var( $replace, $1, $2, $3, $4, $5, $6, $7, $8, $9)}egx; + $text_elt->set_text( $text); + } + else + { + no utf8; # = perl 5.6 + my $replace_sub= ( $replace_sub{$replace} ||= _install_replace_sub( $replace)); + my $text= $text_elt->text; + my $pos=0; # used to skip text that was previously matched + my $found_hit; + while( my( $pre_match_string, $match_string, @var)= ($text=~ m{(.*?)($regexp)}sg)) + { $found_hit=1; + my $match_start = length( $pre_match_string); + my $match = $match_start ? $text_elt->split_at( $match_start + $pos) : $text_elt; + my $match_length = length( $match_string); + my $post_match = $match->split_at( $match_length); + $replace_sub->( $match, @var); + + # go to next + $text_elt= $post_match; + $text= $post_match->text; + + if( $found_hit) { push @parents, $text_elt->{parent} unless $parents[-1] && $parents[-1]== $text_elt->{parent}; } + + } + } + } + + foreach my $parent (@parents) { $parent->normalize; } + + return $elt; + } + + + sub _is_string + { return ($_[0]=~ m{&e[ln]t}) ? 0: 1 } + + sub _replace_var + { my( $string, @var)= @_; + unshift @var, undef; + $string=~ s{\$(\d)}{$var[$1]}g; + return $string; + } + + sub _install_replace_sub + { my $replace_exp= shift; + my @item= split m{(&e[ln]t\s*\([^)]*\))}, $replace_exp; + my $sub= q{ my( $match, @var)= @_; my $new; my $last_inserted=$match;}; + my( $gi, $exp); + foreach my $item (@item) + { next if ! length $item; + if( $item=~ m{^&elt\s*\(([^)]*)\)}) + { $exp= $1; } + elsif( $item=~ m{^&ent\s*\(\s*([^\s)]*)\s*\)}) + { $exp= " '#ENT' => $1"; } + else + { $exp= qq{ '#PCDATA' => "$item"}; } + $exp=~ s{\$(\d)}{my $i= $1-1; "\$var[$i]"}eg; # replace references to matches + $sub.= qq{ \$new= \$match->new( $exp); }; + $sub .= q{ $new->paste( after => $last_inserted); $last_inserted=$new;}; + } + $sub .= q{ $match->delete; }; + #$sub=~ s/;/;\n/g; warn "subs: $sub"; + my $coderef= eval "sub { $NO_WARNINGS; $sub }"; + if( $@) { croak( "invalid replacement expression $replace_exp: ",$@); } + return $coderef; + } + + } + + +sub merge_text + { my( $e1, $e2)= @_; + croak "invalid merge: can only merge 2 elements" + unless( isa( $e2, 'XML::Twig::Elt')); + croak "invalid merge: can only merge 2 text elements" + unless( $e1->is_text && $e2->is_text && ($e1->gi eq $e2->gi)); + + my $t1_length= length( $e1->text); + + $e1->set_text( $e1->text . 
$e2->text); + + if( my $extra_data_in_pcdata= $e2->_extra_data_in_pcdata) + { foreach my $data (@$extra_data_in_pcdata) { $e1->_push_extra_data_in_pcdata( $data->{text}, $data->{offset} + $t1_length); } } + + $e2->delete; + + return $e1; + } + +sub merge + { my( $e1, $e2)= @_; + my @e2_children= $e2->_children; + if( $e1->_last_child && $e1->_last_child->is_pcdata + && @e2_children && $e2_children[0]->is_pcdata + ) + { my $t1_length= length( $e1->_last_child->{pcdata}); + my $child1= $e1->_last_child; + my $child2= shift @e2_children; + $child1->{pcdata} .= $child2->{pcdata}; + + my $extra_data= $e1->_extra_data_before_end_tag . $e2->extra_data; + + if( $extra_data) + { $e1->_del_extra_data_before_end_tag; + $child1->_push_extra_data_in_pcdata( $extra_data, $t1_length); + } + + if( my $extra_data_in_pcdata= $child2->_extra_data_in_pcdata) + { foreach my $data (@$extra_data_in_pcdata) { $child1->_push_extra_data_in_pcdata( $data->{text}, $data->{offset} + $t1_length); } } + + if( my $extra_data_before_end_tag= $e2->_extra_data_before_end_tag) + { $e1->_set_extra_data_before_end_tag( $extra_data_before_end_tag); } + } + + foreach my $e (@e2_children) { $e->move( last_child => $e1); } + + $e2->delete; + return $e1; + } + + +# recursively copy an element and returns the copy (can be huge and long) +sub copy + { my $elt= shift; + my $copy= $elt->new( $XML::Twig::index2gi[$elt->{'gi'}]); + + if( $elt->extra_data) { $copy->set_extra_data( $elt->extra_data); } + if( $elt->{extra_data_before_end_tag}) { $copy->_set_extra_data_before_end_tag( $elt->{extra_data_before_end_tag}); } + + if( $elt->is_asis) { $copy->set_asis; } + + if( (exists $elt->{'pcdata'})) + { $copy->{pcdata}= (delete $copy->{empty} || 1) && $elt->{pcdata}; + if( $elt->{extra_data_in_pcdata}) { $copy->_set_extra_data_in_pcdata( $elt->{extra_data_in_pcdata}); } + } + elsif( (exists $elt->{'cdata'})) + { $copy->_set_cdata( $elt->{cdata}); + if( $elt->{extra_data_in_pcdata}) { $copy->_set_extra_data_in_pcdata( $elt->{extra_data_in_pcdata}); } + } + elsif( (exists $elt->{'target'})) + { $copy->_set_pi( $elt->{target}, $elt->{data}); } + elsif( (exists $elt->{'comment'})) + { $copy->_set_comment( $elt->{comment}); } + elsif( (exists $elt->{'ent'})) + { $copy->{ent}= $elt->{ent}; } + else + { my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; + if( my $atts= $elt->{att}) + { my %atts; + tie %atts, 'Tie::IxHash' if (keep_atts_order()); + %atts= %{$atts}; # we want to do a real copy of the attributes + $copy->set_atts( \%atts); + } + foreach my $child (@children) + { my $child_copy= $child->copy; + $child_copy->paste( 'last_child', $copy); + } + } + # save links to the original location, which can be convenient and is used for namespace resolution + foreach my $link ( qw(parent prev_sibling next_sibling) ) + { $copy->{former}->{$link}= $elt->{$link}; + if( $XML::Twig::weakrefs) { weaken( $copy->{former}->{$link}); } + } + + $copy->{empty}= $elt->{'empty'}; + + return $copy; + } + + +sub delete + { my $elt= shift; + $elt->cut; + $elt->DESTROY unless $XML::Twig::weakrefs; + return undef; + } + +sub __destroy + { my $elt= shift; + return if( $XML::Twig::weakrefs); + my $t= shift || $elt->twig; # optional argument, passed in recursive calls + + foreach( @{[$elt->_children]}) { $_->DESTROY( $t); } + + # the id reference needs to be destroyed + # lots of tests to avoid warnings during the cleanup phase + $elt->del_id( $t) if( 
$ID && $t && defined( $elt->{att}) && exists( $elt->{att}->{$ID})); + if( $elt->{former}) { foreach (keys %{$elt->{former}}) { delete $elt->{former}->{$_}; } delete $elt->{former}; } + foreach (qw( keys %$elt)) { delete $elt->{$_}; } + undef $elt; + } + +BEGIN +{ sub set_destroy { if( $XML::Twig::weakrefs) { undef *DESTROY } else { *DESTROY= *__destroy; } } + set_destroy(); +} + +# ignores the element +sub ignore + { my $elt= shift; + my $t= $elt->twig; + $t->ignore( $elt, @_); + } + +BEGIN { + my $pretty = 0; + my $quote = '"'; + my $INDENT = ' '; + my $empty_tag_style = 0; + my $remove_cdata = 0; + my $keep_encoding = 0; + my $expand_external_entities = 0; + my $keep_atts_order = 0; + my $do_not_escape_amp_in_atts = 0; + my $WRAP = '80'; + my $REPLACED_ENTS = qq{&<}; + + my ($NSGMLS, $NICE, $INDENTED, $INDENTEDCT, $INDENTEDC, $WRAPPED, $RECORD1, $RECORD2, $INDENTEDA)= (1..9); + my %KEEP_TEXT_TAG_ON_ONE_LINE= map { $_ => 1 } ( $INDENTED, $INDENTEDCT, $INDENTEDC, $INDENTEDA, $WRAPPED); + my %WRAPPED = map { $_ => 1 } ( $WRAPPED, $INDENTEDA, $INDENTEDC); + + my %pretty_print_style= + ( none => 0, # no added \n + nsgmls => $NSGMLS, # nsgmls-style, \n in tags + # below this line styles are UNSAFE (the generated XML can be well-formed but invalid) + nice => $NICE, # \n after open/close tags except when the + # element starts with text + indented => $INDENTED, # nice plus idented + indented_close_tag => $INDENTEDCT, # nice plus idented + indented_c => $INDENTEDC, # slightly more compact than indented (closing + # tags are on the same line) + wrapped => $WRAPPED, # text is wrapped at column + record_c => $RECORD1, # for record-like data (compact) + record => $RECORD2, # for record-like data (not so compact) + indented_a => $INDENTEDA, # nice, indented, and with attributes on separate + # lines as the nsgmls style, as well as wrapped + # lines - to make the xml friendly to line-oriented tools + cvs => $INDENTEDA, # alias for indented_a + ); + + my ($HTML, $EXPAND)= (1..2); + my %empty_tag_style= + ( normal => 0, # + html => $HTML, # + xhtml => $HTML, # + expand => $EXPAND, # + ); + + my %quote_style= + ( double => '"', + single => "'", + # smart => "smart", + ); + + my $xml_space_preserve; # set when an element includes xml:space="preserve" + + my $output_filter; # filters the entire output (including < and >) + my $output_text_filter; # filters only the text part (tag names, attributes, pcdata) + + my $replaced_ents= $REPLACED_ENTS; + + + # returns those pesky "global" variables so you can switch between twigs + sub global_state ## no critic (Subroutines::ProhibitNestedSubs); + { return + { pretty => $pretty, + quote => $quote, + indent => $INDENT, + empty_tag_style => $empty_tag_style, + remove_cdata => $remove_cdata, + keep_encoding => $keep_encoding, + expand_external_entities => $expand_external_entities, + output_filter => $output_filter, + output_text_filter => $output_text_filter, + keep_atts_order => $keep_atts_order, + do_not_escape_amp_in_atts => $do_not_escape_amp_in_atts, + wrap => $WRAP, + replaced_ents => $replaced_ents, + }; + } + + # restores the global variables + sub set_global_state + { my $state= shift; + $pretty = $state->{pretty}; + $quote = $state->{quote}; + $INDENT = $state->{indent}; + $empty_tag_style = $state->{empty_tag_style}; + $remove_cdata = $state->{remove_cdata}; + $keep_encoding = $state->{keep_encoding}; + $expand_external_entities = $state->{expand_external_entities}; + $output_filter = $state->{output_filter}; + $output_text_filter = 
$state->{output_text_filter}; + $keep_atts_order = $state->{keep_atts_order}; + $do_not_escape_amp_in_atts = $state->{do_not_escape_amp_in_atts}; + $WRAP = $state->{wrap}; + $replaced_ents = $state->{replaced_ents}, + } + + # sets global state to defaults + sub init_global_state + { set_global_state( + { pretty => 0, + quote => '"', + indent => $INDENT, + empty_tag_style => 0, + remove_cdata => 0, + keep_encoding => 0, + expand_external_entities => 0, + output_filter => undef, + output_text_filter => undef, + keep_atts_order => undef, + do_not_escape_amp_in_atts => 0, + wrap => $WRAP, + replaced_ents => $REPLACED_ENTS, + }); + } + + + # set the pretty_print style (in $pretty) and returns the old one + # can be called from outside the package with 2 arguments (elt, style) + # or from inside with only one argument (style) + # the style can be either a string (one of the keys of %pretty_print_style + # or a number (presumably an old value saved) + sub set_pretty_print + { my $style= lc( defined $_[1] ? $_[1] : $_[0]); # so we cover both cases + my $old_pretty= $pretty; + if( $style=~ /^\d+$/) + { croak "invalid pretty print style $style" unless( $style < keys %pretty_print_style); + $pretty= $style; + } + else + { croak "invalid pretty print style '$style'" unless( exists $pretty_print_style{$style}); + $pretty= $pretty_print_style{$style}; + } + if( $WRAPPED{$pretty} ) + { XML::Twig::_use( 'Text::Wrap') or croak( "Text::Wrap not available, cannot use style $style"); } + return $old_pretty; + } + + sub _pretty_print { return $pretty; } + + # set the empty tag style (in $empty_tag_style) and returns the old one + # can be called from outside the package with 2 arguments (elt, style) + # or from inside with only one argument (style) + # the style can be either a string (one of the keys of %empty_tag_style + # or a number (presumably an old value saved) + sub set_empty_tag_style + { my $style= lc( defined $_[1] ? $_[1] : $_[0]); # so we cover both cases + my $old_style= $empty_tag_style; + if( $style=~ /^\d+$/) + { croak "invalid empty tag style $style" + unless( $style < keys %empty_tag_style); + $empty_tag_style= $style; + } + else + { croak "invalid empty tag style '$style'" + unless( exists $empty_tag_style{$style}); + $empty_tag_style= $empty_tag_style{$style}; + } + return $old_style; + } + + sub _pretty_print_styles + { return (sort { $pretty_print_style{$a} <=> $pretty_print_style{$b} || $a cmp $b } keys %pretty_print_style); } + + sub set_quote + { my $style= $_[1] || $_[0]; + my $old_quote= $quote; + croak "invalid quote '$style'" unless( exists $quote_style{$style}); + $quote= $quote_style{$style}; + return $old_quote; + } + + sub set_remove_cdata + { my $new_value= defined $_[1] ? $_[1] : $_[0]; + my $old_value= $remove_cdata; + $remove_cdata= $new_value; + return $old_value; + } + + + sub set_indent + { my $new_value= defined $_[1] ? $_[1] : $_[0]; + my $old_value= $INDENT; + $INDENT= $new_value; + return $old_value; + } + + sub set_wrap + { my $new_value= defined $_[1] ? $_[1] : $_[0]; + my $old_value= $WRAP; + $WRAP= $new_value; + return $old_value; + } + + + sub set_keep_encoding + { my $new_value= defined $_[1] ? $_[1] : $_[0]; + my $old_value= $keep_encoding; + $keep_encoding= $new_value; + return $old_value; + } + + sub set_replaced_ents + { my $new_value= defined $_[1] ? 
$_[1] : $_[0]; + my $old_value= $replaced_ents; + $replaced_ents= $new_value; + return $old_value; + } + + sub do_not_escape_gt + { my $old_value= $replaced_ents; + $replaced_ents= q{&<}; # & needs to be first + return $old_value; + } + + sub escape_gt + { my $old_value= $replaced_ents; + $replaced_ents= qq{&<>}; # & needs to be first + return $old_value; + } + + sub _keep_encoding { return $keep_encoding; } # so I can use elsewhere in the module + + sub set_do_not_escape_amp_in_atts + { my $new_value= defined $_[1] ? $_[1] : $_[0]; + my $old_value= $do_not_escape_amp_in_atts; + $do_not_escape_amp_in_atts= $new_value; + return $old_value; + } + + sub output_filter { return $output_filter; } + sub output_text_filter { return $output_text_filter; } + + sub set_output_filter + { my $new_value= defined $_[1] ? $_[1] : $_[0]; # can be called in object/non-object mode + # if called in object mode with no argument, the filter is undefined + if( isa( $new_value, 'XML::Twig::Elt') || isa( $new_value, 'XML::Twig')) { undef $new_value; } + my $old_value= $output_filter; + if( !$new_value || isa( $new_value, 'CODE') ) + { $output_filter= $new_value; } + elsif( $new_value eq 'latin1') + { $output_filter= XML::Twig::latin1(); + } + elsif( $XML::Twig::filter{$new_value}) + { $output_filter= $XML::Twig::filter{$new_value}; } + else + { croak "invalid output filter '$new_value'"; } + + return $old_value; + } + + sub set_output_text_filter + { my $new_value= defined $_[1] ? $_[1] : $_[0]; # can be called in object/non-object mode + # if called in object mode with no argument, the filter is undefined + if( isa( $new_value, 'XML::Twig::Elt') || isa( $new_value, 'XML::Twig')) { undef $new_value; } + my $old_value= $output_text_filter; + if( !$new_value || isa( $new_value, 'CODE') ) + { $output_text_filter= $new_value; } + elsif( $new_value eq 'latin1') + { $output_text_filter= XML::Twig::latin1(); + } + elsif( $XML::Twig::filter{$new_value}) + { $output_text_filter= $XML::Twig::filter{$new_value}; } + else + { croak "invalid output text filter '$new_value'"; } + + return $old_value; + } + + sub set_expand_external_entities + { my $new_value= defined $_[1] ? $_[1] : $_[0]; + my $old_value= $expand_external_entities; + $expand_external_entities= $new_value; + return $old_value; + } + + sub set_keep_atts_order + { my $new_value= defined $_[1] ? $_[1] : $_[0]; + my $old_value= $keep_atts_order; + $keep_atts_order= $new_value; + return $old_value; + + } + + sub keep_atts_order { return $keep_atts_order; } # so I can use elsewhere in the module + + my %html_empty_elt; + BEGIN { %html_empty_elt= map { $_ => 1} qw( base meta link hr br param img area input col); } + + sub start_tag + { my( $elt, $option)= @_; + + + return if( $elt->{gi} < $XML::Twig::SPECIAL_GI); + + my $extra_data= $elt->{extra_data} || ''; + + my $gi= $XML::Twig::index2gi[$elt->{'gi'}]; + my $att= $elt->{att}; # should be $elt->{att}, optimized into a pure hash look-up + + my $ns_map= $att ? $att->{'#original_gi'} : ''; + if( $ns_map) { $gi= _restore_original_prefix( $ns_map, $gi); } + $gi=~ s{^#default:}{}; # remove default prefix + + if( $output_text_filter) { $gi= $output_text_filter->( $gi); } + + # get the attribute and their values + my $att_sep = $pretty==$NSGMLS ? "\n" + : $pretty==$INDENTEDA ? "\n" . $INDENT x ($elt->level+1) . ' ' + : ' ' + ; + + my $replace_in_att_value= $replaced_ents . 
"$quote\t\r\n"; + if( $option->{escape_gt} && $replaced_ents !~ m{>}) { $replace_in_att_value.= '>'; } + + my $tag; + my @att_names= grep { !( $_=~ m{^#(?!default:)} ) } $keep_atts_order ? keys %{$att} : sort keys %{$att}; + if( @att_names) + { my $atts= join $att_sep, map { my $output_att_name= $ns_map ? _restore_original_prefix( $ns_map, $_) : $_; + if( $output_text_filter) + { $output_att_name= $output_text_filter->( $output_att_name); } + $output_att_name . '=' . $quote . _att_xml_string( $att->{$_}, $replace_in_att_value) . $quote + + } + @att_names + ; + if( $pretty==$INDENTEDA && @att_names == 1) { $att_sep= ' '; } + $tag= "<$gi$att_sep$atts"; + } + else + { $tag= "<$gi"; } + + $tag .= "\n" if($pretty==$NSGMLS); + + + # force empty if suitable HTML tag, otherwise use the value from the input tree + if( ($empty_tag_style eq $HTML) && !$elt->{first_child} && !$elt->{extra_data_before_end_tag} && $html_empty_elt{$gi}) + { $elt->{empty}= 1; } + my $empty= defined $elt->{empty} ? $elt->{empty} + : $elt->{first_child} ? 0 + : 1; + + $tag .= (!$elt->{empty} || $elt->{extra_data_before_end_tag}) ? '>' # element has content + : (($empty_tag_style eq $HTML) && $html_empty_elt{$gi}) ? ' />' # html empty element + # cvs-friendly format + : ( $pretty == $INDENTEDA && @att_names > 1) ? "\n" . $INDENT x $elt->level . "/>" + : ( $pretty == $INDENTEDA && @att_names == 1) ? " />" + : $empty_tag_style ? ">{'gi'}] . ">" # $empty_tag_style is $HTML or $EXPAND + : '/>' + ; + + if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )) { $tag= ''; } + +#warn "TRACE: ", $tag,": ", Encode::is_utf8( $tag) ? "has flag" : "FLAG NOT SET"; + + unless( $pretty) { return defined( $extra_data) ? $extra_data . $tag : $tag; } + + my $prefix=''; + my $return=''; # '' or \n is to be printed before the tag + my $indent=0; # number of indents before the tag + + if( $pretty==$RECORD1) + { my $level= $elt->level; + $return= "\n" if( $level < 2); + $indent= 1 if( $level == 1); + } + + elsif( $pretty==$RECORD2) + { $return= "\n"; + $indent= $elt->level; + } + + elsif( $pretty==$NICE) + { my $parent= $elt->{parent}; + unless( !$parent || $parent->{contains_text}) + { $return= "\n"; } + $elt->{contains_text}= 1 if( ($parent && $parent->{contains_text}) + || $elt->contains_text); + } + + elsif( $KEEP_TEXT_TAG_ON_ONE_LINE{$pretty}) + { my $parent= $elt->{parent}; + unless( !$parent || $parent->{contains_text}) + { $return= "\n"; + $indent= $elt->level; + } + $elt->{contains_text}= 1 if( ($parent && $parent->{contains_text}) + || $elt->contains_text); + } + + if( $return || $indent) + { # check for elements in which spaces should be kept + my $t= $elt->twig; + return $extra_data . $tag if( $xml_space_preserve); + if( $t && $t->{twig_keep_spaces_in}) + { foreach my $ancestor ($elt->ancestors) + { return $extra_data . $tag if( $t->{twig_keep_spaces_in}->{$XML::Twig::index2gi[$ancestor->{'gi'}]}) } + } + + $prefix= $INDENT x $indent; + if( $extra_data) + { $extra_data=~ s{\s+$}{}; + $extra_data=~ s{^\s+}{}; + $extra_data= $prefix . $extra_data . $return; + } + } + + + return $return . $extra_data . $prefix . 
$tag; + } + + sub end_tag + { my $elt= shift; + return '' if( ($elt->{gi}<$XML::Twig::SPECIAL_GI) + || ($elt->{'empty'} && !$elt->{extra_data_before_end_tag}) + ); + my $tag= "<"; + my $gi= $XML::Twig::index2gi[$elt->{'gi'}]; + + if( my $map= $elt->{'att'}->{'#original_gi'}) { $gi= _restore_original_prefix( $map, $gi); } + $gi=~ s{^#default:}{}; # remove default prefix + + if( $output_text_filter) { $gi= $output_text_filter->( $XML::Twig::index2gi[$elt->{'gi'}]); } + $tag .= "/$gi>"; + + $tag = ($elt->{extra_data_before_end_tag} || '') . $tag; + + if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )) { $tag= ''; } + + return $tag unless $pretty; + + my $prefix=''; + my $return=0; # 1 if a \n is to be printed before the tag + my $indent=0; # number of indents before the tag + + if( $pretty==$RECORD1) + { $return= 1 if( $elt->level == 0); + } + + elsif( $pretty==$RECORD2) + { unless( $elt->contains_text) + { $return= 1 ; + $indent= $elt->level; + } + } + + elsif( $pretty==$NICE) + { my $parent= $elt->{parent}; + if( ( ($parent && !$parent->{contains_text}) || !$parent ) + && ( !$elt->{contains_text} + && ($elt->{has_flushed_child} || $elt->{first_child}) + ) + ) + { $return= 1; } + } + + elsif( $KEEP_TEXT_TAG_ON_ONE_LINE{$pretty}) + { my $parent= $elt->{parent}; + if( ( ($parent && !$parent->{contains_text}) || !$parent ) + && ( !$elt->{contains_text} + && ($elt->{has_flushed_child} || $elt->{first_child}) + ) + ) + { $return= 1; + $indent= $elt->level; + } + } + + if( $return || $indent) + { # check for elements in which spaces should be kept + my $t= $elt->twig; + return $tag if( $xml_space_preserve); + if( $t && $t->{twig_keep_spaces_in}) + { foreach my $ancestor ($elt, $elt->ancestors) + { return $tag if( $t->{twig_keep_spaces_in}->{$XML::Twig::index2gi[$ancestor->{'gi'}]}) } + } + + if( $return) { $prefix= ($pretty== $INDENTEDCT) ? "\n$INDENT" : "\n"; } + $prefix.= $INDENT x $indent; + } + + # add a \n at the end of the document (after the root element) + $tag .= "\n" unless( $elt->{parent}); + + return $prefix . $tag; + } + + sub _restore_original_prefix + { my( $map, $name)= @_; + my $prefix= _ns_prefix( $name); + if( my $original_prefix= $map->{$prefix}) + { if( $original_prefix eq '#default') + { $name=~ s{^$prefix:}{}; } + else + { $name=~ s{^$prefix(?=:)}{$original_prefix}; } + } + return $name; + } + + # buffer used to hold the text to print/sprint, to avoid passing it back and forth between methods + my @sprint; + + # $elt is an element to print + # $fh is an optional filehandle to print to + # $pretty is an optional value, if true a \n is printed after the < of the + # opening tag + sub print + { my $elt= shift; + + my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : undef; + my $old_select= defined $fh ? select $fh : undef; + print $elt->sprint( @_); + select $old_select if( defined $old_select); + } + + +# those next 2 methods need to be refactored, they are copies of the same methods in XML::Twig +sub print_to_file + { my( $elt, $filename)= (shift, shift); + my $out_fh; +# open( $out_fh, ">$filename") or _croak( "cannot create file $filename: $!"); # < perl 5.8 + my $mode= $keep_encoding ? 
'>' : '>:utf8'; # >= perl 5.8 + open( $out_fh, $mode, $filename) or _croak( "cannot create file $filename: $!"); # >= perl 5.8 + $elt->print( $out_fh, @_); + close $out_fh; + return $elt; + } + +# probably only works on *nix (at least the chmod bit) +# first print to a temporary file, then rename that file to the desired file name, then change permissions +# to the original file permissions (or to the current umask) +sub safe_print_to_file + { my( $elt, $filename)= (shift, shift); + my $perm= -f $filename ? (stat $filename)[2] & 07777 : ~umask() ; + XML::Twig::_use( 'File::Temp') || croak "need File::Temp to use safe_print_to_file\n"; + XML::Twig::_use( 'File::Basename') || croak "need File::Basename to use safe_print_to_file\n"; + my $tmpdir= File::Basename::dirname( $filename); + my( $fh, $tmpfilename) = File::Temp::tempfile( DIR => $tmpdir); + $elt->print_to_file( $tmpfilename, @_); + rename( $tmpfilename, $filename) or unlink $tmpfilename && _croak( "cannot move temporary file to $filename: $!"); + chmod $perm, $filename; + return $elt; + } + + + # same as print but does not output the start tag if the element + # is marked as flushed + sub flush + { my $elt= shift; + my $up_to= $_[0] && isa( $_[0], 'XML::Twig::Elt') ? shift : $elt; + $elt->twig->flush_up_to( $up_to, @_); + } + sub purge + { my $elt= shift; + my $up_to= $_[0] && isa( $_[0], 'XML::Twig::Elt') ? shift : $elt; + $elt->twig->purge_up_to( $up_to, @_); + } + + sub _flush + { my $elt= shift; + + my $pretty; + my $fh= isa( $_[0], 'GLOB') || isa( $_[0], 'IO::Scalar') ? shift : undef; + my $old_select= defined $fh ? select $fh : undef; + my $old_pretty= defined ($pretty= shift) ? set_pretty_print( $pretty) : undef; + + $xml_space_preserve= 1 if( ($elt->inherit_att( 'xml:space') || '') eq 'preserve'); + + $elt->__flush(); + + $xml_space_preserve= 0; + + select $old_select if( defined $old_select); + set_pretty_print( $old_pretty) if( defined $old_pretty); + } + + sub __flush + { my $elt= shift; + + if( $elt->{gi} >= $XML::Twig::SPECIAL_GI) + { my $preserve= ($elt->{'att'}->{'xml:space'} || '') eq 'preserve'; + $xml_space_preserve++ if $preserve; + unless( $elt->_flushed) + { print $elt->start_tag(); + } + + # flush the children + my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; + foreach my $child (@children) + { $child->_flush( $pretty); } + unless( $elt->{end_tag_flushed}) { print $elt->end_tag; } + $xml_space_preserve-- if $preserve; + # used for pretty printing + if( my $parent= $elt->{parent}) { $parent->{has_flushed_child}= 1; } + } + else # text or special element + { my $text; + if( (exists $elt->{'pcdata'})) { $text= $elt->pcdata_xml_string; + if( my $parent= $elt->{parent}) + { $parent->{contains_text}= 1; } + } + elsif( (exists $elt->{'cdata'})) { $text= $elt->cdata_string; + if( my $parent= $elt->{parent}) + { $parent->{contains_text}= 1; } + } + elsif( (exists $elt->{'target'})) { $text= $elt->pi_string; } + elsif( (exists $elt->{'comment'})) { $text= $elt->comment_string; } + elsif( (exists $elt->{'ent'})) { $text= $elt->ent_string; } + + print $output_filter ? 
$output_filter->( $text) : $text; + } + } + + + sub xml_text + { my( $elt, @options)= @_; + + if( @options && grep { lc( $_) eq 'no_recurse' } @options) { return $elt->xml_text_only; } + + my $string=''; + + if( ($elt->{gi} >= $XML::Twig::SPECIAL_GI) ) + { # sprint the children + my $child= $elt->{first_child} || ''; + while( $child) + { $string.= $child->xml_text; + } continue { $child= $child->{next_sibling}; } + } + elsif( (exists $elt->{'pcdata'})) { $string .= $output_filter ? $output_filter->($elt->pcdata_xml_string) + : $elt->pcdata_xml_string; + } + elsif( (exists $elt->{'cdata'})) { $string .= $output_filter ? $output_filter->($elt->cdata_string) + : $elt->cdata_string; + } + elsif( (exists $elt->{'ent'})) { $string .= $elt->ent_string; } + + return $string; + } + + sub xml_text_only + { return join '', map { $_->xml_text if( $_->is_text || (exists $_->{'ent'})) } $_[0]->_children; } + + # same as print but except... it does not print but rather returns the string + # if the second parameter is set then only the content is returned, not the + # start and end tags of the element (but the tags of the included elements are + # returned) + + sub sprint + { my $elt= shift; + my( $old_pretty, $old_empty_tag_style); + + if( $_[0] && isa( $_[0], 'HASH')) + { my %args= XML::Twig::_normalize_args( %{shift()}); + if( defined $args{PrettyPrint}) { $old_pretty = set_pretty_print( $args{PrettyPrint}); } + if( defined $args{EmptyTags}) { $old_empty_tag_style = set_empty_tag_style( $args{EmptyTags}); } + } + + $xml_space_preserve= 1 if( ($elt->inherit_att( 'xml:space') || '') eq 'preserve'); + + @sprint=(); + $elt->_sprint( @_); + my $sprint= join( '', @sprint); + if( $output_filter) { $sprint= $output_filter->( $sprint); } + + if( ( ($pretty== $WRAPPED) || ($pretty==$INDENTEDC)) && !$xml_space_preserve) + { $sprint= _wrap_text( $sprint); } + $xml_space_preserve= 0; + + + if( defined $old_pretty) { set_pretty_print( $old_pretty); } + if( defined $old_empty_tag_style) { set_empty_tag_style( $old_empty_tag_style); } + + return $sprint; + } + + sub _wrap_text + { my( $string)= @_; + my $wrapped; + foreach my $line (split /\n/, $string) + { my( $initial_indent)= $line=~ m{^(\s*)}; + my $wrapped_line= Text::Wrap::wrap( '', $initial_indent . $INDENT, $line) . "\n"; + + # fix glitch with Text::wrap when the first line is long and does not include spaces + # the first line ends up being too short by 2 chars, but we'll have to live with it! + $wrapped_line=~ s{^ +\n }{}s; # this prefix needs to be removed + + $wrapped .= $wrapped_line; + } + + return $wrapped; + } + + + sub _sprint + { my $elt= shift; + my $no_tag= shift || 0; + # in case there's some comments or PI's piggybacking + + if( $elt->{gi} >= $XML::Twig::SPECIAL_GI) + { + my $preserve= ($elt->{'att'}->{'xml:space'} || '') eq 'preserve'; + $xml_space_preserve++ if $preserve; + + push @sprint, $elt->start_tag unless( $no_tag); + + # sprint the children + my $child= $elt->{first_child}; + while( $child) + { $child->_sprint; + $child= $child->{next_sibling}; + } + push @sprint, $elt->end_tag unless( $no_tag); + $xml_space_preserve-- if $preserve; + } + else + { push @sprint, $elt->{extra_data} if( $elt->{extra_data}) ; + if( (exists $elt->{'pcdata'})) { push @sprint, $elt->pcdata_xml_string; } + elsif( (exists $elt->{'cdata'})) { push @sprint, $elt->cdata_string; } + elsif( (exists $elt->{'target'})) { if( ($pretty >= $INDENTED) && !$elt->{parent}->{contains_text}) { push @sprint, "\n" . 
$INDENT x $elt->level; } + push @sprint, $elt->pi_string; + } + elsif( (exists $elt->{'comment'})) { if( ($pretty >= $INDENTED) && !$elt->{parent}->{contains_text}) { push @sprint, "\n" . $INDENT x $elt->level; } + push @sprint, $elt->comment_string; + } + elsif( (exists $elt->{'ent'})) { push @sprint, $elt->ent_string; } + } + + return; + } + + # just a shortcut to $elt->sprint( 1) + sub xml_string + { my $elt= shift; + isa( $_[0], 'HASH') ? $elt->sprint( shift(), 1) : $elt->sprint( 1); + } + + sub pcdata_xml_string + { my $elt= shift; + if( defined( my $string= $elt->{pcdata}) ) + { + if( ! $elt->{extra_data_in_pcdata}) + { + $string=~ s/([$replaced_ents])/$XML::Twig::base_ent{$1}/g unless( !$replaced_ents || $keep_encoding || $elt->{asis}); + $string=~ s{\Q]]>}{]]>}g; + } + else + { _gen_mark( $string); # used by _(un)?protect_extra_data + foreach my $data (reverse @{$elt->{extra_data_in_pcdata}}) + { my $substr= substr( $string, $data->{offset}); + if( $keep_encoding || $elt->{asis}) + { substr( $string, $data->{offset}, 0, $data->{text}); } + else + { substr( $string, $data->{offset}, 0, _protect_extra_data( $data->{text})); } + } + unless( $keep_encoding || $elt->{asis}) + { + $string=~ s{([$replaced_ents])}{$XML::Twig::base_ent{$1}}g ; + $string=~ s{\Q]]>}{]]>}g; + _unprotect_extra_data( $string); + } + } + return $output_text_filter ? $output_text_filter->( $string) : $string; + } + else + { return ''; } + } + + { my $mark; + my( %char2ent, %ent2char); + BEGIN + { %char2ent= ( '<' => 'lt', '&' => 'amp', '>' => 'gt'); + %ent2char= map { $char2ent{$_} => $_ } keys %char2ent; + } + + # generate a unique mark (a string) not found in the string, + # used to mark < and & in the extra data + sub _gen_mark + { $mark="AAAA"; + $mark++ while( index( $_[0], $mark) > -1); + return $mark; + } + + sub _protect_extra_data + { my( $extra_data)= @_; + $extra_data=~ s{([<&>])}{:$mark:$char2ent{$1}:}g; + return $extra_data; + } + + sub _unprotect_extra_data + { $_[0]=~ s{:$mark:(\w+):}{$ent2char{$1}}g; } + + } + + sub cdata_string + { my $cdata= $_[0]->{cdata}; + unless( defined $cdata) { return ''; } + if( $remove_cdata) + { $cdata=~ s/([$replaced_ents])/$XML::Twig::base_ent{$1}/g; } + else + { $cdata= $CDATA_START . $cdata . $CDATA_END; } + return $cdata; + } + + sub att_xml_string + { my $elt= shift; + my $att= shift; + + my $replace= $replaced_ents . "$quote\n\r\t"; + if($_[0] && $_[0]->{escape_gt} && ($replace!~ m{>}) ) { $replace .='>'; } + + if( defined (my $string= $elt->{att}->{$att})) + { return _att_xml_string( $string, $replace); } + else + { return ''; } + } + + # escaped xml string for an attribute value + sub _att_xml_string + { my( $string, $escape)= @_; + if( !defined( $string)) { return ''; } + if( $keep_encoding) + { $string=~ s{$quote}{$XML::Twig::base_ent{$quote}}g; + } + else + { + if( $do_not_escape_amp_in_atts) + { $escape=~ s{^.}{}; # seems like the most backward compatible way to remove & from the list + $string=~ s{([$escape])}{$XML::Twig::base_ent{$1}}g; + $string=~ s{&(?!(\w+|#\d+|[xX][0-9a-fA-F]+);)}{&}g; # dodgy: escape & that do not start an entity + } + else + { $string=~ s{([$escape])}{$XML::Twig::base_ent{$1}}g; + $string=~ s{\Q]]>}{]]>}g; + } + } + + return $output_text_filter ? 
$output_text_filter->( $string) : $string; + } + + sub ent_string + { my $ent= shift; + my $ent_text= $ent->{ent}; + my( $t, $el, $ent_string); + if( $expand_external_entities + && ($t= $ent->twig) + && ($el= $t->entity_list) + && ($ent_string= $el->{entities}->{$ent->ent_name}->{val}) + ) + { return $ent_string; } + else + { return $ent_text; } + } + + # returns just the text, no tags, for an element + sub text + { my( $elt, @options)= @_; + + if( @options && grep { lc( $_) eq 'no_recurse' } @options) { return $elt->text_only; } + + my $string; + + if( (exists $elt->{'pcdata'})) { return $elt->{pcdata}; } + elsif( (exists $elt->{'cdata'})) { return $elt->{cdata}; } + elsif( (exists $elt->{'target'})) { return $elt->pi_string;} + elsif( (exists $elt->{'comment'})) { return $elt->{comment}; } + elsif( (exists $elt->{'ent'})) { return $elt->{ent} ; } + + my $child= $elt->{first_child} ||''; + while( $child) + { + my $child_text= $child->text; + $string.= defined( $child_text) ? $child_text : ''; + } continue { $child= $child->{next_sibling}; } + + unless( defined $string) { $string=''; } + + return $output_text_filter ? $output_text_filter->( $string) : $string; + } + + sub text_only + { return join '', map { $_->text if( $_->is_text || (exists $_->{'ent'})) } $_[0]->_children; } + + sub trimmed_text + { my $elt= shift; + my $text= $elt->text( @_); + $text=~ s{\s+}{ }sg; + $text=~ s{^\s*}{}; + $text=~ s{\s*$}{}; + return $text; + } + + sub trim + { my( $elt)= @_; + my $pcdata= $elt->first_descendant( $TEXT); + (my $pcdata_text= $pcdata->text)=~ s{^\s+}{}s; + $pcdata->set_text( $pcdata_text); + $pcdata= $elt->last_descendant( $TEXT); + ($pcdata_text= $pcdata->text)=~ s{\s+$}{}; + $pcdata->set_text( $pcdata_text); + foreach my $pcdata ($elt->descendants( $TEXT)) + { ($pcdata_text= $pcdata->text)=~ s{\s+}{ }g; + $pcdata->set_text( $pcdata_text); + } + return $elt; + } + + + # remove cdata sections (turns them into regular pcdata) in an element + sub remove_cdata + { my $elt= shift; + foreach my $cdata ($elt->descendants_or_self( $CDATA)) + { if( $keep_encoding) + { my $data= $cdata->{cdata}; + $data=~ s{([&<"'])}{$XML::Twig::base_ent{$1}}g; + $cdata->{pcdata}= (delete $cdata->{empty} || 1) && $data; + } + else + { $cdata->{pcdata}= (delete $cdata->{empty} || 1) && $cdata->{cdata}; } + $cdata->{gi}=$XML::Twig::gi2index{$PCDATA} or $cdata->set_gi( $PCDATA); + undef $cdata->{cdata}; + } + } + +sub _is_private { return _is_private_name( $_[0]->gi); } +sub _is_private_name { return $_[0]=~ m{^#(?!default:)}; } + + +} # end of block containing package globals ($pretty_print, $quotes, keep_encoding...) + +# merges consecutive #PCDATAs in am element +sub normalize + { my( $elt)= @_; + my @descendants= $elt->descendants( $PCDATA); + while( my $desc= shift @descendants) + { if( ! 
length $desc->{pcdata}) { $desc->delete; next; } + while( @descendants && $desc->{next_sibling} && $desc->{next_sibling}== $descendants[0]) + { my $to_merge= shift @descendants; + $desc->merge_text( $to_merge); + } + } + return $elt; + } + +# SAX export methods +sub toSAX1 + { _toSAX(@_, \&_start_tag_data_SAX1, \&_end_tag_data_SAX1); } + +sub toSAX2 + { _toSAX(@_, \&_start_tag_data_SAX2, \&_end_tag_data_SAX2); } + +sub _toSAX + { my( $elt, $handler, $start_tag_data, $end_tag_data)= @_; + if( $elt->{gi} >= $XML::Twig::SPECIAL_GI) + { my $data= $start_tag_data->( $elt); + _start_prefix_mapping( $elt, $handler, $data); + if( $data && (my $start_element = $handler->can( 'start_element'))) + { unless( $elt->_flushed) { $start_element->( $handler, $data); } } + + foreach my $child ($elt->_children) + { $child->_toSAX( $handler, $start_tag_data, $end_tag_data); } + + if( (my $data= $end_tag_data->( $elt)) && (my $end_element = $handler->can( 'end_element')) ) + { $end_element->( $handler, $data); } + _end_prefix_mapping( $elt, $handler); + } + else # text or special element + { if( (exists $elt->{'pcdata'}) && (my $characters= $handler->can( 'characters'))) + { $characters->( $handler, { Data => $elt->{pcdata} }); } + elsif( (exists $elt->{'cdata'})) + { if( my $start_cdata= $handler->can( 'start_cdata')) + { $start_cdata->( $handler); } + if( my $characters= $handler->can( 'characters')) + { $characters->( $handler, {Data => $elt->{cdata} }); } + if( my $end_cdata= $handler->can( 'end_cdata')) + { $end_cdata->( $handler); } + } + elsif( ((exists $elt->{'target'})) && (my $pi= $handler->can( 'processing_instruction'))) + { $pi->( $handler, { Target =>$elt->{target}, Data => $elt->{data} }); } + elsif( ((exists $elt->{'comment'})) && (my $comment= $handler->can( 'comment'))) + { $comment->( $handler, { Data => $elt->{comment} }); } + elsif( ((exists $elt->{'ent'}))) + { + if( my $se= $handler->can( 'skipped_entity')) + { $se->( $handler, { Name => $elt->ent_name }); } + elsif( my $characters= $handler->can( 'characters')) + { if( defined $elt->ent_string) + { $characters->( $handler, {Data => $elt->ent_string}); } + else + { $characters->( $handler, {Data => $elt->ent_name}); } + } + } + + } + } + +sub _start_tag_data_SAX1 + { my( $elt)= @_; + my $name= $XML::Twig::index2gi[$elt->{'gi'}]; + return if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )); + my $attributes={}; + my $atts= $elt->{att}; + while( my( $att, $value)= each %$atts) + { $attributes->{$att}= $value unless( ( $att=~ m{^#(?!default:)} )); } + my $data= { Name => $name, Attributes => $attributes}; + return $data; + } + +sub _end_tag_data_SAX1 + { my( $elt)= @_; + return if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )); + return { Name => $XML::Twig::index2gi[$elt->{'gi'}] }; + } + +sub _start_tag_data_SAX2 + { my( $elt)= @_; + my $data={}; + + my $name= $XML::Twig::index2gi[$elt->{'gi'}]; + return if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )); + $data->{Name} = $name; + $data->{Prefix} = $elt->ns_prefix; + $data->{LocalName} = $elt->local_name; + $data->{NamespaceURI} = $elt->namespace; + + # save a copy of the data so we can re-use it for the end tag + my %sax2_data= %$data; + $elt->{twig_elt_SAX2_data}= \%sax2_data; + + # add the attributes + $data->{Attributes}= 
$elt->_atts_to_SAX2; + + return $data; + } + +sub _atts_to_SAX2 + { my $elt= shift; + my $SAX2_atts= {}; + foreach my $att (keys %{$elt->{att}}) + { + next if( ( $att=~ m{^#(?!default:)} )); + my $SAX2_att={}; + $SAX2_att->{Name} = $att; + $SAX2_att->{Prefix} = _ns_prefix( $att); + $SAX2_att->{LocalName} = _local_name( $att); + $SAX2_att->{NamespaceURI} = $elt->namespace( $SAX2_att->{Prefix}); + $SAX2_att->{Value} = $elt->{'att'}->{$att}; + my $SAX2_att_name= "{$SAX2_att->{NamespaceURI}}$SAX2_att->{LocalName}"; + + $SAX2_atts->{$SAX2_att_name}= $SAX2_att; + } + return $SAX2_atts; + } + +sub _start_prefix_mapping + { my( $elt, $handler, $data)= @_; + if( my $start_prefix_mapping= $handler->can( 'start_prefix_mapping') + and my @new_prefix_mappings= grep { /^\{[^}]*\}xmlns/ || /^\{$XMLNS_URI\}/ } keys %{$data->{Attributes}} + ) + { foreach my $prefix (@new_prefix_mappings) + { my $prefix_string= $data->{Attributes}->{$prefix}->{LocalName}; + if( $prefix_string eq 'xmlns') { $prefix_string=''; } + my $prefix_data= + { Prefix => $prefix_string, + NamespaceURI => $data->{Attributes}->{$prefix}->{Value} + }; + $start_prefix_mapping->( $handler, $prefix_data); + $elt->{twig_end_prefix_mapping} ||= []; + push @{$elt->{twig_end_prefix_mapping}}, $prefix_string; + } + } + } + +sub _end_prefix_mapping + { my( $elt, $handler)= @_; + if( my $end_prefix_mapping= $handler->can( 'end_prefix_mapping')) + { foreach my $prefix (@{$elt->{twig_end_prefix_mapping}}) + { $end_prefix_mapping->( $handler, { Prefix => $prefix} ); } + } + } + +sub _end_tag_data_SAX2 + { my( $elt)= @_; + return if( ( (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 1) eq '#') && (substr( $XML::Twig::index2gi[$elt->{'gi'}], 0, 9) ne '#default:') )); + return $elt->{twig_elt_SAX2_data}; + } + +sub contains_text + { my $elt= shift; + my $child= $elt->{first_child}; + while ($child) + { return 1 if( $child->is_text || (exists $child->{'ent'})); + $child= $child->{next_sibling}; + } + return 0; + } + +# creates a single pcdata element containing the text as child of the element +# options: +# - force_pcdata: when set to a true value forces the text to be in a #PCDATA +# even if the original element was a #CDATA +sub set_text + { my( $elt, $string, %option)= @_; + + if( $XML::Twig::index2gi[$elt->{'gi'}] eq $PCDATA) + { return $elt->{pcdata}= (delete $elt->{empty} || 1) && $string; } + elsif( $XML::Twig::index2gi[$elt->{'gi'}] eq $CDATA) + { if( $option{force_pcdata}) + { $elt->{gi}=$XML::Twig::gi2index{$PCDATA} or $elt->set_gi( $PCDATA); + $elt->_set_cdata(''); + return $elt->{pcdata}= (delete $elt->{empty} || 1) && $string; + } + else + { return $elt->_set_cdata( $string); } + } + elsif( $elt->contains_a_single( $PCDATA) ) + { # optimized so we have a slight chance of not loosing embedded comments and pi's + $elt->{first_child}->set_pcdata( $string); + return $elt; + } + + foreach my $child (@{[$elt->_children]}) + { $child->delete; } + + my $pcdata= $elt->_new_pcdata( $string); + $pcdata->paste( $elt); + + $elt->{empty}=0; + + return $elt; + } + +# set the content of an element from a list of strings and elements +sub set_content + { my $elt= shift; + + return $elt unless defined $_[0]; + + # attributes can be given as a hash (passed by ref) + if( ref $_[0] eq 'HASH') + { my $atts= shift; + $elt->del_atts; # usually useless but better safe than sorry + $elt->set_atts( $atts); + return $elt unless defined $_[0]; + } + + # check next argument for #EMPTY + if( !(ref $_[0]) && ($_[0] eq $EMPTY) ) + { $elt->{empty}= 1; return $elt; } + + # 
case where we really want to do a set_text, the element is '#PCDATA' + # or contains a single PCDATA and we only want to add text in it + if( ($XML::Twig::index2gi[$elt->{'gi'}] eq $PCDATA || $elt->contains_a_single( $PCDATA)) + && (@_ == 1) && !( ref $_[0])) + { $elt->set_text( $_[0]); + return $elt; + } + elsif( ($XML::Twig::index2gi[$elt->{'gi'}] eq $CDATA) && (@_ == 1) && !( ref $_[0])) + { $elt->_set_cdata( $_[0]); + return $elt; + } + + # delete the children + foreach my $child (@{[$elt->_children]}) + { $child->delete; } + + if( @_) { $elt->{empty}=0; } + + foreach my $child (@_) + { if( ref( $child) && isa( $child, 'XML::Twig::Elt')) + { # argument is an element + $child->paste( 'last_child', $elt); + } + else + { # argument is a string + if( (my $pcdata= $elt->{last_child}) && $elt->{last_child}->is_pcdata) + { # previous child is also pcdata: just concatenate + $pcdata->{pcdata}= (delete $pcdata->{empty} || 1) && $pcdata->{pcdata} . $child + } + else + { # previous child is not a string: create a new pcdata element + $pcdata= $elt->_new_pcdata( $child); + $pcdata->paste( 'last_child', $elt); + } + } + } + + + return $elt; + } + +# inserts an element (whose gi is given) as child of the element +# all children of the element are now children of the new element +# returns the new element +sub insert + { my ($elt, @args)= @_; + # first cut the children + my @children= do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }; + foreach my $child (@children) + { $child->cut; } + # insert elements + while( my $gi= shift @args) + { my $new_elt= $elt->new( $gi); + # add attributes if needed + if( defined( $args[0]) && ( isa( $args[0], 'HASH')) ) + { $new_elt->set_atts( shift @args); } + # paste the element + $new_elt->paste( $elt); + $elt->{empty}=0; + $elt= $new_elt; + } + # paste back the children + foreach my $child (@children) + { $child->paste( 'last_child', $elt); } + return $elt; + } + +# insert a new element +# $elt->insert_new_element( $opt_position, $gi, $opt_atts_hash, @opt_content); +# the element is created with the same syntax as new +# position is the same as in paste, first_child by default +sub insert_new_elt + { my $elt= shift; + my $position= $_[0]; + if( ($position eq 'before') || ($position eq 'after') + || ($position eq 'first_child') || ($position eq 'last_child')) + { shift; } + else + { $position= 'first_child'; } + + my $new_elt= $elt->new( @_); + $new_elt->paste( $position, $elt); + + #if( defined $new_elt->{'att'}->{$ID}) { $new_elt->set_id( $new_elt->{'att'}->{$ID}); } + + return $new_elt; + } + +# wraps an element in elements which gi's are given as arguments +# $elt->wrap_in( 'td', 'tr', 'table') wraps the element as a single +# cell in a table for example +# returns the new element +sub wrap_in + { my $elt= shift; + while( my $gi = shift @_) + { my $new_elt = $elt->new( $gi); + if( $elt->{twig_current}) + { my $t= $elt->twig; + $t->{twig_current}= $new_elt; + delete $elt->{'twig_current'}; + $new_elt->{'twig_current'}=1; + } + + if( my $parent= $elt->{parent}) + { $new_elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $new_elt->{parent});} ; + if( $parent->{first_child} == $elt) { $parent->{first_child}= $new_elt; } + if( $parent->{last_child} == $elt) { $parent->{empty}=0; $parent->{last_child}=$new_elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; } + } + else + { # wrapping the root + my $twig= $elt->twig; + if( $twig && 
$twig->root && ($twig->root eq $elt) ) + { $twig->set_root( $new_elt); + } + } + + if( my $prev_sibling= $elt->{prev_sibling}) + { $new_elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $new_elt->{prev_sibling});} ; + $prev_sibling->{next_sibling}= $new_elt; + } + + if( my $next_sibling= $elt->{next_sibling}) + { $new_elt->{next_sibling}= $next_sibling; + $next_sibling->{prev_sibling}=$new_elt; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; + } + $new_elt->{first_child}= $elt; + $new_elt->{empty}=0; $new_elt->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $new_elt->{last_child});} ; + + $elt->{parent}=$new_elt; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; + $elt->{prev_sibling}=undef; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; + $elt->{next_sibling}= undef; + + # add the attributes if the next argument is a hash ref + if( defined( $_[0]) && (isa( $_[0], 'HASH')) ) + { $new_elt->set_atts( shift @_); } + + $elt= $new_elt; + } + + return $elt; + } + +sub replace + { my( $elt, $ref)= @_; + + if( $elt->{parent}) { $elt->cut; } + + if( my $parent= $ref->{parent}) + { $elt->{parent}=$parent; if( $XML::Twig::weakrefs) { weaken( $elt->{parent});} ; + if( $parent->{first_child} == $ref) { $parent->{first_child}= $elt; } + if( $parent->{last_child} == $ref) { $parent->{empty}=0; $parent->{last_child}=$elt; if( $XML::Twig::weakrefs) { weaken( $parent->{last_child});} ; } + } + elsif( $ref->twig && $ref == $ref->twig->root) + { $ref->twig->set_root( $elt); } + + if( my $prev_sibling= $ref->{prev_sibling}) + { $elt->{prev_sibling}=$prev_sibling; if( $XML::Twig::weakrefs) { weaken( $elt->{prev_sibling});} ; + $prev_sibling->{next_sibling}= $elt; + } + if( my $next_sibling= $ref->{next_sibling}) + { $elt->{next_sibling}= $next_sibling; + $next_sibling->{prev_sibling}=$elt; if( $XML::Twig::weakrefs) { weaken( $next_sibling->{prev_sibling});} ; + } + + $ref->{parent}=undef; if( $XML::Twig::weakrefs) { weaken( $ref->{parent});} ; + $ref->{prev_sibling}=undef; if( $XML::Twig::weakrefs) { weaken( $ref->{prev_sibling});} ; + $ref->{next_sibling}= undef; + return $ref; + } + +sub replace_with + { my $ref= shift; + my $elt= shift; + $elt->replace( $ref); + foreach my $new_elt (reverse @_) + { $new_elt->paste( after => $elt); } + return $elt; + } + + +# move an element, same syntax as paste, except the element is first cut +sub move + { my $elt= shift; + $elt->cut; + $elt->paste( @_); + return $elt; + } + + +# adds a prefix to an element, creating a pcdata child if needed +sub prefix + { my ($elt, $prefix, $option)= @_; + my $asis= ($option && ($option eq 'asis')) ? 1 : 0; + if( (exists $elt->{'pcdata'}) + && (($asis && $elt->{asis}) || (!$asis && ! $elt->{asis})) + ) + { $elt->{pcdata}= (delete $elt->{empty} || 1) && $prefix . $elt->{pcdata}; } + elsif( $elt->{first_child} && $elt->{first_child}->is_pcdata + && ( ($asis && $elt->{first_child}->{asis}) + || (!$asis && ! $elt->{first_child}->{asis})) + ) + { + $elt->{first_child}->set_pcdata( $prefix . $elt->{first_child}->pcdata); + } + else + { my $new_elt= $elt->_new_pcdata( $prefix); + my $pos= (exists $elt->{'pcdata'}) ? 'before' : 'first_child'; + $new_elt->paste( $pos => $elt); + if( $asis) { $new_elt->set_asis; } + } + return $elt; + } + +# adds a suffix to an element, creating a pcdata child if needed +sub suffix + { my ($elt, $suffix, $option)= @_; + my $asis= ($option && ($option eq 'asis')) ? 
1 : 0; + if( (exists $elt->{'pcdata'}) + && (($asis && $elt->{asis}) || (!$asis && ! $elt->{asis})) + ) + { $elt->{pcdata}= (delete $elt->{empty} || 1) && $elt->{pcdata} . $suffix; } + elsif( $elt->{last_child} && $elt->{last_child}->is_pcdata + && ( ($asis && $elt->{last_child}->{asis}) + || (!$asis && ! $elt->{last_child}->{asis})) + ) + { $elt->{last_child}->set_pcdata( $elt->{last_child}->pcdata . $suffix); } + else + { my $new_elt= $elt->_new_pcdata( $suffix); + my $pos= (exists $elt->{'pcdata'}) ? 'after' : 'last_child'; + $new_elt->paste( $pos => $elt); + if( $asis) { $new_elt->set_asis; } + } + return $elt; + } + +# create a path to an element ('/root/.../gi) +sub path + { my $elt= shift; + my @context= ( $elt, $elt->ancestors); + return "/" . join( "/", reverse map {$_->gi} @context); + } + +sub xpath + { my $elt= shift; + my $xpath; + foreach my $ancestor (reverse $elt->ancestors_or_self) + { my $gi= $XML::Twig::index2gi[$ancestor->{'gi'}]; + $xpath.= "/$gi"; + my $index= $ancestor->prev_siblings( $gi) + 1; + unless( ($index == 1) && !$ancestor->next_sibling( $gi)) + { $xpath.= "[$index]"; } + } + return $xpath; + } + +# methods used mainly by wrap_children + +# return a string with the +# for an element ...... +# returns '' +sub _stringify_struct + { my( $elt, %opt)= @_; + my $string=''; + my $pretty_print= set_pretty_print( 'none'); + foreach my $child ($elt->_children) + { $child->add_id; $string .= $child->start_tag( { escape_gt => 1 }) ||''; } + set_pretty_print( $pretty_print); + return $string; + } + +# wrap a series of elements in a new one +sub _wrap_range + { my $elt= shift; + my $gi= shift; + my $atts= isa( $_[0], 'HASH') ? shift : undef; + my $range= shift; # the string with the tags to wrap + + my $t= $elt->twig; + + # get the tags to wrap + my @to_wrap; + while( $range=~ m{<\w+\s+[^>]*id=("[^"]*"|'[^']*')[^>]*>}g) + { push @to_wrap, $t->elt_id( substr( $1, 1, -1)); } + + return '' unless @to_wrap; + + my $to_wrap= shift @to_wrap; + my %atts= %$atts; + my $new_elt= $to_wrap->wrap_in( $gi, \%atts); + $_->move( last_child => $new_elt) foreach (@to_wrap); + + return ''; + } + +# wrap children matching a regexp in a new element +sub wrap_children + { my( $elt, $regexp, $gi, $atts)= @_; + + $atts ||={}; + + my $elt_as_string= $elt->_stringify_struct; # stringify the elt structure + $regexp=~ s{(<[^>]*>)}{_match_expr( $1)}eg; # in the regexp, replace gi's by the proper regexp + $elt_as_string=~ s{($regexp)}{$elt->_wrap_range( $gi, $atts, $1)}eg; # then do the actual replace + + return $elt; + } + +sub _match_expr + { my $tag= shift; + my( $gi, %atts)= XML::Twig::_parse_start_tag( $tag); + return _match_tag( $gi, %atts); + } + + +sub _match_tag + { my( $elt, %atts)= @_; + my $string= "<$elt\\b"; + foreach my $key (sort keys %atts) + { my $val= qq{\Q$atts{$key}\E}; + $string.= qq{[^>]*$key=(?:"$val"|'$val')}; + } + $string.= qq{[^>]*>}; + return "(?:$string)"; + } + +sub field_to_att + { my( $elt, $cond, $att)= @_; + $att ||= $cond; + my $child= $elt->first_child( $cond) or return undef; + $elt->set_att( $att => $child->text); + $child->cut; + return $elt; + } + +sub att_to_field + { my( $elt, $att, $tag)= @_; + $tag ||= $att; + my $child= $elt->insert_new_elt( first_child => $tag, $elt->{'att'}->{$att}); + $elt->del_att( $att); + return $elt; + } + +# sort children methods + +sub sort_children_on_field + { my $elt = shift; + my $field = shift; + my $get_key= sub { return $_[0]->field( $field) }; + return $elt->sort_children( $get_key, @_); + } + +sub sort_children_on_att + 
{ my $elt = shift; + my $att = shift; + my $get_key= sub { return $_[0]->{'att'}->{$att} }; + return $elt->sort_children( $get_key, @_); + } + +sub sort_children_on_value + { my $elt = shift; + #my $get_key= eval qq{ sub { $NO_WARNINGS; return \$_[0]->text } }; + my $get_key= \&text; + return $elt->sort_children( $get_key, @_); + } + +sub sort_children + { my( $elt, $get_key, %opt)=@_; + $opt{order} ||= 'normal'; + $opt{type} ||= 'alpha'; + my( $par_a, $par_b)= ($opt{order} eq 'reverse') ? qw( b a) : qw ( a b) ; + my $op= ($opt{type} eq 'numeric') ? '<=>' : 'cmp' ; + my @children= $elt->cut_children; + if( $opt{type} eq 'numeric') + { @children= map { $_->[1] } + sort { $a->[0] <=> $b->[0] } + map { [ $get_key->( $_), $_] } @children; + } + elsif( $opt{type} eq 'alpha') + { @children= map { $_->[1] } + sort { $a->[0] cmp $b->[0] } + map { [ $get_key->( $_), $_] } @children; + } + else + { croak "wrong sort type '$opt{type}', should be either 'alpha' or 'numeric'"; } + + @children= reverse @children if( $opt{order} eq 'reverse'); + $elt->set_content( @children); + } + + +# comparison methods + +sub before + { my( $a, $b)=@_; + if( $a->cmp( $b) == -1) { return 1; } else { return 0; } + } + +sub after + { my( $a, $b)=@_; + if( $a->cmp( $b) == 1) { return 1; } else { return 0; } + } + +sub lt + { my( $a, $b)=@_; + return 1 if( $a->cmp( $b) == -1); + return 0; + } + +sub le + { my( $a, $b)=@_; + return 1 unless( $a->cmp( $b) == 1); + return 0; + } + +sub gt + { my( $a, $b)=@_; + return 1 if( $a->cmp( $b) == 1); + return 0; + } + +sub ge + { my( $a, $b)=@_; + return 1 unless( $a->cmp( $b) == -1); + return 0; + } + + +sub cmp + { my( $a, $b)=@_; + + # easy cases + return 0 if( $a == $b); + return 1 if( $a->in($b)); # a in b => a starts after b + return -1 if( $b->in($a)); # b in a => a starts before b + + # ancestors does not include the element itself + my @a_pile= ($a, $a->ancestors); + my @b_pile= ($b, $b->ancestors); + + # the 2 elements are not in the same twig + return undef unless( $a_pile[-1] == $b_pile[-1]); + + # find the first non common ancestors (they are siblings) + my $a_anc= pop @a_pile; + my $b_anc= pop @b_pile; + + while( $a_anc == $b_anc) + { $a_anc= pop @a_pile; + $b_anc= pop @b_pile; + } + + # from there move left and right and figure out the order + my( $a_prev, $a_next, $b_prev, $b_next)= ($a_anc, $a_anc, $b_anc, $b_anc); + while() + { $a_prev= $a_prev->{prev_sibling} || return( -1); + return 1 if( $a_prev == $b_next); + $a_next= $a_next->{next_sibling} || return( 1); + return -1 if( $a_next == $b_prev); + $b_prev= $b_prev->{prev_sibling} || return( 1); + return -1 if( $b_prev == $a_next); + $b_next= $b_next->{next_sibling} || return( -1); + return 1 if( $b_next == $a_prev); + } + } + +sub _dump + { my( $elt, $option)= @_; + + my $atts = defined $option->{atts} ? $option->{atts} : 1; + my $extra = defined $option->{extra} ? $option->{extra} : 0; + my $short_text = defined $option->{short_text} ? $option->{short_text} : 40; + + my $sp= '| '; + my $indent= $sp x $elt->level; + my $indent_sp= ' ' x $elt->level; + + my $dump=''; + if( $elt->is_elt) + { + $dump .= $indent . '|-' . $XML::Twig::index2gi[$elt->{'gi'}]; + + if( $atts && (my @atts= $elt->att_names) ) + { $dump .= ' ' . join( ' ', map { qq{$_="} . $elt->{'att'}->{$_} . 
qq{"} } @atts); } + + $dump .= "\n"; + if( $extra) { $dump .= $elt->_dump_extra_data( $indent, $indent_sp, $short_text); } + $dump .= join( "", map { $_->_dump( $option) } do { my $elt= $elt; my @children=(); my $child= $elt->{first_child}; while( $child) { push @children, $child; $child= $child->{next_sibling}; } @children; }); + } + else + { + if( (exists $elt->{'pcdata'})) + { $dump .= "$indent|-PCDATA: '" . _short_text( $elt->{pcdata}, $short_text) . "'\n" } + elsif( (exists $elt->{'ent'})) + { $dump .= "$indent|-ENTITY: '" . _short_text( $elt->{ent}, $short_text) . "'\n" } + elsif( (exists $elt->{'cdata'})) + { $dump .= "$indent|-CDATA: '" . _short_text( $elt->{cdata}, $short_text) . "'\n" } + elsif( (exists $elt->{'comment'})) + { $dump .= "$indent|-COMMENT: '" . _short_text( $elt->comment_string, $short_text) . "'\n" } + elsif( (exists $elt->{'target'})) + { $dump .= "$indent|-PI: '" . $elt->{target} . "' - '" . _short_text( $elt->{data}, $short_text) . "'\n" } + if( $extra) { $dump .= $elt->_dump_extra_data( $indent, $indent_sp, $short_text); } + } + return $dump; + } + +sub _dump_extra_data + { my( $elt, $indent, $indent_sp, $short_text)= @_; + my $dump=''; + if( $elt->extra_data) + { my $extra_data = $indent . "|-- (cpi before) '" . _short_text( $elt->extra_data, $short_text) . "'"; + $extra_data=~ s{\n}{$indent_sp}g; + $dump .= $extra_data . "\n"; + } + if( $elt->{extra_data_in_pcdata}) + { foreach my $data ( @{$elt->{extra_data_in_pcdata}}) + { my $extra_data = $indent . "|-- (cpi offset $data->{offset}) '" . _short_text( $data->{text}, $short_text) . "'"; + $extra_data=~ s{\n}{$indent_sp}g; + $dump .= $extra_data . "\n"; + } + } + if( $elt->{extra_data_before_end_tag}) + { my $extra_data = $indent . "|-- (cpi end) '" . _short_text( $elt->{extra_data_before_end_tag}, $short_text) . "'"; + $extra_data=~ s{\n}{$indent_sp}g; + $dump .= $extra_data . "\n"; + } + return $dump; + } + + +sub _short_text + { my( $string, $length)= @_; + if( !$length || (length( $string) < $length) ) { return $string; } + my $l1= (length( $string) -5) /2; + my $l2= length( $string) - ($l1 + 5); + return substr( $string, 0, $l1) . ' ... ' . substr( $string, -$l2); + } + + +sub _and { return _join_defined( ' && ', @_); } +sub _join_defined { return join( shift(), grep { $_ } @_); } + +1; +__END__ + +=head1 NAME + +XML::Twig - A perl module for processing huge XML documents in tree mode. + +=head1 SYNOPSIS + +Note that this documentation is intended as a reference to the module. + +Complete docs, including a tutorial, examples, an easier to use HTML version, +a quick reference card and a FAQ are available at L + +Small documents (loaded in memory as a tree): + + my $twig=XML::Twig->new(); # create the twig + $twig->parsefile( 'doc.xml'); # build it + my_process( $twig); # use twig methods to process it + $twig->print; # output the twig + +Huge documents (processed in combined stream/tree mode): + + # at most one div will be loaded in memory + my $twig=XML::Twig->new( + twig_handlers => + { title => sub { $_->set_tag( 'h2') }, # change title tags to h2 + para => sub { $_->set_tag( 'p') }, # change para to p + hidden => sub { $_->delete; }, # remove hidden elements + list => \&my_list_process, # process list elements + div => sub { $_[0]->flush; }, # output and free memory + }, + pretty_print => 'indented', # output will be nicely formatted + empty_tags => 'html', # outputs + ); + $twig->parsefile( 'my_big.xml'); + + sub my_list_process + { my( $twig, $list)= @_; + # ... 
+ } + +See L for other ways to use the module, as a +filter for example. + +=encoding utf8 + +=head1 DESCRIPTION + +This module provides a way to process XML documents. It is build on top +of C. + +The module offers a tree interface to the document, while allowing you +to output the parts of it that have been completely processed. + +It allows minimal resource (CPU and memory) usage by building the tree +only for the parts of the documents that need actual processing, through the +use of the C > and +C > options. The +C > and C > methods also help +to increase performances. + +XML::Twig tries to make simple things easy so it tries its best to takes care +of a lot of the (usually) annoying (but sometimes necessary) features that +come with XML and XML::Parser. + +=head1 TOOLS + +XML::Twig comes with a few command-line utilities: + +=head2 xml_pp - xml pretty-printer + +XML pretty printer using XML::Twig + +=head2 xml_grep - grep XML files looking for specific elements + +C does a grep on XML files. Instead of using regular expressions +it uses XPath expressions (in fact the subset of XPath supported by +XML::Twig). + +=head2 xml_split - cut a big XML file into smaller chunks + +C takes a (presumably big) XML file and split it in several smaller +files, based on various criteria (level in the tree, size or an XPath +expression) + +=head2 xml_merge - merge back XML files split with xml_split + +C takes several xml files that have been split using C +and recreates a single file. + +=head2 xml_spellcheck - spellcheck XML files + +C lets you spell check the content of an XML file. It extracts +the text (the content of elements and optionally of attributes), call a spell +checker on it and then recreates the XML document. + + +=head1 XML::Twig 101 + +XML::Twig can be used either on "small" XML documents (that fit in memory) +or on huge ones, by processing parts of the document and outputting or +discarding them once they are processed. + + +=head2 Loading an XML document and processing it + + my $t= XML::Twig->new(); + $t->parse( 'titlep 1p 2'); + my $root= $t->root; + $root->set_tag( 'html'); # change doc to html + $title= $root->first_child( 'title'); # get the title + $title->set_tag( 'h1'); # turn it into h1 + my @para= $root->children( 'para'); # get the para children + foreach my $para (@para) + { $para->set_tag( 'p'); } # turn them into p + $t->print; # output the document + +Other useful methods include: + +L: C<< $elt->{'att'}->{'foo'} >> return the C attribute for an +element, + +L : C<< $elt->set_att( foo => "bar") >> sets the C +attribute to the C value, + +L: C<< $elt->{next_sibling} >> return the next sibling +in the document (in the example C<< $title->{next_sibling} >> is the first +C, you can also (and actually should) use +C<< $elt->next_sibling( 'para') >> to get it + +The document can also be transformed through the use of the L, +L, L and L methods: +C<< $title->cut; $title->paste( after => $p); >> for example + +And much, much more, see L. + +=head2 Processing an XML document chunk by chunk + +One of the strengths of XML::Twig is that it let you work with files that do +not fit in memory (BTW storing an XML document in memory as a tree is quite +memory-expensive, the expansion factor being often around 10). + +To do this you can define handlers, that will be called once a specific +element has been completely parsed. 
In these handlers you can access the +element and process it as you see fit, using the navigation and the +cut-n-paste methods, plus lots of convenient ones like C >. +Once the element is completely processed you can then C > it, +which will output it and free the memory. You can also C > it +if you don't need to output it (if you are just extracting some data from +the document for example). The handler will be called again once the next +relevant element has been parsed. + + my $t= XML::Twig->new( twig_handlers => + { section => \§ion, + para => sub { $_->set_tag( 'p'); } + }, + ); + $t->parsefile( 'doc.xml'); + + # the handler is called once a section is completely parsed, ie when + # the end tag for section is found, it receives the twig itself and + # the element (including all its sub-elements) as arguments + sub section + { my( $t, $section)= @_; # arguments for all twig_handlers + $section->set_tag( 'div'); # change the tag name.4, my favourite method... + # let's use the attribute nb as a prefix to the title + my $title= $section->first_child( 'title'); # find the title + my $nb= $title->{'att'}->{'nb'}; # get the attribute + $title->prefix( "$nb - "); # easy isn't it? + $section->flush; # outputs the section and frees memory + } + + +There is of course more to it: you can trigger handlers on more elaborate +conditions than just the name of the element, C
for example. + + my $t= XML::Twig->new( twig_handlers => + { 'section/title' => sub { $_->print } } + ) + ->parsefile( 'doc.xml'); + +Here C<< sub { $_->print } >> simply prints the current element (C<$_> is aliased +to the element in the handler). + +You can also trigger a handler on a test on an attribute: + + my $t= XML::Twig->new( twig_handlers => + { 'section[@level="1"]' => sub { $_->print } } + ); + ->parsefile( 'doc.xml'); + +You can also use C > to process an +element as soon as the start tag is found. Besides C > you +can also use C >, + +=head2 Processing just parts of an XML document + +The twig_roots mode builds only the required sub-trees from the document +Anything outside of the twig roots will just be ignored: + + my $t= XML::Twig->new( + # the twig will include just the root and selected titles + twig_roots => { 'section/title' => \&print_n_purge, + 'annex/title' => \&print_n_purge + } + ); + $t->parsefile( 'doc.xml'); + + sub print_n_purge + { my( $t, $elt)= @_; + print $elt->text; # print the text (including sub-element texts) + $t->purge; # frees the memory + } + +You can use that mode when you want to process parts of a documents but are +not interested in the rest and you don't want to pay the price, either in +time or memory, to build the tree for the it. + + +=head2 Building an XML filter + +You can combine the C and the C options to +build filters, which let you modify selected elements and will output the rest +of the document as is. + +This would convert prices in $ to prices in Euro in a document: + + my $t= XML::Twig->new( + twig_roots => { 'price' => \&convert, }, # process prices + twig_print_outside_roots => 1, # print the rest + ); + $t->parsefile( 'doc.xml'); + + sub convert + { my( $t, $price)= @_; + my $currency= $price->{'att'}->{'currency'}; # get the currency + if( $currency eq 'USD') + { $usd_price= $price->text; # get the price + # %rate is just a conversion table + my $euro_price= $usd_price * $rate{usd2euro}; + $price->set_text( $euro_price); # set the new price + $price->set_att( currency => 'EUR'); # don't forget this! + } + $price->print; # output the price + } + +=head2 XML::Twig and various versions of Perl, XML::Parser and expat: + +XML::Twig is a lot more sensitive to variations in versions of perl, +XML::Parser and expat than to the OS, so this should cover some +reasonable configurations. + +The "recommended configuration" is perl 5.8.3+ (for good Unicode +support), XML::Parser 2.31+ and expat 1.95.5+ + +See L for the +CPAN testers reports on XML::Twig, which list all tested configurations. + +An Atom feed of the CPAN Testers results is available at +L + +Finally: + +=over 4 + +=item XML::Twig does B work with expat 1.95.4 + +=item XML::Twig only works with XML::Parser 2.27 in perl 5.6.* + +Note that I can't compile XML::Parser 2.27 anymore, so I can't guarantee +that it still works + +=item XML::Parser 2.28 does not really work + +=back + +When in doubt, upgrade expat, XML::Parser and Scalar::Util + +Finally, for some optional features, XML::Twig depends on some additional +modules. The complete list, which depends somewhat on the version of Perl +that you are running, is given by running C + +=head1 Simplifying XML processing + +=over 4 + +=item Whitespaces + +Whitespaces that look non-significant are discarded, this behaviour can be +controlled using the C >, +C > and +C > options. 
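+For example (this sketch assumes the usual C<keep_spaces> and
+C<keep_spaces_in> option names):
+
+  my $t_all = XML::Twig->new( keep_spaces    => 1);                 # keep every space
+  my $t_pre = XML::Twig->new( keep_spaces_in => [ 'pre', 'code' ]); # keep them only in those elements
+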
+ +=item Encoding + +You can specify that you want the output in the same encoding as the input +(provided you have valid XML, which means you have to specify the encoding +either in the document or when you create the Twig object) using the +C > option + +You can also use C> to convert the internal UTF-8 format +to the required encoding. + +=item Comments and Processing Instructions (PI) + +Comments and PI's can be hidden from the processing, but still appear in the +output (they are carried by the "real" element closer to them) + +=item Pretty Printing + +XML::Twig can output the document pretty printed so it is easier to read for +us humans. + +=item Surviving an untimely death + +XML parsers are supposed to react violently when fed improper XML. +XML::Parser just dies. + +XML::Twig provides the C > and the +C > methods which wrap the parse in an eval +and return either the parsed twig or 0 in case of failure. + +=item Private attributes + +Attributes with a name starting with # (illegal in XML) will not be +output, so you can safely use them to store temporary values during +processing. Note that you can store anything in a private attribute, +not just text, it's just a regular Perl variable, so a reference to +an object or a huge data structure is perfectly fine. + +=back + +=head1 CLASSES + +XML::Twig uses a very limited number of classes. The ones you are most likely to use +are C> of course, which represents a complete XML document, including the +document itself (the root of the document itself is C>), its handlers, its +input or output filters... The other main class is C>, which models +an XML element. Element here has a very wide definition: it can be a regular element, or +but also text, with an element C> of C<#PCDATA> (or C<#CDATA>), an entity (tag is +C<#ENT>), a Processing Instruction (C<#PI>), a comment (C<#COMMENT>). + +Those are the 2 commonly used classes. + +You might want to look the C> option if you want to subclass C. + +Attributes are just attached to their parent element, they are not objects per se. (Please +use the provided methods C> and C> to access them, if you access them +as a hash, then your code becomes implementation dependent and might break in the future). + +Other classes that are seldom used are C> and C>. + +If you use C> instead of C, elements are then created as +C> + + +=head1 METHODS + +=head2 XML::Twig + +A twig is a subclass of XML::Parser, so all XML::Parser methods can be +called on a twig object, including parse and parsefile. +C on the other hand cannot be used, see C > + + +=over 4 + +=item new + +This is a class method, the constructor for XML::Twig. Options are passed +as keyword value pairs. Recognized options are the same as XML::Parser, +plus some (in fact a lot!) XML::Twig specifics. + +New Options: + +=over 4 + +=item twig_handlers + +This argument consists of a hash C<{ expression => \&handler}> where +expression is a an I (+ some others). + +XPath expressions are limited to using the child and descendant axis +(indeed you can't specify an axis), and predicates cannot be nested. +You can use the C, or C<< string() >> function (except +in C triggers). + +Additionally you can use regexps (/ delimited) to match attribute +and string values. + +Examples: + + foo + foo/bar + foo//bar + /foo/bar + /foo//bar + /foo/bar[@att1 = "val1" and @att2 = "val2"]/baz[@a >= 1] + foo[string()=~ /^duh!+/] + /foo[string(bar)=~ /\d+/]/baz[@att != 3] + +#CDATA can be used to call a handler for a CDATA section. 
+#COMMENT can be used to call a handler for comments + +Some additional (non-XPath) expressions are also provided for convenience: + +=over 4 + +=item processing instructions + +C<'?'> or C<'#PI'> triggers the handler for any processing instruction, +and C<< '?' >> or C<< '#PI ' >> triggers a handler for processing +instruction with the given target( ex: C<'#PI xml-stylesheet'>). + +=item level() + +Triggers the handler on any element at that level in the tree (root is level 1) + +=item _all_ + +Triggers the handler for B elements in the tree + +=item _default_ + +Triggers the handler for each element that does NOT have any other handler. + +=back + +Expressions are evaluated against the input document. +Which means that even if you have changed the tag of an element (changing the +tag of a parent element from a handler for example) the change will not impact +the expression evaluation. There is an exception to this: "private" attributes +(which name start with a '#', and can only be created during the parsing, as +they are not valid XML) are checked against the current twig. + +Handlers are triggered in fixed order, sorted by their type (xpath expressions +first, then regexps, then level), then by whether they specify a full path +(starting at the root element) or +not, then by number of steps in the expression , then number of +predicates, then number of tests in predicates. Handlers where the last +step does not specify a step (C) are triggered after other XPath +handlers. Finally C<_all_> handlers are triggered last. + +B: once a handler has been triggered if it returns 0 then no other +handler is called, except a C<_all_> handler which will be called anyway. + +If a handler returns a true value and other handlers apply, then the next +applicable handler will be called. Repeat, rinse, lather..; The exception +to that rule is when the C> +option is set, in which case only the first handler will be called. + +Note that it might be a good idea to explicitly return a short true value +(like 1) from handlers: this ensures that other applicable handlers are +called even if the last statement for the handler happens to evaluate to +false. This might also speedup the code by avoiding the result of the last +statement of the code to be copied and passed to the code managing handlers. +It can really pay to have 1 instead of a long string returned. + +When the closing tag for an element is parsed the corresponding handler is +called, with 2 arguments: the twig and the C >. The twig includes +the document tree that has been built so far, the element is the complete +sub-tree for the element. The fact that the handler is called only when the +closing tag for the element is found means that handlers for inner elements +are called before handlers for outer elements. + +C<$_> is also set to the element, so it is easy to write inline handlers like + + para => sub { $_->set_tag( 'p'); } + +Text is stored in elements whose tag name is #PCDATA (due to mixed content, +text and sub-element in an element there is no way to store the text as just +an attribute of the enclosing element). + +B: if you have used purge or flush on the twig the element might not +be complete, some of its children might have been entirely flushed or purged, +and the start tag might even have been printed (by C) already, so changing +its tag might not give the expected result. + + +=item twig_roots + +This argument let's you build the tree only for those elements you are +interested in. 
+ + Example: my $t= XML::Twig->new( twig_roots => { title => 1, subtitle => 1}); + $t->parsefile( file); + my $t= XML::Twig->new( twig_roots => { 'section/title' => 1}); + $t->parsefile( file); + + +return a twig containing a document including only C and C<subtitle> +elements, as children of the root element. + +You can use I<generic_attribute_condition>, I<attribute_condition>, +I<full_path>, I<partial_path>, I<tag>, I<tag_regexp>, I<_default_> and +I<_all_> to trigger the building of the twig. +I<string_condition> and I<regexp_condition> cannot be used as the content +of the element, and the string, have not yet been parsed when the condition +is checked. + +B<WARNING>: path are checked for the document. Even if the C<twig_roots> option +is used they will be checked against the full document tree, not the virtual +tree created by XML::Twig + + +B<WARNING>: twig_roots elements should NOT be nested, that would hopelessly +confuse XML::Twig ;--( + +Note: you can set handlers (twig_handlers) using twig_roots + Example: my $t= XML::Twig->new( twig_roots => + { title => sub { $_[1]->print;}, + subtitle => \&process_subtitle + } + ); + $t->parsefile( file); + + +=item twig_print_outside_roots + +To be used in conjunction with the C<twig_roots> argument. When set to a true +value this will print the document outside of the C<twig_roots> elements. + + Example: my $t= XML::Twig->new( twig_roots => { title => \&number_title }, + twig_print_outside_roots => 1, + ); + $t->parsefile( file); + { my $nb; + sub number_title + { my( $twig, $title); + $nb++; + $title->prefix( "$nb "); + $title->print; + } + } + + +This example prints the document outside of the title element, calls +C<number_title> for each C<title> element, prints it, and then resumes printing +the document. The twig is built only for the C<title> elements. + +If the value is a reference to a file handle then the document outside the +C<twig_roots> elements will be output to this file handle: + + open( my $out, '>', 'out_file.xml') or die "cannot open out file.xml out_file:$!"; + my $t= XML::Twig->new( twig_roots => { title => \&number_title }, + # default output to $out + twig_print_outside_roots => $out, + ); + + { my $nb; + sub number_title + { my( $twig, $title); + $nb++; + $title->prefix( "$nb "); + $title->print( $out); # you have to print to \*OUT here + } + } + + +=item start_tag_handlers + +A hash C<{ expression => \&handler}>. Sets element handlers that are called when +the element is open (at the end of the XML::Parser C<Start> handler). The handlers +are called with 2 params: the twig and the element. The element is empty at +that point, its attributes are created though. + +You can use I<generic_attribute_condition>, I<attribute_condition>, +I<full_path>, I<partial_path>, I<tag>, I<tag_regexp>, I<_default_> and I<_all_> +to trigger the handler. + +I<string_condition> and I<regexp_condition> cannot be used as the content of +the element, and the string, have not yet been parsed when the condition is +checked. + +The main uses for those handlers are to change the tag name (you might have to +do it as soon as you find the open tag if you plan to C<flush> the twig at some +point in the element, and to create temporary attributes that will be used +when processing sub-element with C<twig_hanlders>. + +You should also use it to change tags if you use C<flush>. If you change the tag +in a regular C<twig_handler> then the start tag might already have been flushed. 
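+A minimal sketch of that pattern (the tag names are purely illustrative):
+
+  my $t= XML::Twig->new(
+          start_tag_handlers =>
+            { para => sub { $_[1]->set_tag( 'p'); } },   # rename before anything is flushed
+          twig_handlers =>
+            { div  => sub { $_[0]->flush; } },           # output and free memory
+        );
+  $t->parsefile( 'doc.xml');
+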
+ +B<Note>: C<start_tag> handlers can be called outside of C<twig_roots> if this +argument is used, in this case handlers are called with the following arguments: +C<$t> (the twig), C<$tag> (the tag of the element) and C<%att> (a hash of the +attributes of the element). + +If the C<twig_print_outside_roots> argument is also used, if the last handler +called returns a C<true> value, then the start tag will be output as it +appeared in the original document, if the handler returns a C<false> value +then the start tag will B<not> be printed (so you can print a modified string +yourself for example). + +Note that you can use the L<ignore> method in C<start_tag_handlers> +(and only there). + +=item end_tag_handlers + +A hash C<{ expression => \&handler}>. Sets element handlers that are called when +the element is closed (at the end of the XML::Parser C<End> handler). The handlers +are called with 2 params: the twig and the tag of the element. + +I<twig_handlers> are called when an element is completely parsed, so why have +this redundant option? There is only one use for C<end_tag_handlers>: when using +the C<twig_roots> option, to trigger a handler for an element B<outside> the roots. +It is for example very useful to number titles in a document using nested +sections: + + my @no= (0); + my $no; + my $t= XML::Twig->new( + start_tag_handlers => + { section => sub { $no[$#no]++; $no= join '.', @no; push @no, 0; } }, + twig_roots => + { title => sub { $_[1]->prefix( $no); $_[1]->print; } }, + end_tag_handlers => { section => sub { pop @no; } }, + twig_print_outside_roots => 1 + ); + $t->parsefile( $file); + +Using the C<end_tag_handlers> argument without C<twig_roots> will result in an +error. + +=item do_not_chain_handlers + +If this option is set to a true value, then only one handler will be called for +each element, even if several satisfy the condition + +Note that the C<_all_> handler will still be called regardless + +=item ignore_elts + +This option lets you ignore elements when building the twig. This is useful +in cases where you cannot use C<twig_roots> to ignore elements, for example if +the element to ignore is a sibling of elements you are interested in. + +Example: + + my $twig= XML::Twig->new( ignore_elts => { elt => 'discard' }); + $twig->parsefile( 'doc.xml'); + +This will build the complete twig for the document, except that all C<elt> +elements (and their children) will be left out. + +The keys in the hash are triggers, limited to the same subset as +C<L<start_tag_handlers>>. The values can be C<discard>, to discard +the element, C<print>, to output the element as-is, C<string> to +store the text of the ignored element(s), including markup, in a field of +the twig: C<< $t->{twig_buffered_string} >> or a reference to a scalar, in +which case the text of the ignored element(s), including markup, will be +stored in the scalar. Any other value will be treated as C<discard>. + + +=item char_handler + +A reference to a subroutine that will be called every time C<PCDATA> is found. + +The subroutine receives the string as argument, and returns the modified string: + + # we want all strings in upper case + sub my_char_handler + { my( $text)= @_; + $text= uc( $text); + return $text; + } + +=item elt_class + +The name of a class used to store elements. this class should inherit from +C<XML::Twig::Elt> (and by default it is C<XML::Twig::Elt>). This option is used +to subclass the element class and extend it with new methods. 
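+A minimal sketch of such a subclass (the class and method names are only
+illustrative):
+
+  use XML::Twig;
+
+  package MyElt;
+  our @ISA= ( 'XML::Twig::Elt');                 # extend the default element class
+  sub word_count { my @w= split /\s+/, $_[0]->text; return scalar @w; }
+
+  package main;
+  my $t= XML::Twig->new( elt_class => 'MyElt');  # elements are now blessed into MyElt
+  $t->parse( '<doc><para>only two</para></doc>');
+  print $t->root->first_child( 'para')->word_count;   # prints 2
+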
+ +This option is needed because during the parsing of the XML, elements are created +by C<XML::Twig>, without any control from the user code. + +=item keep_atts_order + +Setting this option to a true value causes the attribute hash to be tied to +a C<Tie::IxHash> object. +This means that C<Tie::IxHash> needs to be installed for this option to be +available. It also means that the hash keeps its order, so you will get +the attributes in order. This allows outputting the attributes in the same +order as they were in the original document. + +=item keep_encoding + +This is a (slightly?) evil option: if the XML document is not UTF-8 encoded and +you want to keep it that way, then setting keep_encoding will use theC<Expat> +original_string method for character, thus keeping the original encoding, as +well as the original entities in the strings. + +See the C<t/test6.t> test file to see what results you can expect from the +various encoding options. + +B<WARNING>: if the original encoding is multi-byte then attribute parsing will +be EXTREMELY unsafe under any Perl before 5.6, as it uses regular expressions +which do not deal properly with multi-byte characters. You can specify an +alternate function to parse the start tags with the C<parse_start_tag> option +(see below) + +B<WARNING>: this option is NOT used when parsing with the non-blocking parser +(C<parse_start>, C<parse_more>, parse_done methods) which you probably should +not use with XML::Twig anyway as they are totally untested! + +=item output_encoding + +This option generates an output_filter using C<Encode>, C<Text::Iconv> or +C<Unicode::Map8> and C<Unicode::Strings>, and sets the encoding in the XML +declaration. This is the easiest way to deal with encodings, if you need +more sophisticated features, look at C<output_filter> below + + +=item output_filter + +This option is used to convert the character encoding of the output document. +It is passed either a string corresponding to a predefined filter or +a subroutine reference. The filter will be called every time a document or +element is processed by the "print" functions (C<print>, C<sprint>, C<flush>). + +Pre-defined filters: + +=over 4 + +=item latin1 + +uses either C<Encode>, C<Text::Iconv> or C<Unicode::Map8> and C<Unicode::String> +or a regexp (which works only with XML::Parser 2.27), in this order, to convert +all characters to ISO-8859-15 (usually latin1 is synonym to ISO-8859-1, but +in practice it seems that ISO-8859-15, which includes the euro sign, is more +useful and probably what most people want). + +=item html + +does the same conversion as C<latin1>, plus encodes entities using +C<HTML::Entities> (oddly enough you will need to have HTML::Entities installed +for it to be available). This should only be used if the tags and attribute +names themselves are in US-ASCII, or they will be converted and the output will +not be valid XML any more + +=item safe + +converts the output to ASCII (US) only plus I<character entities> (C<&#nnn;>) +this should be used only if the tags and attribute names themselves are in +US-ASCII, or they will be converted and the output will not be valid XML any +more + +=item safe_hex + +same as C<safe> except that the character entities are in hex (C<&#xnnn;>) + +=item encode_convert ($encoding) + +Return a subref that can be used to convert utf8 strings to C<$encoding>). +Uses C<Encode>. 
+ + my $conv = XML::Twig::encode_convert( 'latin1'); + my $t = XML::Twig->new(output_filter => $conv); + +=item iconv_convert ($encoding) + +this function is used to create a filter subroutine that will be used to +convert the characters to the target encoding using C<Text::Iconv> (which needs +to be installed, look at the documentation for the module and for the +C<iconv> library to find out which encodings are available on your system) + + my $conv = XML::Twig::iconv_convert( 'latin1'); + my $t = XML::Twig->new(output_filter => $conv); + +=item unicode_convert ($encoding) + +this function is used to create a filter subroutine that will be used to +convert the characters to the target encoding using C<Unicode::Strings> +and C<Unicode::Map8> (which need to be installed, look at the documentation +for the modules to find out which encodings are available on your system) + + my $conv = XML::Twig::unicode_convert( 'latin1'); + my $t = XML::Twig->new(output_filter => $conv); + +=back + +The C<text> and C<att> methods do not use the filter, so their +result are always in unicode. + +Those predeclared filters are based on subroutines that can be used +by themselves (as C<XML::Twig::foo>). + +=over 4 + +=item html_encode ($string) + +Use C<HTML::Entities> to encode a utf8 string + +=item safe_encode ($string) + +Use either a regexp (perl < 5.8) or C<Encode> to encode non-ascii characters +in the string in C<< &#<nnnn>; >> format + +=item safe_encode_hex ($string) + +Use either a regexp (perl < 5.8) or C<Encode> to encode non-ascii characters +in the string in C<< &#x<nnnn>; >> format + +=item regexp2latin1 ($string) + +Use a regexp to encode a utf8 string into latin 1 (ISO-8859-1). Does not +work with Perl 5.8.0! + +=back + +=item output_text_filter + +same as output_filter, except it doesn't apply to the brackets and quotes +around attribute values. This is useful for all filters that could change +the tagging, basically anything that does not just change the encoding of +the output. C<html>, C<safe> and C<safe_hex> are better used with this option. + +=item input_filter + +This option is similar to C<output_filter> except the filter is applied to +the characters before they are stored in the twig, at parsing time. + +=item remove_cdata + +Setting this option to a true value will force the twig to output CDATA +sections as regular (escaped) PCDATA + +=item parse_start_tag + +If you use the C<keep_encoding> option then this option can be used to replace +the default parsing function. You should provide a coderef (a reference to a +subroutine) as the argument, this subroutine takes the original tag (given +by XML::Parser::Expat C<original_string()> method) and returns a tag and the +attributes in a hash (or in a list attribute_name/attribute value). + +=item expand_external_ents + +When this option is used external entities (that are defined) are expanded +when the document is output using "print" functions such as C<L<print> >, +C<L<sprint> >, C<L<flush> > and C<L<xml_string> >. +Note that in the twig the entity will be stored as an element with a +tag 'C<#ENT>', the entity will not be expanded there, so you might want to +process the entities before outputting it. + +If an external entity is not available, then the parse will fail. + +A special case is when the value of this option is -1. 
In that case a missing +entity will not cause the parser to die, but its C<name>, C<sysid> and C<pubid> +will be stored in the twig as C<< $twig->{twig_missing_system_entities} >> +(a reference to an array of hashes { name => <name>, sysid => <sysid>, +pubid => <pubid> }). Yes, this is a bit of a hack, but it's useful in some +cases. + +=item load_DTD + +If this argument is set to a true value, C<parse> or C<parsefile> on the twig +will load the DTD information. This information can then be accessed through +the twig, in a C<DTD_handler> for example. This will load even an external DTD. + +Default and fixed values for attributes will also be filled, based on the DTD. + +Note that to do this the module will generate a temporary file in the current +directory. If this is a problem let me know and I will add an option to +specify an alternate directory. + +See L<DTD Handling> for more information + +=item DTD_handler + +Set a handler that will be called once the doctype (and the DTD) have been +loaded, with 2 arguments, the twig and the DTD. + +=item no_prolog + +Does not output a prolog (XML declaration and DTD) + +=item id + +This optional argument gives the name of an attribute that can be used as +an ID in the document. Elements whose ID is known can be accessed through +the elt_id method. id defaults to 'id'. +See C<L<BUGS> > + +=item discard_spaces + +If this optional argument is set to a true value then spaces are discarded +when they look non-significant: strings containing only spaces and at least +one line feed are discarded. This argument is set to true by default. + +The exact algorithm to drop spaces is: strings including only spaces (perl \s) +and at least one \n right before an open or close tag are dropped. + +=item discard_all_spaces + +If this argument is set to a true value, spaces are discarded more +aggressively than with C<discard_spaces>: strings not including a \n are also +dropped. This option is appropriate for data-oriented XML. + + +=item keep_spaces + +If this optional argument is set to a true value then all spaces in the +document are kept, and stored as C<PCDATA>. + +B<Warning>: adding this option can result in changes in the twig generated: +space that was previously discarded might end up in a new text element. see +the difference by calling the following code with 0 and 1 as arguments: + + perl -MXML::Twig -e'print XML::Twig->new( keep_spaces => shift)->parse( "<d> \n<e/></d>")->_dump' + + +C<keep_spaces> and C<discard_spaces> cannot be both set. + +=item discard_spaces_in + +This argument sets C<keep_spaces> to true but will cause the twig builder to +discard spaces in the elements listed. + +The syntax for using this argument is: + + XML::Twig->new( discard_spaces_in => [ 'elt1', 'elt2']); + +=item keep_spaces_in + +This argument sets C<discard_spaces> to true but will cause the twig builder to +keep spaces in the elements listed. + +The syntax for using this argument is: + + XML::Twig->new( keep_spaces_in => [ 'elt1', 'elt2']); + +B<Warning>: adding this option can result in changes in the twig generated: +space that was previously discarded might end up in a new text element. 
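+
+For example, a short sketch (the element name is made up) that discards
+non-significant spaces everywhere except inside C<pre> elements:
+
+  my $t= XML::Twig->new( keep_spaces_in => [ 'pre' ]);
+  $t->parsefile( 'doc.xml');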
+ +=item pretty_print + +Set the pretty print method, amongst 'C<none>' (default), 'C<nsgmls>', +'C<nice>', 'C<indented>', 'C<indented_c>', 'C<indented_a>', +'C<indented_close_tag>', 'C<cvs>', 'C<wrapped>', 'C<record>' and 'C<record_c>' + +pretty_print formats: + +=over 4 + +=item none + +The document is output as one ling string, with no line breaks except those +found within text elements + +=item nsgmls + +Line breaks are inserted in safe places: that is within tags, between a tag +and an attribute, between attributes and before the > at the end of a tag. + +This is quite ugly but better than C<none>, and it is very safe, the document +will still be valid (conforming to its DTD). + +This is how the SGML parser C<sgmls> splits documents, hence the name. + +=item nice + +This option inserts line breaks before any tag that does not contain text (so +element with textual content are not broken as the \n is the significant). + +B<WARNING>: this option leaves the document well-formed but might make it +invalid (not conformant to its DTD). If you have elements declared as + + <!ELEMENT foo (#PCDATA|bar)> + +then a C<foo> element including a C<bar> one will be printed as + + <foo> + <bar>bar is just pcdata</bar> + </foo> + +This is invalid, as the parser will take the line break after the C<foo> tag +as a sign that the element contains PCDATA, it will then die when it finds the +C<bar> tag. This may or may not be important for you, but be aware of it! + +=item indented + +Same as C<nice> (and with the same warning) but indents elements according to +their level + +=item indented_c + +Same as C<indented> but a little more compact: the closing tags are on the +same line as the preceding text + +=item indented_close_tag + +Same as C<indented> except that the closing tag is also indented, to line up +with the tags within the element + +=item idented_a + +This formats XML files in a line-oriented version control friendly way. +The format is described in L<http://tinyurl.com/2kwscq> (that's an Oracle +document with an insanely long URL). + +Note that to be totaly conformant to the "spec", the order of attributes +should not be changed, so if they are not already in alphabetical order +you will need to use the C<L<keep_atts_order>> option. + +=item cvs + +Same as C<L<idented_a>>. + +=item wrapped + +Same as C<indented_c> but lines are wrapped using L<Text::Wrap::wrap>. The +default length for lines is the default for C<$Text::Wrap::columns>, and can +be changed by changing that variable. + +=item record + +This is a record-oriented pretty print, that display data in records, one field +per line (which looks a LOT like C<indented>) + +=item record_c + +Stands for record compact, one record per line + +=back + + +=item empty_tags + +Set the empty tag display style ('C<normal>', 'C<html>' or 'C<expand>'). + +C<normal> outputs an empty tag 'C<< <tag/> >>', C<html> adds a space +'C<< <tag /> >>' for elements that can be empty in XHTML and C<expand> outputs +'C<< <tag></tag> >>' + +=item quote + +Set the quote character for attributes ('C<single>' or 'C<double>'). + +=item escape_gt + +By default XML::Twig does not escape the character > in its output, as it is not +mandated by the XML spec. 
With this option on, > will be replaced by C<&gt;>.
+
+=item comments
+
+Set the way comments are processed: 'C<drop>' (default), 'C<keep>' or
+'C<process>'
+
+Comments processing options:
+
+=over 4
+
+=item drop
+
+drops the comments, they are not read, nor printed to the output
+
+=item keep
+
+comments are loaded and will appear on the output, they are not
+accessible within the twig and will not interfere with processing
+though
+
+B<Note>: comments in the middle of a text element such as
+
+  <p>text <!-- comment --> more text --></p>
+
+are kept at their original position in the text. Using "print"
+methods like C<print> or C<sprint> will return the comments in the
+text. Using C<text> or C<field> on the other hand will not.
+
+Any use of C<set_pcdata> on the C<#PCDATA> element (directly or
+through other methods like C<set_content>) will delete the comment(s).
+
+=item process
+
+comments are loaded in the twig and will be treated as regular elements
+(their C<tag> is C<#COMMENT>) this can interfere with processing if you
+expect C<< $elt->{first_child} >> to be an element but find a comment there.
+Validation will not protect you from this as comments can happen anywhere.
+You can use C<< $elt->first_child( 'tag') >> (which is a good habit anyway)
+to get where you want.
+
+Consider using C<process> if you are outputting SAX events from XML::Twig.
+
+=back
+
+=item pi
+
+Set the way processing instructions are processed: 'C<drop>', 'C<keep>'
+(default) or 'C<process>'
+
+Note that you can also set PI handlers in the C<twig_handlers> option:
+
+  '?'       => \&handler
+  '?target' => \&handler
+
+The handlers will be called with 2 parameters, the twig and the PI element if
+C<pi> is set to C<process>, and with 3, the twig, the target and the data if
+C<pi> is set to C<keep>. Of course they will not be called if C<pi> is set to
+C<drop>.
+
+If C<pi> is set to C<keep> the handler should return a string that will be used
+as-is as the PI text (it should look like "C< <?target data?> >" or '' if you
+want to remove the PI).
+
+Only one handler will be called, C<?target> or C<?> if no specific handler for
+that target is available.
+
+=item map_xmlns
+
+This option is passed a hashref that maps uri's to prefixes. The prefixes in
+the document will be replaced by the ones in the map. The mapped prefixes can
+(actually have to) be used to trigger handlers, navigate or query the document.
+
+Here is an example:
+
+  my $t= XML::Twig->new( map_xmlns => {'http://www.w3.org/2000/svg' => "svg"},
+                         twig_handlers =>
+                           { 'svg:circle' => sub { $_->set_att( r => 20) } },
+                         pretty_print => 'indented',
+                       )
+                  ->parse( '<doc xmlns:gr="http://www.w3.org/2000/svg">
+                              <gr:circle cx="10" cy="90" r="10"/>
+                            </doc>'
+                         )
+                  ->print;
+
+This will output:
+
+  <doc xmlns:svg="http://www.w3.org/2000/svg">
+     <svg:circle cx="10" cy="90" r="20"/>
+  </doc>
+
+=item keep_original_prefix
+
+When used with C<L<map_xmlns>> this option will make C<XML::Twig> use the original
+namespace prefixes when outputting a document. The mapped prefix will still be used
+for triggering handlers and in navigation and query methods.
+ + my $t= XML::Twig->new( map_xmlns => {'http://www.w3.org/2000/svg' => "svg"}, + twig_handlers => + { 'svg:circle' => sub { $_->set_att( r => 20) } }, + keep_original_prefix => 1, + pretty_print => 'indented', + ) + ->parse( '<doc xmlns:gr="http://www.w3.org/2000/svg"> + <gr:circle cx="10" cy="90" r="10"/> + </doc>' + ) + ->print; + +This will output: + + <doc xmlns:gr="http://www.w3.org/2000/svg"> + <gr:circle cx="10" cy="90" r="20"/> + </doc> + +=item original_uri ($prefix) + +called within a handler, this will return the uri bound to the namespace prefix +in the original document. + +=item index ($arrayref or $hashref) + +This option creates lists of specific elements during the parsing of the XML. +It takes a reference to either a list of triggering expressions or to a hash +name => expression, and for each one generates the list of elements that +match the expression. The list can be accessed through the C<L<index>> method. + +example: + + # using an array ref + my $t= XML::Twig->new( index => [ 'div', 'table' ]) + ->parsefile( "foo.xml"); + my $divs= $t->index( 'div'); + my $first_div= $divs->[0]; + my $last_table= $t->index( table => -1); + + # using a hashref to name the indexes + my $t= XML::Twig->new( index => { email => 'a[@href=~/^ \s*mailto:/]'}) + ->parsefile( "foo.xml"); + my $last_emails= $t->index( email => -1); + +Note that the index is not maintained after the parsing. If elements are +deleted, renamed or otherwise hurt during processing, the index is NOT updated. +(changing the id element OTOH will update the index) + +=item att_accessors <list of attribute names> + +creates methods that give direct access to attribute: + + my $t= XML::Twig->new( att_accessors => [ 'href', 'src']) + ->parsefile( $file); + my $first_href= $t->first_elt( 'img')->src; # same as ->att( 'src') + $t->first_elt( 'img')->src( 'new_logo.png') # changes the attribute value + +=item elt_accessors + +creates methods that give direct access to the first child element (in scalar context) +or the list of elements (in list context): + +the list of accessors to create can be given 1 2 different ways: in an array, +or in a hash alias => expression + my $t= XML::Twig->new( elt_accessors => [ 'head']) + ->parsefile( $file); + my $title_text= $t->root->head->field( 'title'); + # same as $title_text= $t->root->first_child( 'head')->field( 'title'); + + my $t= XML::Twig->new( elt_accessors => { warnings => 'p[@class="warning"]', d2 => 'div[2]'}, ) + ->parsefile( $file); + my $body= $t->first_elt( 'body'); + my @warnings= $body->warnings; # same as $body->children( 'p[@class="warning"]'); + my $s2= $body->d2; # same as $body->first_child( 'div[2]') + +=item field_accessors + +creates methods that give direct access to the first child element text: + + my $t= XML::Twig->new( field_accessors => [ 'h1']) + ->parsefile( $file); + my $div_title_text= $t->first_elt( 'div')->title; + # same as $title_text= $t->first_elt( 'div')->field( 'title'); + +=item use_tidy + +set this option to use HTML::Tidy instead of HTML::TreeBuilder to convert +HTML to XML. HTML, especially real (real "crap") HTML found in the wild, +so depending on the data, one module or the other does a better job at +the conversion. Also, HTML::Tidy can be a bit difficult to install, so +XML::Twig offers both option. TIMTOWTDI + +=item output_html_doctype + +when using HTML::TreeBuilder to convert HTML, this option causes the DOCTYPE +declaration to be output, which may be important for some legacy browsers. 
+Without that option the DOCTYPE definition is NOT output. Also if the definition +is completely wrong (ie not easily parsable), it is not output either. + +=back + +B<Note>: I _HATE_ the Java-like name of arguments used by most XML modules. +So in pure TIMTOWTDI fashion all arguments can be written either as +C<UglyJavaLikeName> or as C<readable_perl_name>: C<twig_print_outside_roots> +or C<TwigPrintOutsideRoots> (or even C<twigPrintOutsideRoots> {shudder}). +XML::Twig normalizes them before processing them. + +=item parse ( $source) + +The C<$source> parameter should either be a string containing the whole XML +document, or it should be an open C<IO::Handle> (aka a filehandle). + +A die call is thrown if a parse error occurs. Otherwise it will return +the twig built by the parse. Use C<safe_parse> if you want the parsing +to return even when an error occurs. + +If this method is called as a class method +(C<< XML::Twig->parse( $some_xml_or_html) >>) then an XML::Twig object is +created, using the parameters except the last one (eg +C<< XML::Twig->parse( pretty_print => 'indented', $some_xml_or_html) >>) +and C<L<xparse>> is called on it. + +Note that when parsing a filehandle, the handle should NOT be open with an +encoding (ie open with C<open( my $in, '<', $filename)>. The file will be +parsed by C<expat>, so specifying the encoding actually causes problems +for the parser (as in: it can crash it, see +https://rt.cpan.org/Ticket/Display.html?id=78877). For parsing a file it +is actually recommended to use C<parsefile> on the file name, instead of +<parse> on the open file. + +=item parsestring + +This is just an alias for C<parse> for backwards compatibility. + +=item parsefile (FILE [, OPT => OPT_VALUE [...]]) + +Open C<FILE> for reading, then call C<parse> with the open handle. The file +is closed no matter how C<parse> returns. + +A C<die> call is thrown if a parse error occurs. Otherwise it will return +the twig built by the parse. Use C<safe_parsefile> if you want the parsing +to return even when an error occurs. + +=item parsefile_inplace ( $file, $optional_extension) + +Parse and update a file "in place". It does this by creating a temp file, +selecting it as the default for print() statements (and methods), then parsing +the input file. If the parsing is successful, then the temp file is +moved to replace the input file. + +If an extension is given then the original file is backed-up (the rules for +the extension are the same as the rule for the -i option in perl). + +=item parsefile_html_inplace ( $file, $optional_extension) + +Same as parsefile_inplace, except that it parses HTML instead of XML + +=item parseurl ($url $optional_user_agent) + +Gets the data from C<$url> and parse it. The data is piped to the parser in +chunks the size of the XML::Parser::Expat buffer, so memory consumption and +hopefully speed are optimal. + +For most (read "small") XML it is probably as efficient (and easier to debug) +to just C<get> the XML file and then parse it as a string. + + use XML::Twig; + use LWP::Simple; + my $twig= XML::Twig->new(); + $twig->parse( LWP::Simple::get( $URL )); + +or + + use XML::Twig; + my $twig= XML::Twig->nparse( $URL); + + +If the C<$optional_user_agent> argument is used then it is used, otherwise a +new one is created. + +=item safe_parse ( SOURCE [, OPT => OPT_VALUE [...]]) + +This method is similar to C<parse> except that it wraps the parsing in an +C<eval> block. It returns the twig on success and 0 on failure (the twig object +also contains the parsed twig). 
C<$@> contains the error message on failure. + +Note that the parsing still stops as soon as an error is detected, there is +no way to keep going after an error. + +=item safe_parsefile (FILE [, OPT => OPT_VALUE [...]]) + +This method is similar to C<parsefile> except that it wraps the parsing in an +C<eval> block. It returns the twig on success and 0 on failure (the twig object +also contains the parsed twig) . C<$@> contains the error message on failure + +Note that the parsing still stops as soon as an error is detected, there is +no way to keep going after an error. + +=item safe_parseurl ($url $optional_user_agent) + +Same as C<parseurl> except that it wraps the parsing in an C<eval> block. It +returns the twig on success and 0 on failure (the twig object also contains +the parsed twig) . C<$@> contains the error message on failure + +=item parse_html ($string_or_fh) + +parse an HTML string or file handle (by converting it to XML using +HTML::TreeBuilder, which needs to be available). + +This works nicely, but some information gets lost in the process: +newlines are removed, and (at least on the version I use), comments +get an extra CDATA section inside ( <!-- foo --> becomes +<!-- <![CDATA[ foo ]]> --> + +=item parsefile_html ($file) + +parse an HTML file (by converting it to XML using HTML::TreeBuilder, which +needs to be available, or HTML::Tidy if the C<use_tidy> option was used). +The file is loaded completely in memory and converted to XML before being parsed. + +this method is to be used with caution though, as it doesn't know about the +file encoding, it is usually better to use C<L<parse_html>>, which gives you +a chance to open the file with the proper encoding layer. + +=item parseurl_html ($url $optional_user_agent) + +parse an URL as html the same way C<L<parse_html>> does + +=item safe_parseurl_html ($url $optional_user_agent) + +Same as C<L<parseurl_html>>> except that it wraps the parsing in an C<eval> +block. It returns the twig on success and 0 on failure (the twig object also +contains the parsed twig) . C<$@> contains the error message on failure + +=item safe_parsefile_html ($file $optional_user_agent) + +Same as C<L<parsefile_html>>> except that it wraps the parsing in an C<eval> +block. It returns the twig on success and 0 on failure (the twig object also +contains the parsed twig) . C<$@> contains the error message on failure + +=item safe_parse_html ($string_or_fh) + +Same as C<L<parse_html>> except that it wraps the parsing in an C<eval> block. +It returns the twig on success and 0 on failure (the twig object also contains +the parsed twig) . C<$@> contains the error message on failure + +=item xparse ($thing_to_parse) + +parse the C<$thing_to_parse>, whether it is a filehandle, a string, an HTML +file, an HTML URL, an URL or a file. + +Note that this is mostly a convenience method for one-off scripts. For example +files that end in '.htm' or '.html' are parsed first as XML, and if this fails +as HTML. This is certainly not the most efficient way to do this in general. + +=item nparse ($optional_twig_options, $thing_to_parse) + +create a twig with the C<$optional_options>, and parse the C<$thing_to_parse>, +whether it is a filehandle, a string, an HTML file, an HTML URL, an URL or a +file. + +Examples: + + XML::Twig->nparse( "file.xml"); + XML::Twig->nparse( error_context => 1, "file://file.xml"); + +=item nparse_pp ($optional_twig_options, $thing_to_parse) + +same as C<L<nparse>> but also sets the C<pretty_print> option to C<indented>. 
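+
+For example, a one-line sketch that re-indents a file, assuming C<nparse_pp>
+returns the twig it built (the file names are made up):
+
+  XML::Twig->nparse_pp( 'file.xml')->print_to_file( 'file_indented.xml');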
+ +=item nparse_e ($optional_twig_options, $thing_to_parse) + +same as C<L<nparse>> but also sets the C<error_context> option to 1. + +=item nparse_ppe ($optional_twig_options, $thing_to_parse) + +same as C<L<nparse>> but also sets the C<pretty_print> option to C<indented> +and the C<error_context> option to 1. + +=item parser + +This method returns the C<expat> object (actually the XML::Parser::Expat object) +used during parsing. It is useful for example to call XML::Parser::Expat methods +on it. To get the line of a tag for example use C<< $t->parser->current_line >>. + +=item setTwigHandlers ($handlers) + +Set the twig_handlers. C<$handlers> is a reference to a hash similar to the +one in the C<twig_handlers> option of new. All previous handlers are unset. +The method returns the reference to the previous handlers. + +=item setTwigHandler ($exp $handler) + +Set a single twig_handler for elements matching C<$exp>. C<$handler> is a +reference to a subroutine. If the handler was previously set then the reference +to the previous handler is returned. + +=item setStartTagHandlers ($handlers) + +Set the start_tag handlers. C<$handlers> is a reference to a hash similar to the +one in the C<start_tag_handlers> option of new. All previous handlers are unset. +The method returns the reference to the previous handlers. + +=item setStartTagHandler ($exp $handler) + +Set a single start_tag handlers for elements matching C<$exp>. C<$handler> is a +reference to a subroutine. If the handler was previously set then the reference +to the previous handler is returned. + +=item setEndTagHandlers ($handlers) + +Set the end_tag handlers. C<$handlers> is a reference to a hash similar to the +one in the C<end_tag_handlers> option of new. All previous handlers are unset. +The method returns the reference to the previous handlers. + +=item setEndTagHandler ($exp $handler) + +Set a single end_tag handlers for elements matching C<$exp>. C<$handler> is a +reference to a subroutine. If the handler was previously set then the +reference to the previous handler is returned. 
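+
+All the single-handler setters above return the previously set handler, which
+makes it easy to swap a handler temporarily. A hedged sketch with
+C<setTwigHandler> (the tag name is made up):
+
+  my $previous= $t->setTwigHandler( para => sub { $_[1]->set_tag( 'p'); });
+  # ... process ...
+  $t->setTwigHandler( para => $previous) if $previous;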
+ +=item setTwigRoots ($handlers) + +Same as using the C<L<twig_roots>> option when creating the twig + +=item setCharHandler ($exp $handler) + +Set a C<char_handler> + +=item setIgnoreEltsHandler ($exp) + +Set a C<ignore_elt> handler (elements that match C<$exp> will be ignored + +=item setIgnoreEltsHandlers ($exp) + +Set all C<ignore_elt> handlers (previous handlers are replaced) + +=item dtd + +Return the dtd (an L<XML::Twig::DTD> object) of a twig + +=item xmldecl + +Return the XML declaration for the document, or a default one if it doesn't +have one + +=item doctype + +Return the doctype for the document + +=item doctype_name + +returns the doctype of the document from the doctype declaration + +=item system_id + +returns the system value of the DTD of the document from the doctype declaration + +=item public_id + +returns the public doctype of the document from the doctype declaration + +=item internal_subset + +returns the internal subset of the DTD + +=item dtd_text + +Return the DTD text + +=item dtd_print + +Print the DTD + +=item model ($tag) + +Return the model (in the DTD) for the element C<$tag> + +=item root + +Return the root element of a twig + +=item set_root ($elt) + +Set the root of a twig + +=item first_elt ($optional_condition) + +Return the first element matching C<$optional_condition> of a twig, if +no condition is given then the root is returned + +=item last_elt ($optional_condition) + +Return the last element matching C<$optional_condition> of a twig, if +no condition is given then the last element of the twig is returned + +=item elt_id ($id) + +Return the element whose C<id> attribute is $id + +=item getEltById + +Same as C<L<elt_id>> + +=item index ($index_name, $optional_index) + +If the C<$optional_index> argument is present, return the corresponding element +in the index (created using the C<index> option for C<XML::Twig->new>) + +If the argument is not present, return an arrayref to the index + +=item normalize + +merge together all consecutive pcdata elements in the document (if for example +you have turned some elements into pcdata using C<L<erase>>, this will give you +a "clean" document in which there all text elements are as long as possible). + +=item encoding + +This method returns the encoding of the XML document, as defined by the +C<encoding> attribute in the XML declaration (ie it is C<undef> if the attribute +is not defined) + +=item set_encoding + +This method sets the value of the C<encoding> attribute in the XML declaration. +Note that if the document did not have a declaration it is generated (with +an XML version of 1.0) + +=item xml_version + +This method returns the XML version, as defined by the C<version> attribute in +the XML declaration (ie it is C<undef> if the attribute is not defined) + +=item set_xml_version + +This method sets the value of the C<version> attribute in the XML declaration. +If the declaration did not exist it is created. + +=item standalone + +This method returns the value of the C<standalone> declaration for the document + +=item set_standalone + +This method sets the value of the C<standalone> attribute in the XML +declaration. Note that if the document did not have a declaration it is +generated (with an XML version of 1.0) + +=item set_output_encoding + +Set the C<encoding> "attribute" in the XML declaration + +=item set_doctype ($name, $system, $public, $internal) + +Set the doctype of the element. 
If an argument is C<undef> (or not present)
+then its former value is retained, if a false ('' or 0) value is passed then
+the former value is deleted.
+
+=item entity_list
+
+Return the entity list of a twig
+
+=item entity_names
+
+Return the list of all defined entities
+
+=item entity ($entity_name)
+
+Return the entity
+
+=item change_gi ($old_gi, $new_gi)
+
+Performs a (very fast) global change. All elements C<$old_gi> are now
+C<$new_gi>. This is a bit dangerous though and should be avoided if
+possible, as the new tag might be ignored in subsequent processing.
+
+See C<L<BUGS> >
+
+=item flush ($optional_filehandle, %options)
+
+Flushes a twig up to (and including) the current element, then deletes
+all unnecessary elements from the tree that's kept in memory.
+C<flush> keeps track of which elements need to be open/closed, so if you
+flush from handlers you don't have to worry about anything. Just keep
+flushing the twig every time you're done with a sub-tree and it will
+come out well-formed. After the whole parsing don't forget to C<flush>
+one more time to print the end of the document.
+The doctype and entity declarations are also printed.
+
+flush takes an optional filehandle as an argument.
+
+If you use C<flush> at any point during parsing, the document will be flushed
+one last time at the end of the parsing, to the proper filehandle.
+
+options: use the C<update_DTD> option if you have updated the (internal) DTD
+and/or the entity list and you want the updated DTD to be output
+
+The C<pretty_print> option sets the pretty printing of the document.
+
+  Example: $t->flush( Update_DTD => 1);
+           $t->flush( $filehandle, pretty_print => 'indented');
+           $t->flush( \*FILE);
+
+
+=item flush_up_to ($elt, $optional_filehandle, %options)
+
+Flushes up to the C<$elt> element. This allows you to keep part of the
+tree in memory when you C<flush>.
+
+options: see flush.
+
+=item purge
+
+Does the same as a C<flush> except it does not print the twig. It just deletes
+all elements that have been completely parsed so far.
+
+=item purge_up_to ($elt)
+
+Purges up to the C<$elt> element. This allows you to keep part of the tree in
+memory when you C<purge>.
+
+=item print ($optional_filehandle, %options)
+
+Prints the whole document associated with the twig. To be used only AFTER the
+parse.
+
+options: see C<flush>.
+
+=item print_to_file ($filename, %options)
+
+Prints the whole document associated with the twig to file C<$filename>.
+To be used only AFTER the parse.
+
+options: see C<flush>.
+
+=item safe_print_to_file ($filename, %options)
+
+Prints the whole document associated with the twig to file C<$filename>.
+This variant, which probably only works on *nix, prints to a temp file,
+then moves the temp file to overwrite the original file.
+
+This is a bit safer when 2 processes can potentially write the same file:
+only the last one will succeed, but the file won't be corrupted. I often
+use this for cron jobs, so testing the code doesn't interfere with the
+cron job running at the same time.
+
+options: see C<flush>.
+
+=item sprint
+
+Return the text of the whole document associated with the twig. To be used only
+AFTER the parse.
+
+options: see C<flush>.
+
+=item trim
+
+Trim the document: gets rid of initial and trailing spaces, and replaces multiple spaces
+by a single one.
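+
+A small sketch tying together the output methods above, after the parse is
+done (the file names are made up):
+
+  my $t= XML::Twig->new( pretty_print => 'indented');
+  $t->parsefile( 'doc.xml');
+  $t->print_to_file( 'doc_indented.xml');  # whole document, to a file
+  my $xml= $t->sprint;                     # whole document, as a string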
+ +=item toSAX1 ($handler) + +Send SAX events for the twig to the SAX1 handler C<$handler> + +=item toSAX2 ($handler) + +Send SAX events for the twig to the SAX2 handler C<$handler> + +=item flush_toSAX1 ($handler) + +Same as flush, except that SAX events are sent to the SAX1 handler +C<$handler> instead of the twig being printed + +=item flush_toSAX2 ($handler) + +Same as flush, except that SAX events are sent to the SAX2 handler +C<$handler> instead of the twig being printed + +=item ignore + +This method should be called during parsing, usually in C<start_tag_handlers>. +It causes the element to be skipped during the parsing: the twig is not built +for this element, it will not be accessible during parsing or after it. The +element will not take up any memory and parsing will be faster. + +Note that this method can also be called on an element. If the element is a +parent of the current element then this element will be ignored (the twig will +not be built any more for it and what has already been built will be deleted). + +=item set_pretty_print ($style) + +Set the pretty print method, amongst 'C<none>' (default), 'C<nsgmls>', +'C<nice>', 'C<indented>', C<indented_c>, 'C<wrapped>', 'C<record>' and +'C<record_c>' + +B<WARNING:> the pretty print style is a B<GLOBAL> variable, so once set it's +applied to B<ALL> C<print>'s (and C<sprint>'s). Same goes if you use XML::Twig +with C<mod_perl> . This should not be a problem as the XML that's generated +is valid anyway, and XML processors (as well as HTML processors, including +browsers) should not care. Let me know if this is a big problem, but at the +moment the performance/cleanliness trade-off clearly favors the global +approach. + +=item set_empty_tag_style ($style) + +Set the empty tag display style ('C<normal>', 'C<html>' or 'C<expand>'). As +with C<L<set_pretty_print>> this sets a global flag. + +C<normal> outputs an empty tag 'C<< <tag/> >>', C<html> adds a space +'C<< <tag /> >>' for elements that can be empty in XHTML and C<expand> outputs +'C<< <tag></tag> >>' + +=item set_remove_cdata ($flag) + +set (or unset) the flag that forces the twig to output CDATA sections as +regular (escaped) PCDATA + +=item print_prolog ($optional_filehandle, %options) + +Prints the prolog (XML declaration + DTD + entity declarations) of a document. + +options: see C<L<flush>>. + +=item prolog ($optional_filehandle, %options) + +Return the prolog (XML declaration + DTD + entity declarations) of a document. + +options: see C<L<flush>>. + +=item finish + +Call Expat C<finish> method. +Unsets all handlers (including internal ones that set context), but expat +continues parsing to the end of the document or until it finds an error. +It should finish up a lot faster than with the handlers set. + +=item finish_print + +Stops twig processing, flush the twig and proceed to finish printing the +document as fast as possible. Use this method when modifying a document and +the modification is done. + +=item finish_now + +Stops twig processing, does not finish parsing the document (which could +actually be not well-formed after the point where C<finish_now> is called). +Execution resumes after the C<Lparse>> or C<L<parsefile>> call. The content +of the twig is what has been parsed so far (all open elements at the time +C<finish_now> is called are considered closed). 
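+
+For example, a hedged sketch that uses C<finish_print> from a handler once the
+only element of interest has been modified (the tag name is made up):
+
+  my $t= XML::Twig->new(
+           twig_handlers =>
+             { title => sub { my( $t, $title)= @_;
+                              $title->prefix( 'DRAFT: ');
+                              $t->finish_print;   # flush, then print the rest as fast as possible
+                            },
+             },
+         );
+  $t->parsefile( 'doc.xml');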
+
+=item set_expand_external_entities
+
+Same as using the C<L<expand_external_ents>> option when creating the twig
+
+=item set_input_filter
+
+Same as using the C<L<input_filter>> option when creating the twig
+
+=item set_keep_atts_order
+
+Same as using the C<L<keep_atts_order>> option when creating the twig
+
+=item set_keep_encoding
+
+Same as using the C<L<keep_encoding>> option when creating the twig
+
+=item escape_gt
+
+usually XML::Twig does not escape > in its output. Using this option
+makes it replace > by &gt;
+
+=item do_not_escape_gt
+
+reverts XML::Twig behavior to its default of not escaping > in its output.
+
+=item set_output_filter
+
+Same as using the C<L<output_filter>> option when creating the twig
+
+=item set_output_text_filter
+
+Same as using the C<L<output_text_filter>> option when creating the twig
+
+=item add_stylesheet ($type, @options)
+
+Adds an external stylesheet to an XML document.
+
+Supported types and options:
+
+=over 4
+
+=item xsl
+
+option: the url of the stylesheet
+
+Example:
+
+  $t->add_stylesheet( xsl => "xsl_style.xsl");
+
+will generate the following PI at the beginning of the document:
+
+  <?xml-stylesheet type="text/xsl" href="xsl_style.xsl"?>
+
+=item css
+
+option: the url of the stylesheet
+
+=item active_twig
+
+a class method that returns the last processed twig, so you don't necessarily
+need the object to call methods on it.
+
+=back
+
+=item Methods inherited from XML::Parser::Expat
+
+A twig inherits all the relevant methods from XML::Parser::Expat. These
+methods can only be used during the parsing phase (they will generate
+a fatal error otherwise).
+
+Inherited methods are:
+
+=over 4
+
+=item depth
+
+Returns the size of the context list.
+
+=item in_element
+
+Returns true if NAME is equal to the name of the innermost currently
+opened element. If namespace processing is being used and
+you want to check against a name that may be in a namespace, then
+use the generate_ns_name method to create the NAME argument.
+
+=item within_element
+
+Returns the number of times the given name appears in the context
+list. If namespace processing is being used and you want to check
+against a name that may be in a namespace, then use the
+generate_ns_name method to create the NAME argument.
+
+=item context
+
+Returns a list of element names that represent open elements, with
+the last one being the innermost. Inside start and end tag handlers,
+this will be the tag of the parent element.
+
+=item current_line
+
+Returns the line number of the current position of the parse.
+
+=item current_column
+
+Returns the column number of the current position of the parse.
+
+=item current_byte
+
+Returns the current position of the parse.
+
+=item position_in_context
+
+Returns a string that shows the current parse position. LINES
+should be an integer >= 0 that represents the number of lines on
+either side of the current parse line to place into the returned
+string.
+
+=item base ([NEWBASE])
+
+Returns the current value of the base for resolving relative URIs.
+If NEWBASE is supplied, changes the base to that value.
+
+=item current_element
+
+Returns the name of the innermost currently opened element. Inside
+start or end handlers, returns the parent of the element associated
+with those tags.
+
+=item element_index
+
+Returns an integer that is the depth-first visit order of the current
+element. This will be zero outside of the root element. 
For +example, this will return 1 when called from the start handler for +the root element start tag. + +=item recognized_string + +Returns the string from the document that was recognized in order +to call the current handler. For instance, when called from a start +handler, it will give us the start-tag string. The string is +encoded in UTF-8. This method doesn't return a meaningful string +inside declaration handlers. + +=item original_string + +Returns the verbatim string from the document that was recognized +in order to call the current handler. The string is in the original +document encoding. This method doesn't return a meaningful string +inside declaration handlers. + +=item xpcroak + +Concatenate onto the given message the current line number within +the XML document plus the message implied by ErrorContext. Then +croak with the formed message. + +=item xpcarp + +Concatenate onto the given message the current line number within +the XML document plus the message implied by ErrorContext. Then +carp with the formed message. + +=item xml_escape(TEXT [, CHAR [, CHAR ...]]) + +Returns TEXT with markup characters turned into character entities. +Any additional characters provided as arguments are also turned +into character references where found in TEXT. + +(this method is broken on some versions of expat/XML::Parser) + +=back + +=item path ( $optional_tag) + +Return the element context in a form similar to XPath's short +form: 'C</root/tag1/../tag>' + +=item get_xpath ( $optional_array_ref, $xpath, $optional_offset) + +Performs a C<get_xpath> on the document root (see <Elt|"Elt">) + +If the C<$optional_array_ref> argument is used the array must contain +elements. The C<$xpath> expression is applied to each element in turn +and the result is union of all results. This way a first query can be +refined in further steps. + + +=item find_nodes ( $optional_array_ref, $xpath, $optional_offset) + +same as C<get_xpath> + +=item findnodes ( $optional_array_ref, $xpath, $optional_offset) + +same as C<get_xpath> (similar to the XML::LibXML method) + +=item findvalue ( $optional_array_ref, $xpath, $optional_offset) + +Return the C<join> of all texts of the results of applying C<L<get_xpath>> +to the node (similar to the XML::LibXML method) + +=item findvalues ( $optional_array_ref, $xpath, $optional_offset) + +Return an array of all texts of the results of applying C<L<get_xpath>> +to the node + +=item subs_text ($regexp, $replace) + +subs_text does text substitution on the whole document, similar to perl's +C< s///> operator. + +=item dispose + +Useful only if you don't have C<Scalar::Util> or C<WeakRef> installed. + +Reclaims properly the memory used by an XML::Twig object. As the object has +circular references it never goes out of scope, so if you want to parse lots +of XML documents then the memory leak becomes a problem. Use +C<< $twig->dispose >> to clear this problem. + +=item att_accessors (list_of_attribute_names) + +A convenience method that creates l-valued accessors for attributes. +So C<< $twig->create_accessors( 'foo') >> will create a C<foo> method +that can be called on elements: + + $elt->foo; # equivalent to $elt->{'att'}->{'foo'}; + $elt->foo( 'bar'); # equivalent to $elt->set_att( foo => 'bar'); + +The methods are l-valued only under those perl's that support this +feature (5.6 and above) + +=item create_accessors (list_of_attribute_names) + +Same as att_accessors + +=item elt_accessors (list_of_attribute_names) + +A convenience method that creates accessors for elements. 
+So C<< $twig->create_accessors( 'foo') >> will create a C<foo> method +that can be called on elements: + + $elt->foo; # equivalent to $elt->first_child( 'foo'); + +=item field_accessors (list_of_attribute_names) + +A convenience method that creates accessors for element values (C<field>). +So C<< $twig->create_accessors( 'foo') >> will create a C<foo> method +that can be called on elements: + + $elt->foo; # equivalent to $elt->field( 'foo'); + +=item set_do_not_escape_amp_in_atts + +An evil method, that I only document because Test::Pod::Coverage complaints otherwise, +but really, you don't want to know about it. + +=back + +=head2 XML::Twig::Elt + +=over 4 + +=item new ($optional_tag, $optional_atts, @optional_content) + +The C<tag> is optional (but then you can't have a content ), the C<$optional_atts> +argument is a reference to a hash of attributes, the content can be just a +string or a list of strings and element. A content of 'C<#EMPTY>' creates an empty +element; + + Examples: my $elt= XML::Twig::Elt->new(); + my $elt= XML::Twig::Elt->new( para => { align => 'center' }); + my $elt= XML::Twig::Elt->new( para => { align => 'center' }, 'foo'); + my $elt= XML::Twig::Elt->new( br => '#EMPTY'); + my $elt= XML::Twig::Elt->new( 'para'); + my $elt= XML::Twig::Elt->new( para => 'this is a para'); + my $elt= XML::Twig::Elt->new( para => $elt3, 'another para'); + +The strings are not parsed, the element is not attached to any twig. + +B<WARNING>: if you rely on ID's then you will have to set the id yourself. At +this point the element does not belong to a twig yet, so the ID attribute +is not known so it won't be stored in the ID list. + +Note that C<#COMMENT>, C<#PCDATA> or C<#CDATA> are valid tag names, that will +create text elements. + +To create an element C<foo> containing a CDATA section: + + my $foo= XML::Twig::Elt->new( '#CDATA' => "content of the CDATA section") + ->wrap_in( 'foo'); + +An attribute of '#CDATA', will create the content of the element as CDATA: + + my $elt= XML::Twig::Elt->new( 'p' => { '#CDATA' => 1}, 'foo < bar'); + +creates an element + + <p><![CDATA[foo < bar]]></> + +=item parse ($string, %args) + +Creates an element from an XML string. The string is actually +parsed as a new twig, then the root of that twig is returned. +The arguments in C<%args> are passed to the twig. +As always if the parse fails the parser will die, so use an +eval if you want to trap syntax errors. + +As obviously the element does not exist beforehand this method has to be +called on the class: + + my $elt= parse XML::Twig::Elt( "<a> string to parse, with <sub/> + <elements>, actually tons of </elements> + h</a>"); + +=item set_inner_xml ($string) + +Sets the content of the element to be the tree created from the string + +=item set_inner_html ($string) + +Sets the content of the element, after parsing the string with an HTML +parser (HTML::Parser) + +=item set_outer_xml ($string) + +Replaces the element with the tree created from the string + +=item print ($optional_filehandle, $optional_pretty_print_style) + +Prints an entire element, including the tags, optionally to a +C<$optional_filehandle>, optionally with a C<$pretty_print_style>. + +The print outputs XML data so base entities are escaped. + +=item print_to_file ($filename, %options) + +Prints the element to file C<$filename>. + +options: see C<flush>. +=item sprint ($elt, $optional_no_enclosing_tag) + +Return the xml string for an entire element, including the tags. 
+If the optional second argument is true then only the string inside the +element is returned (the start and end tag for $elt are not). +The text is XML-escaped: base entities (& and < in text, & < and " in +attribute values) are turned into entities. + +=item gi + +Return the gi of the element (the gi is the C<generic identifier> the tag +name in SGML parlance). + +C<tag> and C<name> are synonyms of C<gi>. + +=item tag + +Same as C<L<gi>> + +=item name + +Same as C<L<tag>> + +=item set_gi ($tag) + +Set the gi (tag) of an element + +=item set_tag ($tag) + +Set the tag (=C<L<tag>>) of an element + +=item set_name ($name) + +Set the name (=C<L<tag>>) of an element + +=item root + +Return the root of the twig in which the element is contained. + +=item twig + +Return the twig containing the element. + +=item parent ($optional_condition) + +Return the parent of the element, or the first ancestor matching the +C<$optional_condition> + +=item first_child ($optional_condition) + +Return the first child of the element, or the first child matching the +C<$optional_condition> + +=item has_child ($optional_condition) + +Return the first child of the element, or the first child matching the +C<$optional_condition> (same as L<first_child>) + +=item has_children ($optional_condition) + +Return the first child of the element, or the first child matching the +C<$optional_condition> (same as L<first_child>) + + +=item first_child_text ($optional_condition) + +Return the text of the first child of the element, or the first child + matching the C<$optional_condition> +If there is no first_child then returns ''. This avoids getting the +child, checking for its existence then getting the text for trivial cases. + +Similar methods are available for the other navigation methods: + +=over 4 + +=item last_child_text + +=item prev_sibling_text + +=item next_sibling_text + +=item prev_elt_text + +=item next_elt_text + +=item child_text + +=item parent_text + +=back + +All this methods also exist in "trimmed" variant: + +=over 4 + +=item first_child_trimmed_text + +=item last_child_trimmed_text + +=item prev_sibling_trimmed_text + +=item next_sibling_trimmed_text + +=item prev_elt_trimmed_text + +=item next_elt_trimmed_text + +=item child_trimmed_text + +=item parent_trimmed_text + +=back + +=item field ($condition) + +Same method as C<first_child_text> with a different name + +=item fields ($condition_list) + +Return the list of field (text of first child matching the conditions), +missing fields are returned as the empty string. + +Same method as C<first_child_text> with a different name + +=item trimmed_field ($optional_condition) + +Same method as C<first_child_trimmed_text> with a different name + +=item set_field ($condition, $optional_atts, @list_of_elt_and_strings) + +Set the content of the first child of the element that matches +C<$condition>, the rest of the arguments is the same as for C<L<set_content>> + +If no child matches C<$condition> _and_ if C<$condition> is a valid +XML element name, then a new element by that name is created and +inserted as the last child. + +=item first_child_matches ($optional_condition) + +Return the element if the first child of the element (if it exists) passes +the C<$optional_condition> C<undef> otherwise + + if( $elt->first_child_matches( 'title')) ... 
+ +is equivalent to + + if( $elt->{first_child} && $elt->{first_child}->passes( 'title')) + +C<first_child_is> is an other name for this method + +Similar methods are available for the other navigation methods: + +=over 4 + +=item last_child_matches + +=item prev_sibling_matches + +=item next_sibling_matches + +=item prev_elt_matches + +=item next_elt_matches + +=item child_matches + +=item parent_matches + +=back + +=item is_first_child ($optional_condition) + +returns true (the element) if the element is the first child of its parent +(optionally that satisfies the C<$optional_condition>) + +=item is_last_child ($optional_condition) + +returns true (the element) if the element is the last child of its parent +(optionally that satisfies the C<$optional_condition>) + +=item prev_sibling ($optional_condition) + +Return the previous sibling of the element, or the previous sibling matching +C<$optional_condition> + +=item next_sibling ($optional_condition) + +Return the next sibling of the element, or the first one matching +C<$optional_condition>. + +=item next_elt ($optional_elt, $optional_condition) + +Return the next elt (optionally matching C<$optional_condition>) of the element. This +is defined as the next element which opens after the current element opens. +Which usually means the first child of the element. +Counter-intuitive as it might look this allows you to loop through the +whole document by starting from the root. + +The C<$optional_elt> is the root of a subtree. When the C<next_elt> is out of the +subtree then the method returns undef. You can then walk a sub-tree with: + + my $elt= $subtree_root; + while( $elt= $elt->next_elt( $subtree_root)) + { # insert processing code here + } + +=item prev_elt ($optional_condition) + +Return the previous elt (optionally matching C<$optional_condition>) of the +element. This is the first element which opens before the current one. +It is usually either the last descendant of the previous sibling or +simply the parent + +=item next_n_elt ($offset, $optional_condition) + +Return the C<$offset>-th element that matches the C<$optional_condition> + +=item following_elt + +Return the following element (as per the XPath following axis) + +=item preceding_elt + +Return the preceding element (as per the XPath preceding axis) + +=item following_elts + +Return the list of following elements (as per the XPath following axis) + +=item preceding_elts + +Return the list of preceding elements (as per the XPath preceding axis) + +=item children ($optional_condition) + +Return the list of children (optionally which matches C<$optional_condition>) of +the element. The list is in document order. 
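+
+For example (a sketch, with made-up tag and attribute names):
+
+  my @warnings= $section->children( 'para[@class="warning"]');  # only the matching children
+  my @all     = $section->children;                             # every child, in document order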
+ +=item children_count ($optional_condition) + +Return the number of children of the element (optionally which matches +C<$optional_condition>) + +=item children_text ($optional_condition) + +In array context, returns an array containing the text of children of the +element (optionally which matches C<$optional_condition>) + +In scalar context, returns the concatenation of the text of children of +the element + +=item children_trimmed_text ($optional_condition) + +In array context, returns an array containing the trimmed text of children +of the element (optionally which matches C<$optional_condition>) + +In scalar context, returns the concatenation of the trimmed text of children of +the element + + +=item children_copy ($optional_condition) + +Return a list of elements that are copies of the children of the element, +optionally which matches C<$optional_condition> + +=item descendants ($optional_condition) + +Return the list of all descendants (optionally which matches +C<$optional_condition>) of the element. This is the equivalent of the +C<getElementsByTagName> of the DOM (by the way, if you are really a DOM +addict, you can use C<getElementsByTagName> instead) + +=item getElementsByTagName ($optional_condition) + +Same as C<L<descendants>> + +=item find_by_tag_name ($optional_condition) + +Same as C<L<descendants>> + +=item descendants_or_self ($optional_condition) + +Same as C<L<descendants>> except that the element itself is included in the list +if it matches the C<$optional_condition> + +=item first_descendant ($optional_condition) + +Return the first descendant of the element that matches the condition + +=item last_descendant ($optional_condition) + +Return the last descendant of the element that matches the condition + +=item ancestors ($optional_condition) + +Return the list of ancestors (optionally matching C<$optional_condition>) of the +element. The list is ordered from the innermost ancestor to the outermost one + +NOTE: the element itself is not part of the list, in order to include it +you will have to use ancestors_or_self + +=item ancestors_or_self ($optional_condition) + +Return the list of ancestors (optionally matching C<$optional_condition>) of the +element, including the element (if it matches the condition>). +The list is ordered from the innermost ancestor to the outermost one + +=item passes ($condition) + +Return the element if it passes the C<$condition> + +=item att ($att) + +Return the value of attribute C<$att> or C<undef> + +=item latt ($att) + +Return the value of attribute C<$att> or C<undef> + +this method is an lvalue, so you can do C<< $elt->latt( 'foo')= 'bar' >> or C<< $elt->latt( 'foo')++; >> + +=item set_att ($att, $att_value) + +Set the attribute of the element to the given value + +You can actually set several attributes this way: + + $elt->set_att( att1 => "val1", att2 => "val2"); + +=item del_att ($att) + +Delete the attribute for the element + +You can actually delete several attributes at once: + + $elt->del_att( 'att1', 'att2', 'att3'); + +=item att_exists ($att) + +Returns true if the attribute C<$att> exists for the element, false +otherwise + +=item cut + +Cut the element from the tree. The element still exists, it can be copied +or pasted somewhere else, it is just not attached to the tree anymore. 
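+
+A minimal sketch (the element names are made up):
+
+  my $note= $doc->first_child( 'note');
+  $note->cut;                               # detached from the tree, but still usable
+  $note->paste( last_child => $appendix);   # re-attached somewhere else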
+ +Note that the "old" links to the parent, previous and next siblings can +still be accessed using the former_* methods + +=item former_next_sibling + +Returns the former next sibling of a cut node (or undef if the node has not been cut) + +This makes it easier to write loops where you cut elements: + + my $child= $parent->first_child( 'achild'); + while( $child->{'att'}->{'cut'}) + { $child->cut; $child= ($child->{former} && $child->{former}->{next_sibling}); } + +=item former_prev_sibling + +Returns the former previous sibling of a cut node (or undef if the node has not been cut) + +=item former_parent + +Returns the former parent of a cut node (or undef if the node has not been cut) + +=item cut_children ($optional_condition) + +Cut all the children of the element (or all of those which satisfy the +C<$optional_condition>). + +Return the list of children + +=item cut_descendants ($optional_condition) + +Cut all the descendants of the element (or all of those which satisfy the +C<$optional_condition>). + +Return the list of descendants + +=item copy ($elt) + +Return a copy of the element. The copy is a "deep" copy: all sub-elements of +the element are duplicated. + +=item paste ($optional_position, $ref) + +Paste a (previously C<cut> or newly generated) element. Die if the element +already belongs to a tree. + +Note that the calling element is pasted: + + $child->paste( first_child => $existing_parent); + $new_sibling->paste( after => $this_sibling_is_already_in_the_tree); + +or + + my $new_elt= XML::Twig::Elt->new( tag => $content); + $new_elt->paste( $position => $existing_elt); + +Example: + + my $t= XML::Twig->new->parse( 'doc.xml') + my $toc= $t->root->new( 'toc'); + $toc->paste( $t->root); # $toc is pasted as first child of the root + foreach my $title ($t->findnodes( '/doc/section/title')) + { my $title_toc= $title->copy; + # paste $title_toc as the last child of toc + $title_toc->paste( last_child => $toc) + } + +Position options: + +=over 4 + +=item first_child (default) + +The element is pasted as the first child of C<$ref> + +=item last_child + +The element is pasted as the last child of C<$ref> + +=item before + +The element is pasted before C<$ref>, as its previous sibling. + +=item after + +The element is pasted after C<$ref>, as its next sibling. + +=item within + +In this case an extra argument, C<$offset>, should be supplied. The element +will be pasted in the reference element (or in its first text child) at the +given offset. To achieve this the reference element will be split at the +offset. + +=back + +Note that you can call directly the underlying method: + +=over 4 + +=item paste_before + +=item paste_after + +=item paste_first_child + +=item paste_last_child + +=item paste_within + +=back + +=item move ($optional_position, $ref) + +Move an element in the tree. +This is just a C<cut> then a C<paste>. The syntax is the same as C<paste>. + +=item replace ($ref) + +Replaces an element in the tree. Sometimes it is just not possible toC<cut> +an element then C<paste> another in its place, so C<replace> comes in handy. +The calling element replaces C<$ref>. + +=item replace_with (@elts) + +Replaces the calling element with one or more elements + +=item delete + +Cut the element and frees the memory. + +=item prefix ($text, $optional_option) + +Add a prefix to an element. 
If the element is a C<PCDATA> element the text +is added to the pcdata, if the elements first child is a C<PCDATA> then the +text is added to it's pcdata, otherwise a new C<PCDATA> element is created +and pasted as the first child of the element. + +If the option is C<asis> then the prefix is added asis: it is created in +a separate C<PCDATA> element with an C<asis> property. You can then write: + + $elt1->prefix( '<b>', 'asis'); + +to create a C<< <b> >> in the output of C<print>. + +=item suffix ($text, $optional_option) + +Add a suffix to an element. If the element is a C<PCDATA> element the text +is added to the pcdata, if the elements last child is a C<PCDATA> then the +text is added to it's pcdata, otherwise a new PCDATA element is created +and pasted as the last child of the element. + +If the option is C<asis> then the suffix is added asis: it is created in +a separate C<PCDATA> element with an C<asis> property. You can then write: + + $elt2->suffix( '</b>', 'asis'); + +=item trim + +Trim the element in-place: spaces at the beginning and at the end of the element +are discarded and multiple spaces within the element (or its descendants) are +replaced by a single space. + +Note that in some cases you can still end up with multiple spaces, if they are +split between several elements: + + <doc> text <b> hah! </b> yep</doc> + +gets trimmed to + + <doc>text <b> hah! </b> yep</doc> + +This is somewhere in between a bug and a feature. + +=item normalize + +merge together all consecutive pcdata elements in the element (if for example +you have turned some elements into pcdata using C<L<erase>>, this will give you +a "clean" element in which there all text fragments are as long as possible). + + +=item simplify (%options) + +Return a data structure suspiciously similar to XML::Simple's. Options are +identical to XMLin options, see XML::Simple doc for more details (or use +DATA::dumper or YAML to dump the data structure) + +B<Note>: there is no magic here, if you write +C<< $twig->parsefile( $file )->simplify(); >> then it will load the entire +document in memory. I am afraid you will have to put some work into it to +get just the bits you want and discard the rest. Look at the synopsis or +the XML::Twig 101 section at the top of the docs for more information. + +=over 4 + +=item content_key + +=item forcearray + +=item keyattr + +=item noattr + +=item normalize_space + +aka normalise_space + +=item variables (%var_hash) + +%var_hash is a hash { name => value } + +This option allows variables in the XML to be expanded when the file is read. (there is no facility for putting the variable names back if you regenerate XML using XMLout). + +A 'variable' is any text of the form ${name} (or $name) which occurs in an attribute value or in the text content of an element. If 'name' matches a key in the supplied hashref, ${name} will be replaced with the corresponding value from the hashref. If no matching key is found, the variable will not be replaced. + +=item var_att ($attribute_name) + +This option gives the name of an attribute that will be used to create +variables in the XML: + + <dirs> + <dir name="prefix">/usr/local</dir> + <dir name="exec_prefix">$prefix/bin</dir> + </dirs> + +use C<< var => 'name' >> to get $prefix replaced by /usr/local in the +generated data structure + +By default variables are captured by the following regexp: /$(\w+)/ + +=item var_regexp (regexp) + +This option changes the regexp used to capture variables. 
The variable +name should be in $1 + +=item group_tags { grouping tag => grouped tag, grouping tag 2 => grouped tag 2...} + +Option used to simplify the structure: elements listed will not be used. +Their children will be, they will be considered children of the element +parent. + +If the element is: + + <config host="laptop.xmltwig.org"> + <server>localhost</server> + <dirs> + <dir name="base">/home/mrodrigu/standards</dir> + <dir name="tools">$base/tools</dir> + </dirs> + <templates> + <template name="std_def">std_def.templ</template> + <template name="dummy">dummy</template> + </templates> + </config> + +Then calling simplify with C<< group_tags => { dirs => 'dir', +templates => 'template'} >> +makes the data structure be exactly as if the start and end tags for C<dirs> and +C<templates> were not there. + +A YAML dump of the structure + + base: '/home/mrodrigu/standards' + host: laptop.xmltwig.org + server: localhost + template: + - std_def.templ + - dummy.templ + tools: '$base/tools' + + +=back + +=item split_at ($offset) + +Split a text (C<PCDATA> or C<CDATA>) element in 2 at C<$offset>, the original +element now holds the first part of the string and a new element holds the +right part. The new element is returned + +If the element is not a text element then the first text child of the element +is split + +=item split ( $optional_regexp, $tag1, $atts1, $tag2, $atts2...) + +Split the text descendants of an element in place, the text is split using +the C<$regexp>, if the regexp includes () then the matched separators will be +wrapped in elements. C<$1> is wrapped in $tag1, with attributes C<$atts1> if +C<$atts1> is given (as a hashref), C<$2> is wrapped in $tag2... + +if $elt is C<< <p>tati tata <b>tutu tati titi</b> tata tati tata</p> >> + + $elt->split( qr/(ta)ti/, 'foo', {type => 'toto'} ) + +will change $elt to + + <p><foo type="toto">ta</foo> tata <b>tutu <foo type="toto">ta</foo> + titi</b> tata <foo type="toto">ta</foo> tata</p> + +The regexp can be passed either as a string or as C<qr//> (perl 5.005 and +later), it defaults to \s+ just as the C<split> built-in (but this would be +quite a useless behaviour without the C<$optional_tag> parameter) + +C<$optional_tag> defaults to PCDATA or CDATA, depending on the initial element +type + +The list of descendants is returned (including un-touched original elements +and newly created ones) + +=item mark ( $regexp, $optional_tag, $optional_attribute_ref) + +This method behaves exactly as L<split>, except only the newly created +elements are returned + +=item wrap_children ( $regexp_string, $tag, $optional_attribute_hashref) + +Wrap the children of the element that match the regexp in an element C<$tag>. +If $optional_attribute_hashref is passed then the new element will +have these attributes. + +The $regexp_string includes tags, within pointy brackets, as in +C<< <title><para>+ >> and the usual Perl modifiers (+*?...). +Tags can be further qualified with attributes: +C<< <para type="warning" classif="cosmic_secret">+ >>. The values +for attributes should be xml-escaped: C<< <candy type="M&Ms">* >> +(C<E<lt>>, C<&> B<C<E<gt>>> and C<"> should be escaped). + +Note that elements might get extra C<id> attributes in the process. See L<add_id>. +Use L<strip_att> to remove unwanted id's. 
+ +Here is an example: + +If the element C<$elt> has the following content: + + <elt> + <p>para 1</p> + <l_l1_1>list 1 item 1 para 1</l_l1_1> + <l_l1>list 1 item 1 para 2</l_l1> + <l_l1_n>list 1 item 2 para 1 (only para)</l_l1_n> + <l_l1_n>list 1 item 3 para 1</l_l1_n> + <l_l1>list 1 item 3 para 2</l_l1> + <l_l1>list 1 item 3 para 3</l_l1> + <l_l1_1>list 2 item 1 para 1</l_l1_1> + <l_l1>list 2 item 1 para 2</l_l1> + <l_l1_n>list 2 item 2 para 1 (only para)</l_l1_n> + <l_l1_n>list 2 item 3 para 1</l_l1_n> + <l_l1>list 2 item 3 para 2</l_l1> + <l_l1>list 2 item 3 para 3</l_l1> + </elt> + +Then the code + + $elt->wrap_children( q{<l_l1_1><l_l1>*} , li => { type => "ul1" }); + $elt->wrap_children( q{<l_l1_n><l_l1>*} , li => { type => "ul" }); + + $elt->wrap_children( q{<li type="ul1"><li type="ul">+}, "ul"); + $elt->strip_att( 'id'); + $elt->strip_att( 'type'); + $elt->print; + +will output: + + <elt> + <p>para 1</p> + <ul> + <li> + <l_l1_1>list 1 item 1 para 1</l_l1_1> + <l_l1>list 1 item 1 para 2</l_l1> + </li> + <li> + <l_l1_n>list 1 item 2 para 1 (only para)</l_l1_n> + </li> + <li> + <l_l1_n>list 1 item 3 para 1</l_l1_n> + <l_l1>list 1 item 3 para 2</l_l1> + <l_l1>list 1 item 3 para 3</l_l1> + </li> + </ul> + <ul> + <li> + <l_l1_1>list 2 item 1 para 1</l_l1_1> + <l_l1>list 2 item 1 para 2</l_l1> + </li> + <li> + <l_l1_n>list 2 item 2 para 1 (only para)</l_l1_n> + </li> + <li> + <l_l1_n>list 2 item 3 para 1</l_l1_n> + <l_l1>list 2 item 3 para 2</l_l1> + <l_l1>list 2 item 3 para 3</l_l1> + </li> + </ul> + </elt> + +=item subs_text ($regexp, $replace) + +subs_text does text substitution, similar to perl's C< s///> operator. + +C<$regexp> must be a perl regexp, created with the C<qr> operator. + +C<$replace> can include C<$1, $2>... from the C<$regexp>. It can also be +used to create element and entities, by using +C<< &elt( tag => { att => val }, text) >> (similar syntax as C<L<new>>) and +C<< &ent( name) >>. + +Here is a rather complex example: + + $elt->subs_text( qr{(?<!do not )link to (http://([^\s,]*))}, + 'see &elt( a =>{ href => $1 }, $2)' + ); + +This will replace text like I<link to http://www.xmltwig.org> by +I<< see <a href="www.xmltwig.org">www.xmltwig.org</a> >>, but not +I<do not link to...> + +Generating entities (here replacing spaces with  ): + + $elt->subs_text( qr{ }, '&ent( " ")'); + +or, using a variable: + + my $ent=" "; + $elt->subs_text( qr{ }, "&ent( '$ent')"); + +Note that the substitution is always global, as in using the C<g> modifier +in a perl substitution, and that it is performed on all text descendants +of the element. + +B<Bug>: in the C<$regexp>, you can only use C<\1>, C<\2>... if the replacement +expression does not include elements or attributes. eg + + $t->subs_text( qr/((t[aiou])\2)/, '$2'); # ok, replaces toto, tata, titi, tutu by to, ta, ti, tu + $t->subs_text( qr/((t[aiou])\2)/, '&elt(p => $1)' ); # NOK, does not find toto... + +=item add_id ($optional_coderef) + +Add an id to the element. + +The id is an attribute, C<id> by default, see the C<id> option for XML::Twig +C<new> to change it. Use an id starting with C<#> to get an id that's not +output by L<print>, L<flush> or L<sprint>, yet that allows you to use the +L<elt_id> method to get the element easily. + +If the element already has an id, no new id is generated. + +By default the method create an id of the form C<< twig_id_<nnnn> >>, +where C<< <nnnn> >> is a number, incremented each time the method is called +successfully. 
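+
+A minimal sketch (assuming C<$elt> has no id yet and the default C<id>
+attribute name is used):
+
+    $elt->add_id;                     # sets an id of the form twig_id_<nnnn>
+    print $elt->att( 'id'), "\n";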
+ +=item set_id_seed ($prefix) + +by default the id generated by C<L<add_id>> is C<< twig_id_<nnnn> >>, +C<set_id_seed> changes the prefix to C<$prefix> and resets the number +to 1 + +=item strip_att ($att) + +Remove the attribute C<$att> from all descendants of the element (including +the element) + +Return the element + +=item change_att_name ($old_name, $new_name) + +Change the name of the attribute from C<$old_name> to C<$new_name>. If there is no +attribute C<$old_name> nothing happens. + +=item lc_attnames + +Lower cases the name all the attributes of the element. + +=item sort_children_on_value( %options) + +Sort the children of the element in place according to their text. +All children are sorted. + +Return the element, with its children sorted. + + +C<%options> are + + type : numeric | alpha (default: alpha) + order : normal | reverse (default: normal) + +Return the element, with its children sorted + + +=item sort_children_on_att ($att, %options) + +Sort the children of the element in place according to attribute C<$att>. +C<%options> are the same as for C<sort_children_on_value> + +Return the element. + + +=item sort_children_on_field ($tag, %options) + +Sort the children of the element in place, according to the field C<$tag> (the +text of the first child of the child with this tag). C<%options> are the same +as for C<sort_children_on_value>. + +Return the element, with its children sorted + + +=item sort_children( $get_key, %options) + +Sort the children of the element in place. The C<$get_key> argument is +a reference to a function that returns the sort key when passed an element. + +For example: + + $elt->sort_children( sub { $_[0]->{'att'}->{"nb"} + $_[0]->text }, + type => 'numeric', order => 'reverse' + ); + +=item field_to_att ($cond, $att) + +Turn the text of the first sub-element matched by C<$cond> into the value of +attribute C<$att> of the element. If C<$att> is omitted then C<$cond> is used +as the name of the attribute, which makes sense only if C<$cond> is a valid +element (and attribute) name. + +The sub-element is then cut. + +=item att_to_field ($att, $tag) + +Take the value of attribute C<$att> and create a sub-element C<$tag> as first +child of the element. If C<$tag> is omitted then C<$att> is used as the name of +the sub-element. + + +=item get_xpath ($xpath, $optional_offset) + +Return a list of elements satisfying the C<$xpath>. C<$xpath> is an XPATH-like +expression. + +A subset of the XPATH abbreviated syntax is covered: + + tag + tag[1] (or any other positive number) + tag[last()] + tag[@att] (the attribute exists for the element) + tag[@att="val"] + tag[@att=~ /regexp/] + tag[att1="val1" and att2="val2"] + tag[att1="val1" or att2="val2"] + tag[string()="toto"] (returns tag elements which text (as per the text method) + is toto) + tag[string()=~/regexp/] (returns tag elements which text (as per the text + method) matches regexp) + expressions can start with / (search starts at the document root) + expressions can start with . 
(search starts at the current element) + // can be used to get all descendants instead of just direct children + * matches any tag + +So the following examples from the +F<XPath recommendationL<http://www.w3.org/TR/xpath.html#path-abbrev>> work: + + para selects the para element children of the context node + * selects all element children of the context node + para[1] selects the first para child of the context node + para[last()] selects the last para child of the context node + */para selects all para grandchildren of the context node + /doc/chapter[5]/section[2] selects the second section of the fifth chapter + of the doc + chapter//para selects the para element descendants of the chapter element + children of the context node + //para selects all the para descendants of the document root and thus selects + all para elements in the same document as the context node + //olist/item selects all the item elements in the same document as the + context node that have an olist parent + .//para selects the para element descendants of the context node + .. selects the parent of the context node + para[@type="warning"] selects all para children of the context node that have + a type attribute with value warning + employee[@secretary and @assistant] selects all the employee children of the + context node that have both a secretary attribute and an assistant + attribute + + +The elements will be returned in the document order. + +If C<$optional_offset> is used then only one element will be returned, the one +with the appropriate offset in the list, starting at 0 + +Quoting and interpolating variables can be a pain when the Perl syntax and the +XPATH syntax collide, so use alternate quoting mechanisms like q or qq +(I like q{} and qq{} myself). + +Here are some more examples to get you started: + + my $p1= "p1"; + my $p2= "p2"; + my @res= $t->get_xpath( qq{p[string( "$p1") or string( "$p2")]}); + + my $a= "a1"; + my @res= $t->get_xpath( qq{//*[@att="$a"]}); + + my $val= "a1"; + my $exp= qq{//p[ \@att='$val']}; # you need to use \@ or you will get a warning + my @res= $t->get_xpath( $exp); + +Note that the only supported regexps delimiters are / and that you must +backslash all / in regexps AND in regular strings. + +XML::Twig does not provide natively full XPATH support, but you can use +C<L<XML::Twig::XPath>> to get C<findnodes> to use C<XML::XPath> as the +XPath engine, with full coverage of the spec. + +C<L<XML::Twig::XPath>> to get C<findnodes> to use C<XML::XPath> as the +XPath engine, with full coverage of the spec. + +=item find_nodes + +same asC<get_xpath> + +=item findnodes + +same as C<get_xpath> + + +=item text @optional_options + +Return a string consisting of all the C<PCDATA> and C<CDATA> in an element, +without any tags. The text is not XML-escaped: base entities such as C<&> +and C<< < >> are not escaped. + +The 'C<no_recurse>' option will only return the text of the element, not +of any included sub-elements (same as C<L<text_only>>). + +=item text_only + +Same as C<L<text>> except that the text returned doesn't include +the text of sub-elements. + +=item trimmed_text + +Same as C<text> except that the text is trimmed: leading and trailing spaces +are discarded, consecutive spaces are collapsed + +=item set_text ($string) + +Set the text for the element: if the element is a C<PCDATA>, just set its +text, otherwise cut all the children of the element and create a single +C<PCDATA> child for it, which holds the text. 
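+
+A short sketch contrasting C<text> and C<set_text> (the C<title> tag is
+made up):
+
+    my $title= $doc->first_child( 'title');
+    print $title->text, "\n";         # all the PCDATA/CDATA of the element, no tags
+    $title->set_text( 'New title');   # children are cut, replaced by a single PCDATA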
+ +=item merge ($elt2) + +Move the content of C<$elt2> within the element + +=item insert ($tag1, [$optional_atts1], $tag2, [$optional_atts2],...) + +For each tag in the list inserts an element C<$tag> as the only child of the +element. The element gets the optional attributes inC<< $optional_atts<n>. >> +All children of the element are set as children of the new element. +The upper level element is returned. + + $p->insert( table => { border=> 1}, 'tr', 'td') + +put C<$p> in a table with a visible border, a single C<tr> and a single C<td> +and return the C<table> element: + + <p><table border="1"><tr><td>original content of p</td></tr></table></p> + +=item wrap_in (@tag) + +Wrap elements in C<@tag> as the successive ancestors of the element, returns the +new element. +C<< $elt->wrap_in( 'td', 'tr', 'table') >> wraps the element as a single cell in a +table for example. + +Optionally each tag can be followed by a hashref of attributes, that will be +set on the wrapping element: + + $elt->wrap_in( p => { class => "advisory" }, div => { class => "intro", id => "div_intro" }); + +=item insert_new_elt ($opt_position, $tag, $opt_atts_hashref, @opt_content) + +Combines a C<L<new> > and a C<L<paste> >: creates a new element using +C<$tag>, C<$opt_atts_hashref >and C<@opt_content> which are arguments similar +to those for C<new>, then paste it, using C<$opt_position> or C<'first_child'>, +relative to C<$elt>. + +Return the newly created element + +=item erase + +Erase the element: the element is deleted and all of its children are +pasted in its place. + +=item set_content ( $optional_atts, @list_of_elt_and_strings) + ( $optional_atts, '#EMPTY') + +Set the content for the element, from a list of strings and +elements. Cuts all the element children, then pastes the list +elements as the children. This method will create a C<PCDATA> element +for any strings in the list. + +The C<$optional_atts> argument is the ref of a hash of attributes. If this +argument is used then the previous attributes are deleted, otherwise they +are left untouched. + +B<WARNING>: if you rely on ID's then you will have to set the id yourself. At +this point the element does not belong to a twig yet, so the ID attribute +is not known so it won't be stored in the ID list. + +A content of 'C<#EMPTY>' creates an empty element; + +=item namespace ($optional_prefix) + +Return the URI of the namespace that C<$optional_prefix> or the element name +belongs to. If the name doesn't belong to any namespace, C<undef> is returned. + +=item local_name + +Return the local name (without the prefix) for the element + +=item ns_prefix + +Return the namespace prefix for the element + +=item current_ns_prefixes + +Return a list of namespace prefixes valid for the element. The order of the +prefixes in the list has no meaning. If the default namespace is currently +bound, '' appears in the list. + + +=item inherit_att ($att, @optional_tag_list) + +Return the value of an attribute inherited from parent tags. The value +returned is found by looking for the attribute in the element then in turn +in each of its ancestors. If the C<@optional_tag_list> is supplied only those +ancestors whose tag is in the list will be checked. + +=item all_children_are ($optional_condition) + +return 1 if all children of the element pass the C<$optional_condition>, +0 otherwise + +=item level ($optional_condition) + +Return the depth of the element in the twig (root is 0). +If C<$optional_condition> is given then only ancestors that match the condition are +counted. 
+ +B<WARNING>: in a tree created using the C<twig_roots> option this will not return +the level in the document tree, level 0 will be the document root, level 1 +will be the C<twig_roots> elements. During the parsing (in a C<twig_handler>) +you can use the C<depth> method on the twig object to get the real parsing depth. + +=item in ($potential_parent) + +Return true if the element is in the potential_parent (C<$potential_parent> is +an element) + +=item in_context ($cond, $optional_level) + +Return true if the element is included in an element which passes C<$cond> +optionally within C<$optional_level> levels. The returned value is the +including element. + +=item pcdata + +Return the text of a C<PCDATA> element or C<undef> if the element is not +C<PCDATA>. + +=item pcdata_xml_string + +Return the text of a C<PCDATA> element or undef if the element is not C<PCDATA>. +The text is "XML-escaped" ('&' and '<' are replaced by '&' and '<') + +=item set_pcdata ($text) + +Set the text of a C<PCDATA> element. This method does not check that the element is +indeed a C<PCDATA> so usually you should use C<L<set_text>> instead. + +=item append_pcdata ($text) + +Add the text at the end of a C<PCDATA> element. + +=item is_cdata + +Return 1 if the element is a C<CDATA> element, returns 0 otherwise. + +=item is_text + +Return 1 if the element is a C<CDATA> or C<PCDATA> element, returns 0 otherwise. + +=item cdata + +Return the text of a C<CDATA> element or C<undef> if the element is not +C<CDATA>. + +=item cdata_string + +Return the XML string of a C<CDATA> element, including the opening and +closing markers. + +=item set_cdata ($text) + +Set the text of a C<CDATA> element. + +=item append_cdata ($text) + +Add the text at the end of a C<CDATA> element. + +=item remove_cdata + +Turns all C<CDATA> sections in the element into regular C<PCDATA> elements. This is useful +when converting XML to HTML, as browsers do not support CDATA sections. + +=item extra_data + +Return the extra_data (comments and PI's) attached to an element + +=item set_extra_data ($extra_data) + +Set the extra_data (comments and PI's) attached to an element + +=item append_extra_data ($extra_data) + +Append extra_data to the existing extra_data before the element (if no +previous extra_data exists then it is created) + +=item set_asis + +Set a property of the element that causes it to be output without being XML +escaped by the print functions: if it contains C<< a < b >> it will be output +as such and not as C<< a < b >>. This can be useful to create text elements +that will be output as markup. Note that all C<PCDATA> descendants of the +element are also marked as having the property (they are the ones that are +actually impacted by the change). + +If the element is a C<CDATA> element it will also be output asis, without the +C<CDATA> markers. The same goes for any C<CDATA> descendant of the element + +=item set_not_asis + +Unsets the C<asis> property for the element and its text descendants. + +=item is_asis + +Return the C<asis> property status of the element ( 1 or C<undef>) + +=item closed + +Return true if the element has been closed. Might be useful if you are +somewhere in the tree, during the parse, and have no idea whether a parent +element is completely loaded or not. + +=item get_type + +Return the type of the element: 'C<#ELT>' for "real" elements, or 'C<#PCDATA>', +'C<#CDATA>', 'C<#COMMENT>', 'C<#ENT>', 'C<#PI>' + +=item is_elt + +Return the tag if the element is a "real" element, or 0 if it is C<PCDATA>, +C<CDATA>... 
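+
+For instance, a small sketch that skips non-element nodes (text, comments
+or PIs kept in the tree) while looping over children:
+
+    foreach my $child ($elt->children)
+      { next unless $child->is_elt;   # skip #PCDATA, #CDATA, #COMMENT, #PI children
+        print $child->tag, "\n";
+      }
+
+The same filtering can also be done by passing the C<#ELT> condition
+described in the C<cond> section below.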
+ +=item contains_only_text + +Return 1 if the element does not contain any other "real" element + +=item contains_only ($exp) + +Return the list of children if all children of the element match +the expression C<$exp> + + if( $para->contains_only( 'tt')) { ... } + +=item contains_a_single ($exp) + +If the element contains a single child that matches the expression C<$exp> +returns that element. Otherwise returns 0. + +=item is_field + +same as C<contains_only_text> + +=item is_pcdata + +Return 1 if the element is a C<PCDATA> element, returns 0 otherwise. + +=item is_ent + +Return 1 if the element is an entity (an unexpanded entity) element, +return 0 otherwise. + +=item is_empty + +Return 1 if the element is empty, 0 otherwise + +=item set_empty + +Flags the element as empty. No further check is made, so if the element +is actually not empty the output will be messed. The only effect of this +method is that the output will be C<< <tag att="value""/> >>. + +=item set_not_empty + +Flags the element as not empty. if it is actually empty then the element will +be output as C<< <tag att="value""></tag> >> + +=item is_pi + +Return 1 if the element is a processing instruction (C<#PI>) element, +return 0 otherwise. + +=item target + +Return the target of a processing instruction + +=item set_target ($target) + +Set the target of a processing instruction + +=item data + +Return the data part of a processing instruction + +=item set_data ($data) + +Set the data of a processing instruction + +=item set_pi ($target, $data) + +Set the target and data of a processing instruction + +=item pi_string + +Return the string form of a processing instruction +(C<< <?target data?> >>) + +=item is_comment + +Return 1 if the element is a comment (C<#COMMENT>) element, +return 0 otherwise. + +=item set_comment ($comment_text) + +Set the text for a comment + +=item comment + +Return the content of a comment (just the text, not the C<< <!-- >> +and C<< --> >>) + +=item comment_string + +Return the XML string for a comment (C<< <!-- comment --> >>) + +Note that an XML comment cannot start or end with a '-', or include '--' +(http://www.w3.org/TR/2008/REC-xml-20081126/#sec-comments), +if that is the case (because you have created the comment yourself presumably, +as it could not be in the input XML), then a space will be inserted before +an initial '-', after a trailing one or between two '-' in the comment +(which could presumably mangle javascript "hidden" in an XHTML comment); + +=item set_ent ($entity) + +Set an (non-expanded) entity (C<#ENT>). C<$entity>) is the entity +text (C<&ent;>) + +=item ent + +Return the entity for an entity (C<#ENT>) element (C<&ent;>) + +=item ent_name + +Return the entity name for an entity (C<#ENT>) element (C<ent>) + +=item ent_string + +Return the entity, either expanded if the expanded version is available, +or non-expanded (C<&ent;>) otherwise + +=item child ($offset, $optional_condition) + +Return the C<$offset>-th child of the element, optionally the C<$offset>-th +child that matches C<$optional_condition>. The children are treated as a list, so +C<< $elt->child( 0) >> is the first child, while C<< $elt->child( -1) >> is +the last child. + +=item child_text ($offset, $optional_condition) + +Return the text of a child or C<undef> if the sibling does not exist. Arguments +are the same as child. 
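+
+A short sketch of offset-based access (the C<row> tag is made up):
+
+    my $first_row= $table->child( 0, 'row');      # first 'row' child
+    my $last_row = $table->child( -1, 'row');     # last 'row' child
+    print $table->child_text( 1, 'row'), "\n";    # text of the second 'row' child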
+ +=item last_child ($optional_condition) + +Return the last child of the element, or the last child matching +C<$optional_condition> (ie the last of the element children matching +the condition). + +=item last_child_text ($optional_condition) + +Same as C<first_child_text> but for the last child. + +=item sibling ($offset, $optional_condition) + +Return the next or previous C<$offset>-th sibling of the element, or the +C<$offset>-th one matching C<$optional_condition>. If C<$offset> is negative then a +previous sibling is returned, if $offset is positive then a next sibling is +returned. C<$offset=0> returns the element if there is no condition or +if the element matches the condition>, C<undef> otherwise. + +=item sibling_text ($offset, $optional_condition) + +Return the text of a sibling or C<undef> if the sibling does not exist. +Arguments are the same as C<sibling>. + +=item prev_siblings ($optional_condition) + +Return the list of previous siblings (optionally matching C<$optional_condition>) +for the element. The elements are ordered in document order. + +=item next_siblings ($optional_condition) + +Return the list of siblings (optionally matching C<$optional_condition>) +following the element. The elements are ordered in document order. + +=item siblings ($optional_condition) + +Return the list of siblings (optionally matching C<$optional_condition>) +of the element (excluding the element itself). The elements are ordered +in document order. + +=item pos ($optional_condition) + +Return the position of the element in the children list. The first child has a +position of 1 (as in XPath). + +If the C<$optional_condition> is given then only siblings that match the condition +are counted. If the element itself does not match the condition then +0 is returned. + +=item atts + +Return a hash ref containing the element attributes + +=item set_atts ({ att1=>$att1_val, att2=> $att2_val... }) + +Set the element attributes with the hash ref supplied as the argument. The previous +attributes are lost (ie the attributes set by C<set_atts> replace all of the +attributes of the element). + +You can also pass a list instead of a hashref: C<< $elt->set_atts( att1 => 'val1',...) >> + +=item del_atts + +Deletes all the element attributes. + +=item att_nb + +Return the number of attributes for the element + +=item has_atts + +Return true if the element has attributes (in fact return the number of +attributes, thus being an alias to C<L<att_nb>> + +=item has_no_atts + +Return true if the element has no attributes, false (0) otherwise + +=item att_names + +return a list of the attribute names for the element + +=item att_xml_string ($att, $options) + +Return the attribute value, where '&', '<' and quote (" or the value of the quote option +at twig creation) are XML-escaped. + +The options are passed as a hashref, setting C<escape_gt> to a true value will also escape +'>' ($elt( 'myatt', { escape_gt => 1 }); + +=item set_id ($id) + +Set the C<id> attribute of the element to the value. 
+See C<L<elt_id> > to change the id attribute name + +=item id + +Gets the id attribute value + +=item del_id ($id) + +Deletes the C<id> attribute of the element and remove it from the id list +for the document + +=item class + +Return the C<class> attribute for the element (methods on the C<class> +attribute are quite convenient when dealing with XHTML, or plain XML that +will eventually be displayed using CSS) + +=item lclass + +same as class, except that +this method is an lvalue, so you can do C<< $elt->lclass= "foo" >> + +=item set_class ($class) + +Set the C<class> attribute for the element to C<$class> + +=item add_class ($class) + +Add C<$class> to the element C<class> attribute: the new class is added +only if it is not already present. + +Note that classes are then sorted alphabetically, so the C<class> attribute +can be changed even if the class is already there + +=item remove_class ($class) + +Remove C<$class> from the element C<class> attribute. + +Note that classes are then sorted alphabetically, so the C<class> attribute can be +changed even if the class is already there + + +=item add_to_class ($class) + +alias for add_class + +=item att_to_class ($att) + +Set the C<class> attribute to the value of attribute C<$att> + +=item add_att_to_class ($att) + +Add the value of attribute C<$att> to the C<class> attribute of the element + +=item move_att_to_class ($att) + +Add the value of attribute C<$att> to the C<class> attribute of the element +and delete the attribute + +=item tag_to_class + +Set the C<class> attribute of the element to the element tag + +=item add_tag_to_class + +Add the element tag to its C<class> attribute + +=item set_tag_class ($new_tag) + +Add the element tag to its C<class> attribute and sets the tag to C<$new_tag> + +=item in_class ($class) + +Return true (C<1>) if the element is in the class C<$class> (if C<$class> is +one of the tokens in the element C<class> attribute) + +=item tag_to_span + +Change the element tag tp C<span> and set its class to the old tag + +=item tag_to_div + +Change the element tag tp C<div> and set its class to the old tag + +=item DESTROY + +Frees the element from memory. + +=item start_tag + +Return the string for the start tag for the element, including +the C<< /> >> at the end of an empty element tag + +=item end_tag + +Return the string for the end tag of an element. For an empty +element, this returns the empty string (''). + +=item xml_string @optional_options + +Equivalent to C<< $elt->sprint( 1) >>, returns the string for the entire +element, excluding the element's tags (but nested element tags are present) + +The 'C<no_recurse>' option will only return the text of the element, not +of any included sub-elements (same as C<L<xml_text_only>>). + +=item inner_xml + +Another synonym for xml_string + +=item outer_xml + +An other synonym for sprint + +=item xml_text + +Return the text of the element, encoded (and processed by the current +C<L<output_filter>> or C<L<output_encoding>> options, without any tag. + +=item xml_text_only + +Same as C<L<xml_text>> except that the text returned doesn't include +the text of sub-elements. 
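+
+A short sketch of the various string forms, assuming C<$elt> is
+C<< <p>foo <b>bar</b></p> >>:
+
+    print $elt->sprint, "\n";         # <p>foo <b>bar</b></p>   (outer_xml)
+    print $elt->xml_string, "\n";     # foo <b>bar</b>          (inner_xml)
+    print $elt->text, "\n";           # foo bar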
+ +=item set_pretty_print ($style) + +Set the pretty print method, amongst 'C<none>' (default), 'C<nsgmls>', +'C<nice>', 'C<indented>', 'C<record>' and 'C<record_c>' + +pretty_print styles: + +=over 4 + +=item none + +the default, no C<\n> is used + +=item nsgmls + +nsgmls style, with C<\n> added within tags + +=item nice + +adds C<\n> wherever possible (NOT SAFE, can lead to invalid XML) + +=item indented + +same as C<nice> plus indents elements (NOT SAFE, can lead to invalid XML) + +=item record + +table-oriented pretty print, one field per line + +=item record_c + +table-oriented pretty print, more compact than C<record>, one record per line + +=back + +=item set_empty_tag_style ($style) + +Set the method to output empty tags, amongst 'C<normal>' (default), 'C<html>', +and 'C<expand>', + +C<normal> outputs an empty tag 'C<< <tag/> >>', C<html> adds a space +'C<< <tag /> >>' for elements that can be empty in XHTML and C<expand> outputs +'C<< <tag></tag> >>' + +=item set_remove_cdata ($flag) + +set (or unset) the flag that forces the twig to output CDATA sections as +regular (escaped) PCDATA + + +=item set_indent ($string) + +Set the indentation for the indented pretty print style (default is 2 spaces) + +=item set_quote ($quote) + +Set the quotes used for attributes. can be 'C<double>' (default) or 'C<single>' + +=item cmp ($elt) + + Compare the order of the 2 elements in a twig. + + C<$a> is the <A>..</A> element, C<$b> is the <B>...</B> element + + document $a->cmp( $b) + <A> ... </A> ... <B> ... </B> -1 + <A> ... <B> ... </B> ... </A> -1 + <B> ... </B> ... <A> ... </A> 1 + <B> ... <A> ... </A> ... </B> 1 + $a == $b 0 + $a and $b not in the same tree undef + +=item before ($elt) + +Return 1 if C<$elt> starts before the element, 0 otherwise. If the 2 elements +are not in the same twig then return C<undef>. + + if( $a->cmp( $b) == -1) { return 1; } else { return 0; } + +=item after ($elt) + +Return 1 if $elt starts after the element, 0 otherwise. If the 2 elements +are not in the same twig then return C<undef>. + + if( $a->cmp( $b) == -1) { return 1; } else { return 0; } + +=item other comparison methods + +=over 4 + +=item lt + +=item le + +=item gt + +=item ge + +=back + +=item path + +Return the element context in a form similar to XPath's short +form: 'C</root/tag1/../tag>' + +=item xpath + +Return a unique XPath expression that can be used to find the element +again. + +It looks like C</doc/sect[3]/title>: unique elements do not have an index, +the others do. + +=item flush + +flushes the twig up to the current element (strictly equivalent to +C<< $elt->root->flush >>) + +=item private methods + +Low-level methods on the twig: + +=over 4 + +=item set_parent ($parent) + +=item set_first_child ($first_child) + +=item set_last_child ($last_child) + +=item set_prev_sibling ($prev_sibling) + +=item set_next_sibling ($next_sibling) + +=item set_twig_current + +=item del_twig_current + +=item twig_current + +=item contains_text + +=back + +Those methods should not be used, unless of course you find some creative +and interesting, not to mention useful, ways to do it. + +=back + +=head2 cond + +Most of the navigation functions accept a condition as an optional argument +The first element (or all elements for C<L<children> > or +C<L<ancestors> >) that passes the condition is returned. + +The condition is a single step of an XPath expression using the XPath subset +defined by C<L<get_xpath>>. 
Additional conditions are: + +The condition can be + +=over 4 + +=item #ELT + +return a "real" element (not a PCDATA, CDATA, comment or pi element) + +=item #TEXT + +return a PCDATA or CDATA element + +=item regular expression + +return an element whose tag matches the regexp. The regexp has to be created +with C<qr//> (hence this is available only on perl 5.005 and above) + +=item code reference + +applies the code, passing the current element as argument, if the code returns +true then the element is returned, if it returns false then the code is applied +to the next candidate. + +=back + +=head2 XML::Twig::XPath + +XML::Twig implements a subset of XPath through the C<L<get_xpath>> method. + +If you want to use the whole XPath power, then you can use C<XML::Twig::XPath> +instead. In this case C<XML::Twig> uses C<XML::XPath> to execute XPath queries. +You will of course need C<XML::XPath> installed to be able to use C<XML::Twig::XPath>. + +See L<XML::XPath> for more information. + +The methods you can use are: + +=over 4 + +=item findnodes ($path) + +return a list of nodes found by C<$path>. + +=item findnodes_as_string ($path) + +return the nodes found reproduced as XML. The result is not guaranteed +to be valid XML though. + +=item findvalue ($path) + +return the concatenation of the text content of the result nodes + +=back + +In order for C<XML::XPath> to be used as the XPath engine the following methods +are included in C<XML::Twig>: + +in XML::Twig + +=over 4 + +=item getRootNode + +=item getParentNode + +=item getChildNodes + +=back + +in XML::Twig::Elt + +=over 4 + +=item string_value + +=item toString + +=item getName + +=item getRootNode + +=item getNextSibling + +=item getPreviousSibling + +=item isElementNode + +=item isTextNode + +=item isPI + +=item isPINode + +=item isProcessingInstructionNode + +=item isComment + +=item isCommentNode + +=item getTarget + +=item getChildNodes + +=item getElementById + +=back + +=head2 XML::Twig::XPath::Elt + +The methods you can use are the same as on C<XML::Twig::XPath> elements: + +=over 4 + +=item findnodes ($path) + +return a list of nodes found by C<$path>. + +=item findnodes_as_string ($path) + +return the nodes found reproduced as XML. The result is not guaranteed +to be valid XML though. + +=item findvalue ($path) + +return the concatenation of the text content of the result nodes + +=back + + +=head2 XML::Twig::Entity_list + +=over 4 + +=item new + +Create an entity list. + +=item add ($ent) + +Add an entity to an entity list. + +=item add_new_ent ($name, $val, $sysid, $pubid, $ndata, $param) + +Create a new entity and add it to the entity list + +=item delete ($ent or $tag). + +Delete an entity (defined by its name or by the Entity object) +from the list. + +=item print ($optional_filehandle) + +Print the entity list. + +=item list + +Return the list as an array + +=back + + +=head2 XML::Twig::Entity + +=over 4 + +=item new ($name, $val, $sysid, $pubid, $ndata, $param) + +Same arguments as the Entity handler for XML::Parser. + +=item print ($optional_filehandle) + +Print an entity declaration. + +=item name + +Return the name of the entity + +=item val + +Return the value of the entity + +=item sysid + +Return the system id for the entity (for NDATA entities) + +=item pubid + +Return the public id for the entity (for NDATA entities) + +=item ndata + +Return true if the entity is an NDATA entity + +=item param + +Return true if the entity is a parameter entity + + +=item text + +Return the entity declaration text. 
+ +=back + + +=head1 EXAMPLES + +Additional examples (and a complete tutorial) can be found on the +F<XML::Twig PageL<http://www.xmltwig.org/xmltwig/>> + +To figure out what flush does call the following script with an +XML file and an element name as arguments + + use XML::Twig; + + my ($file, $elt)= @ARGV; + my $t= XML::Twig->new( twig_handlers => + { $elt => sub {$_[0]->flush; print "\n[flushed here]\n";} }); + $t->parsefile( $file, ErrorContext => 2); + $t->flush; + print "\n"; + + +=head1 NOTES + +=head2 Subclassing XML::Twig + +Useful methods: + +=over 4 + +=item elt_class + +In order to subclass C<XML::Twig> you will probably need to subclass also +C<L<XML::Twig::Elt>>. Use the C<elt_class> option when you create the +C<XML::Twig> object to get the elements created in a different class +(which should be a subclass of C<XML::Twig::Elt>. + +=item add_options + +If you inherit C<XML::Twig> new method but want to add more options to it +you can use this method to prevent XML::Twig to issue warnings for those +additional options. + +=back + +=head2 DTD Handling + +There are 3 possibilities here. They are: + +=over 4 + +=item No DTD + +No doctype, no DTD information, no entity information, the world is simple... + +=item Internal DTD + +The XML document includes an internal DTD, and maybe entity declarations. + +If you use the load_DTD option when creating the twig the DTD information and +the entity declarations can be accessed. + +The DTD and the entity declarations will be C<flush>'ed (or C<print>'ed) either +as is (if they have not been modified) or as reconstructed (poorly, comments +are lost, order is not kept, due to it's content this DTD should not be viewed +by anyone) if they have been modified. You can also modify them directly by +changing the C<< $twig->{twig_doctype}->{internal} >> field (straight from +XML::Parser, see the C<Doctype> handler doc) + +=item External DTD + +The XML document includes a reference to an external DTD, and maybe entity +declarations. + +If you use the C<load_DTD> when creating the twig the DTD information and the +entity declarations can be accessed. The entity declarations will be +C<flush>'ed (or C<print>'ed) either as is (if they have not been modified) or +as reconstructed (badly, comments are lost, order is not kept). + +You can change the doctype through the C<< $twig->set_doctype >> method and +print the dtd through the C<< $twig->dtd_text >> or C<< $twig->dtd_print >> + methods. + +If you need to modify the entity list this is probably the easiest way to do it. + +=back + + +=head2 Flush + +Remember that element handlers are called when the element is CLOSED, so +if you have handlers for nested elements the inner handlers will be called +first. It makes it for example trickier than it would seem to number nested +sections (or clauses, or divs), as the titles in the inner sections are handled +before the outer sections. + + +=head1 BUGS + +=over 4 + +=item segfault during parsing + +This happens when parsing huge documents, or lots of small ones, with a version +of Perl before 5.16. + +This is due to a bug in the way weak references are handled in Perl itself. + +The fix is either to upgrade to Perl 5.16 or later (C<perlbrew> is a great +tool to manage several installations of perl on the same machine). + +An other, NOT RECOMMENDED, way of fixing the problem, is to switch off weak +references by writing C<XML::Twig::_set_weakrefs( 0);> at the top of the code. 
+This is totally unsupported, and may lead to other problems though, + +=item entity handling + +Due to XML::Parser behaviour, non-base entities in attribute values disappear if +they are not declared in the document: +C<att="val&ent;"> will be turned into C<< att => val >>, unless you use the +C<keep_encoding> argument to C<< XML::Twig->new >> + +=item DTD handling + +The DTD handling methods are quite bugged. No one uses them and +it seems very difficult to get them to work in all cases, including with +several slightly incompatible versions of XML::Parser and of libexpat. + +Basically you can read the DTD, output it back properly, and update entities, +but not much more. + +So use XML::Twig with standalone documents, or with documents referring to an +external DTD, but don't expect it to properly parse and even output back the +DTD. + +=item memory leak + +If you use a REALLY old Perl (5.005!) and +a lot of twigs you might find that you leak quite a lot of memory +(about 2Ks per twig). You can use the C<L<dispose> > method to free +that memory after you are done. + +If you create elements the same thing might happen, use the C<L<delete>> +method to get rid of them. + +Alternatively installing the C<Scalar::Util> (or C<WeakRef>) module on a version +of Perl that supports it (>5.6.0) will get rid of the memory leaks automagically. + +=item ID list + +The ID list is NOT updated when elements are cut or deleted. + +=item change_gi + +This method will not function properly if you do: + + $twig->change_gi( $old1, $new); + $twig->change_gi( $old2, $new); + $twig->change_gi( $new, $even_newer); + +=item sanity check on XML::Parser method calls + +XML::Twig should really prevent calls to some XML::Parser methods, especially +the C<setHandlers> method. + +=item pretty printing + +Pretty printing (at least using the 'C<indented>' style) is hard to get right! +Only elements that belong to the document will be properly indented. Printing +elements that do not belong to the twig makes it impossible for XML::Twig to +figure out their depth, and thus their indentation level. + +Also there is an unavoidable bug when using C<flush> and pretty printing for +elements with mixed content that start with an embedded element: + + <elt><b>b</b>toto<b>bold</b></elt> + + will be output as + + <elt> + <b>b</b>toto<b>bold</b></elt> + +if you flush the twig when you find the C<< <b> >> element + + +=back + +=head1 Globals + +These are the things that can mess up calling code, especially if threaded. +They might also cause problem under mod_perl. + +=over 4 + +=item Exported constants + +Whether you want them or not you get them! These are subroutines to use +as constant when creating or testing elements + + PCDATA return '#PCDATA' + CDATA return '#CDATA' + PI return '#PI', I had the choice between PROC and PI :--( + +=item Module scoped values: constants + +these should cause no trouble: + + %base_ent= ( '>' => '>', + '<' => '<', + '&' => '&', + "'" => ''', + '"' => '"', + ); + CDATA_START = "<![CDATA["; + CDATA_END = "]]>"; + PI_START = "<?"; + PI_END = "?>"; + COMMENT_START = "<!--"; + COMMENT_END = "-->"; + +pretty print styles + + ( $NSGMLS, $NICE, $INDENTED, $INDENTED_C, $WRAPPED, $RECORD1, $RECORD2)= (1..7); + +empty tag output style + + ( $HTML, $EXPAND)= (1..2); + +=item Module scoped values: might be changed + +Most of these deal with pretty printing, so the worst that can +happen is probably that XML output does not look right, but is +still valid and processed identically by XML processors. 
+ +C<$empty_tag_style> can mess up HTML bowsers though and changing C<$ID> +would most likely create problems. + + $pretty=0; # pretty print style + $quote='"'; # quote for attributes + $INDENT= ' '; # indent for indented pretty print + $empty_tag_style= 0; # how to display empty tags + $ID # attribute used as an id ('id' by default) + +=item Module scoped values: definitely changed + +These 2 variables are used to replace tags by an index, thus +saving some space when creating a twig. If they really cause +you too much trouble, let me know, it is probably possible to +create either a switch or at least a version of XML::Twig that +does not perform this optimization. + + %gi2index; # tag => index + @index2gi; # list of tags + +=back + +If you need to manipulate all those values, you can use the following methods on the +XML::Twig object: + +=over 4 + +=item global_state + +Return a hashref with all the global variables used by XML::Twig + +The hash has the following fields: C<pretty>, C<quote>, C<indent>, +C<empty_tag_style>, C<keep_encoding>, C<expand_external_entities>, +C<output_filter>, C<output_text_filter>, C<keep_atts_order> + +=item set_global_state ($state) + +Set the global state, C<$state> is a hashref + +=item save_global_state + +Save the current global state + +=item restore_global_state + +Restore the previously saved (using C<Lsave_global_state>> state + +=back + +=head1 TODO + +=over 4 + +=item SAX handlers + +Allowing XML::Twig to work on top of any SAX parser + +=item multiple twigs are not well supported + +A number of twig features are just global at the moment. These include +the ID list and the "tag pool" (if you use C<change_gi> then you change the tag +for ALL twigs). + +A future version will try to support this while trying not to be to +hard on performance (at least when a single twig is used!). + +=back + +=head1 AUTHOR + +Michel Rodriguez <mirod@cpan.org> + +=head1 LICENSE + +This library is free software; you can redistribute it and/or modify +it under the same terms as Perl itself. + +Bug reports should be sent using: +F<RT L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=XML-Twig>> + +Comments can be sent to mirod@cpan.org + +The XML::Twig page is at L<http://www.xmltwig.org/xmltwig/> +It includes the development version of the module, a slightly better version +of the documentation, examples, a tutorial and a: +F<Processing XML efficiently with Perl and XML::Twig: +L<http://www.xmltwig.org/xmltwig/tutorial/index.html>> + +=head1 SEE ALSO + +Complete docs, including a tutorial, examples, an easier to use HTML version of +the docs, a quick reference card and a FAQ are available at +L<http://www.xmltwig.org/xmltwig/> + +git repository at L<http://github.com/mirod/xmltwig> + +L<XML::Parser>, L<XML::Parser::Expat>, L<XML::XPath>, L<Encode>, +L<Text::Iconv>, L<Scalar::Utils> + + +=head2 Alternative Modules + +XML::Twig is not the only XML::Processing module available on CPAN (far from +it!). + +The main alternative I would recommend is L<XML::LibXML>. + +Here is a quick comparison of the 2 modules: + +XML::LibXML, actually C<libxml2> on which it is based, sticks to the standards, +and implements a good number of them in a rather strict way: XML, XPath, DOM, +RelaxNG, I must be forgetting a couple (XInclude?). It is fast and rather +frugal memory-wise. + +XML::Twig is older: when I started writing it XML::Parser/expat was the only +game in town. 
It implements XML and that's about it (plus a subset of XPath, +and you can use XML::Twig::XPath if you have XML::XPathEngine installed for full +support). It is slower and requires more memory for a full tree than +XML::LibXML. On the plus side (yes, there is a plus side!) it lets you process +a big document in chunks, and thus let you tackle documents that couldn't be +loaded in memory by XML::LibXML, and it offers a lot (and I mean a LOT!) of +higher-level methods, for everything, from adding structure to "low-level" XML, +to shortcuts for XHTML conversions and more. It also DWIMs quite a bit, getting +comments and non-significant whitespaces out of the way but preserving them in +the output for example. As it does not stick to the DOM, is also usually leads +to shorter code than in XML::LibXML. + +Beyond the pure features of the 2 modules, XML::LibXML seems to be preferred by +"XML-purists", while XML::Twig seems to be more used by Perl Hackers who have +to deal with XML. As you have noted, XML::Twig also comes with quite a lot of +docs, but I am sure if you ask for help about XML::LibXML here or on Perlmonks +you will get answers. + +Note that it is actually quite hard for me to compare the 2 modules: on one hand +I know XML::Twig inside-out and I can get it to do pretty much anything I need +to (or I improve it ;--), while I have a very basic knowledge of XML::LibXML. +So feature-wise, I'd rather use XML::Twig ;--). On the other hand, I am +painfully aware of some of the deficiencies, potential bugs and plain ugly code +that lurk in XML::Twig, even though you are unlikely to be affected by them +(unless for example you need to change the DTD of a document programmatically), +while I haven't looked much into XML::LibXML so it still looks shinny and clean +to me. + +That said, if you need to process a document that is too big to fit memory +and XML::Twig is too slow for you, my reluctant advice would be to use "bare" +XML::Parser. It won't be as easy to use as XML::Twig: basically with XML::Twig +you trade some speed (depending on what you do from a factor 3 to... none) +for ease-of-use, but it will be easier IMHO than using SAX (albeit not +standard), and at this point a LOT faster (see the last test in +L<http://www.xmltwig.org/article/simple_benchmark/>). 
+ +=cut + + Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/inst/xls/ExampleExcelFile_1900.xls and /tmp/J1mSmQsMZw/gdata-2.17.0/inst/xls/ExampleExcelFile_1900.xls differ Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/inst/xls/ExampleExcelFile_1900.xlsx and /tmp/J1mSmQsMZw/gdata-2.17.0/inst/xls/ExampleExcelFile_1900.xlsx differ Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/inst/xls/ExampleExcelFile_1904.xls and /tmp/J1mSmQsMZw/gdata-2.17.0/inst/xls/ExampleExcelFile_1904.xls differ Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/inst/xls/ExampleExcelFile_1904.xlsx and /tmp/J1mSmQsMZw/gdata-2.17.0/inst/xls/ExampleExcelFile_1904.xlsx differ Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/inst/xls/ExampleExcelFile.xls and /tmp/J1mSmQsMZw/gdata-2.17.0/inst/xls/ExampleExcelFile.xls differ Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/inst/xls/ExampleExcelFile.xlsx and /tmp/J1mSmQsMZw/gdata-2.17.0/inst/xls/ExampleExcelFile.xlsx differ Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/inst/xls/wide.xls and /tmp/J1mSmQsMZw/gdata-2.17.0/inst/xls/wide.xls differ Binary files /tmp/WlnUgzz6bE/gdata-2.13.3/inst/xls/wide.xlsx and /tmp/J1mSmQsMZw/gdata-2.17.0/inst/xls/wide.xlsx differ diff -Nru gdata-2.13.3/man/first.Rd gdata-2.17.0/man/first.Rd --- gdata-2.13.3/man/first.Rd 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/man/first.Rd 2015-04-28 06:11:59.000000000 +0000 @@ -0,0 +1,55 @@ +\name{first} +\alias{first} +\alias{last} +\title{Return first or last element of an object} +\description{ + Return first or last element of an object. These functions are convenience + wrappers for \code{head(x, n=1, ...)} and \code{tail(x, n=1, ...)}. + } +\usage{ +first(x, n=1, ...) +last(x, n=1, ...) +} +\arguments{ + \item{x}{data object} + \item{n}{a single integer. If positive, size for the resulting object: + number of elements for a vector (including lists), rows for a + matrix or data frame or lines for a function. If negative, + all but the 'n' last/first number of elements of 'x'.} + \item{...}{arguments to be passed to or from other methods.} +} +\value{ + An object (usually) like 'x' but generally smaller. +} +\author{ + Gregory R. Warnes \email{greg@warnes.net} +} +\seealso{ + \code{\link[utils]{head}}, + \code{\link[utils]{tail}}, + \code{\link{left}}, + \code{\link{right}} +} +\examples{ +## works for vectors.. +v <- 1:10 +first(v) +last(v) + +## and for lists +l <- list(a=1, b=2, c=3) +first(l) +last(l) + +## and data.frames +df <- data.frame(a=1:2, b=3:4, c=5:6) +first(df) +last(df) + +## and matrixes +m <- as.matrix(df) +first(m) +last(m) + +} +\keyword{ manip } diff -Nru gdata-2.13.3/man/humanReadable.Rd gdata-2.17.0/man/humanReadable.Rd --- gdata-2.13.3/man/humanReadable.Rd 2014-01-03 19:32:44.000000000 +0000 +++ gdata-2.17.0/man/humanReadable.Rd 2015-04-25 06:34:06.000000000 +0000 @@ -1,44 +1,43 @@ -% humanReadable.Rd -%-------------------------------------------------------------------------- -% What: Print byte size in human readable format man page -% $Id$ -% Time-stamp: <2008-12-30 13:26:35 ggorjan> -%-------------------------------------------------------------------------- - \name{humanReadable} \alias{humanReadable} -\title{Print byte size in human readable format} +\title{Print Byte Size in Human Readable Format} \description{ -\code{humanReadable} converts byte size in human readable format such as +\code{humanReadable} converts integer byte sizes to a human readable units such as kB, MB, GB, etc. 
} \usage{ - -humanReadable(x, standard="SI", digits=1, width=3, sep=" ") - +humanReadable(x, units="auto", standard=c("IEC", "SI", "Unix"), + digits=1, width=NULL, sep=" ", justify=c("right", "left") ) } \arguments{ \item{x}{integer, byte size} - \item{standard}{character, "SI" for powers of 1000 or anything else for - powers of 1024, see details} + \item{standard}{character, "IEC" for powers of 1024 ('MiB'), "SI" for + powers of 1000 ('MB'), or "Unix" for powers of 1024 ('M'). See + details.} + \item{units}{character, unit to use for all values (optional), one of + "auto", "bytes", or an appropriate unit corresponding to + \code{standard}.} \item{digits}{integer, number of digits after decimal point} \item{width}{integer, width of number string} \item{sep}{character, separator between number and unit} + \item{justify}{two-element vector specifying the alignment for the number + and unit components of the size. Each element should be one of + "none", "left", "right", or "center"} } \details{ -Basic unit used to store information in computers is a bit. Bits are +The basic unit used to store information in computers is a bit. Bits are represented as zeroes and ones - binary number system. Although the binary number system is not the same as the decimal number system, decimal prefixes -for binary multiples such as kilo and mega are often used. In the decimal system +for binary multiples such as kilo and mega are often used. In the decimal system kilo represents 1000, which is close to \eqn{1024 = 2^{10}} in the binary system. This sometimes causes problems as it is not clear which powers (2 or 10) are used in a notation like 1 kB. To overcome this problem International Electrotechnical @@ -66,14 +65,30 @@ } where Zi and Yi are GNU extensions to IEC. To get the output in the decimal -system (powers of 1000) use \code{standard="SI"}. Otherwise IEC standard -(powers of 1024) is used. +system (powers of 1000) use \code{standard="SI"}. To obtain IEC standard +(powers of 1024) use \code{standard="IEC"}. + +In addition, single-character units are provided that follow (and +extend) the Unix pattern (use \code{standard="Unix"}): + +\tabular{lrcll}{ +Name \tab System \tab Symbol \tab Size \tab Conversion \cr +byte \tab binary \tab B \tab \eqn{2^3} \tab 8 bits \cr +kibibyte \tab binary \tab K \tab \eqn{2^{10}} \tab 1024 bytes \cr +mebibyte \tab binary \tab M \tab \eqn{(2^{10})^2} \tab 1024 kibibytes\cr +gibibyte \tab binary \tab G \tab \eqn{(2^{10})^3} \tab 1024 mebibytes\cr +tebibyte \tab binary \tab T \tab \eqn{(2^{10})^4} \tab 1024 gibibytes\cr +pebibyte \tab binary \tab P \tab \eqn{(2^{10})^5} \tab 1024 tebibytes\cr +exbibyte \tab binary \tab E \tab \eqn{(2^{10})^6} \tab 1024 pebibytes\cr +zebibyte \tab binary \tab Z \tab \eqn{(2^{10})^7} \tab 1024 exbibytes\cr +yobibyte \tab binary \tab Y \tab \eqn{(2^{10})^8} \tab 1024 zebibytes\cr +} For printout both \code{digits} and \code{width} can be specified. If \code{width} is \code{NULL}, all values have the given number of digits. If \code{width} is not \code{NULL}, output is rounded to a given width and -formated similar to human readable format of \code{ls}, \code{df} or -\code{du} shell commands. +formatted similarly to the human readable format of the Unix \code{ls}, +\code{df} or \code{du} shell commands. } @@ -96,33 +111,40 @@ } -\author{Ales Korosec and Gregor Gorjanc} +\author{Ales Korosec, Gregor Gorjanc, and Gregory R.
Warnes + \email{greg@warnes.net}} + \seealso{ - \code{\link{object.size}}, \code{\link[gdata]{ll}} + \code{\link{object.size}} in package 'gdata', + \code{\link[utils]{object.size}} in package 'utils', + \code{\link[gdata]{ll}} } \examples{ -baseSI <- 10 -powerSI <- seq(from=3, to=27, by=3) -SI0 <- (baseSI)^powerSI -k <- length(SI0) - 1 -SI1 <- SI0 - SI0 / c(2, runif(n=k, min=1.01, max=5.99)) -SI2 <- SI0 + SI0 / c(2, runif(n=k, min=1.01, max=5.99)) - -baseIEC <- 2 -powerIEC <- seq(from=10, to=90, by=10) -IEC0 <- (baseIEC)^powerIEC -IEC1 <- IEC0 - IEC0 / c(2, runif(n=k, min=1.01, max=5.99)) -IEC2 <- IEC0 + IEC0 / c(2, runif(n=k, min=1.01, max=5.99)) - -cbind(humanReadable(x=SI1, width=NULL, digits=3), - humanReadable(x=SI0, width=NULL, digits=2), - humanReadable(x=SI2, width=NULL, digits=1), - humanReadable(x=IEC1, standard="IEC", width=7, digits=3), - humanReadable(x=IEC0, standard="IEC", width=7, digits=2), - humanReadable(x=IEC2, standard="IEC", width=7, digits=1)) +# Simple example: maximum addressible size of 32 bit pointer +humanReadable(2^32-1) +humanReadable(2^32-1, standard="IEC") +humanReadable(2^32-1, standard="SI") +humanReadable(2^32-1, standard="Unix") + +humanReadable(2^32-1, unit="MiB") +humanReadable(2^32-1, standard="IEC", unit="MiB") +humanReadable(2^32-1, standard="SI", unit="MB") +humanReadable(2^32-1, standard="Unix", unit="M") + +# Vector of sizes +matrix(humanReadable(c(60810, 124141, 124, 13412513), width=4)) +matrix(humanReadable(c(60810, 124141, 124, 13412513), width=4, unit="KiB")) + +# Specify digits rather than width +matrix(humanReadable(c(60810, 124141, 124, 13412513), width=NULL, digits=2)) + +# Change the justification +matrix(humanReadable(c(60810, 124141, 124, 13412513), width=NULL, + justify=c("right", "right") )) + } \keyword{misc} diff -Nru gdata-2.13.3/man/left.Rd gdata-2.17.0/man/left.Rd --- gdata-2.13.3/man/left.Rd 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/man/left.Rd 2015-04-13 23:37:08.000000000 +0000 @@ -0,0 +1,39 @@ +\name{left} +\alias{right} +\alias{left} +\title{Return the leftmost or rightmost columns of a matrix or dataframe} +\description{ + Return the leftmost or rightmost or columns of a matrix or dataframe +} +\usage{ +right(x, n = 6) +left(x, n=6) +} +\arguments{ + \item{x}{Matrix or dataframe} + \item{n}{Number of columns to return} +} +\value{ + An object consisting of the leftmost or rightmost \code{n} columns + of \code{x}. +} +\author{ + Gregory R. 
Warnes \email{greg@warnes.net} +} +\seealso{ + \code{\link{first}}, + \code{\link{last}}, + \code{\link[utils]{head}}, + \code{\link[utils]{tail}} +} +\examples{ + m <- matrix( 1:100, ncol=10 ) + colnames(m) <- paste("Col",1:10, sep="_") + left(m) + right(m) + + d <- as.data.frame(m) + left(d) + right(d) +} +\keyword{ manip } diff -Nru gdata-2.13.3/man/object.size.Rd gdata-2.17.0/man/object.size.Rd --- gdata-2.13.3/man/object.size.Rd 2014-01-03 19:32:43.000000000 +0000 +++ gdata-2.17.0/man/object.size.Rd 2015-04-25 06:52:28.000000000 +0000 @@ -1,70 +1,99 @@ -% File src/library/utils/man/object.size.Rd +% Come material taken from src/library/utils/man/object.size.Rd % Part of the R package, http://www.R-project.org % Copyright 1995-2007 R Core Development Team % Distributed under GPL 2 or later \name{object.size} \alias{object.size} -\alias{print.object_size} -\alias{c.object_size} -\alias{as.object_size} -\alias{is.object_size} +\alias{c.object_sizes} +\alias{as.object_sizes} +\alias{is.object_sizes} +\alias{format.object_sizes} +\alias{print.object_sizes} -\title{Report the Space Allocated for an Object} +\title{Report the Space Allocated for Objects} \description{ - Provides an estimate of the memory that is being used to store an \R object. + Provides an estimate of the memory that is being used to store \R objects. } \usage{ object.size(\dots) -\method{print}{object_size}(x, quote=FALSE, humanReadable, \dots) +\method{is}{object_sizes}(x) + +\method{as}{object_sizes}(x) + +\method{c}{object_sizes}(\dots, recursive=FALSE) + +\method{format}{object_sizes}(x, humanReadable=getOption("humanReadable"), standard="IEC", units, + digits=1, width=NULL, sep=" ", justify = c("right", "left"), +\dots) + +\method{print}{object_sizes}(x, quote=FALSE, humanReadable=getOption("humanReadable"), + standard="IEC", units, digits=1, width=NULL, sep=" ", + justify = c("right", "left"), \dots) + } \arguments{ - \item{\dots}{\code{object.size}: \R objects; \code{print}; arguments - to be passed to or from other methods.} + \item{\dots}{\code{object.size}: \R objects; + \code{print} and \code{format}: arguments to be passed to other + methods. + } \item{x}{output from \code{object.size}} \item{quote}{logical, indicating whether or not the result should be printed with surrounding quotes.} \item{humanReadable}{logical, use the \dQuote{human readable} format.} + \item{standard,units,digits,width,sep,justify}{See the man page for + \code{\link{humanReadable}}. + } + \item{recursive}{See the man page for \code{\link[base]{c}}. } } \details{ - This is a modified copy from the utils package in R as fo 2008-12-15. + \emph{This is a modified copy of the man page for utils::object.size in R + 2.2.1.} + + Exactly which parts of the memory allocation should be attributed + to which object is not clear-cut. This function merely provides a + rough indication: it should be reasonably accurate for atomic + vectors, but does not detect if elements of a list are shared, for + example. (Sharing amongst elements of a character vector is taken + into account, but not that between character vectors in a single + object.) - Exactly which parts of the memory allocation should be attributed to - which object is not clear-cut. This function merely provides a rough - indication: it should be reasonably accurate for atomic vectors, but - does not detect if elements of a list are shared, for example. - (Sharing amongst elements of a character vector is taken into account, - but not that between character vectors in a single object.) 
- - The calculation is of the size of the object, and excludes the space - needed to store its name in the symbol table. + The calculation is of the size of the object, and excludes the + space needed to store its name in the symbol table. - Associated space (e.g. the environment of a function and what the + Associated space (e.g., the environment of a function and what the pointer in a \code{EXTPTRSXP} points to) is not included in the calculation. - Object sizes are larger on 64-bit platforms than 32-bit ones, but will - very likely be the same on different platforms with the same word - length and pointer size. + Object sizes are larger on 64-bit builds than 32-bit ones, but + will very likely be the same on different platforms with the same + word length and pointer size. % Modificitaion start - Class of returned object is \code{c("byte", "numeric")} with + \emph{Changes} + + Class of returned object is \code{c("object_sizes", "numeric")} with appropriate \code{print} and \code{c} methods. By default \code{object.size} outputs size in bytes, but human readable format similar to \code{ls}, \code{df} or \code{du} shell - commands can be invoked with \code{options(humanReadable=TRUE)}. + commands can be displayed by calling \code{humanReadable} directly, + calling \code{print} with the argument \code{humanReadable=TRUE}, or + by setting \code{options(humanReadable=TRUE)}. + % Modificitaion end } \value{ - An object of class \code{"object.size"} with a length-one double value, - an estimate of the memory allocation attributable to the object in bytes. + A numeric vector class \code{c("object_sizes", "numeric")} containing + estimated memory allocation attributable to the objects in bytes. } \seealso{ - \code{\link{Memory-limits}} for the design limitations on object size. + \code{\link[utils]{object.size}} in package 'utils' for the standard + version of this function, + \code{\link{Memory-limits}} for the design limitations on object size, \code{\link{humanReadable}} for human readable format. 
} @@ -72,14 +101,35 @@ object.size(letters) object.size(ls) ## find the 10 largest objects in the base package -z <- sapply(ls("package:base"), function(x) - object.size(get(x, envir = baseenv()))) -(tmp <- as.matrix(rev(sort(z))[1:10])) +allObj <- sapply(ls("package:base"), + function(x) + object.size(get(x, envir = baseenv())) + ) + +( bigObj <- as.object_sizes(rev(sort(allObj))[1:10] ) ) +print(bigObj, humanReadable=TRUE) + + +as.object_sizes(14567567) + +\dontshow{ + optionsOrig <- options("humanReadable") +} -as.object_size(14567567) options(humanReadable=TRUE) -(z <- object.size(letters, c(letters, letters), rep(letters, 100), rep(letters, 10000))) -is.object_size(z) -as.object_size(14567567) +( + z <- object.size(letters, + c(letters, letters), + rep(letters, 100), + rep(letters, 10000) + ) +) +is.object_sizes(z) +as.object_sizes(14567567) + +\dontshow{ + options(optionsOrig) +} + } \keyword{utilities} diff -Nru gdata-2.13.3/man/reorder.Rd gdata-2.17.0/man/reorder.Rd --- gdata-2.13.3/man/reorder.Rd 2014-01-03 19:32:44.000000000 +0000 +++ gdata-2.17.0/man/reorder.Rd 2015-04-28 05:40:21.000000000 +0000 @@ -1,5 +1,3 @@ -% $Id: reorder.Rd 1435 2010-05-02 06:11:26Z warnes $ - \name{reorder.factor} \alias{reorder.factor} \title{Reorder the Levels of a Factor} @@ -7,13 +5,7 @@ Reorder the levels of a factor } \usage{ -\method{reorder}{factor}(x, - X, - FUN, - ..., - order=is.ordered(x), - new.order, - sort=mixedsort) +\method{reorder}{factor}(x, X, FUN, ..., order=is.ordered(x), new.order, sort=mixedsort) } \arguments{ \item{x}{factor} @@ -38,50 +30,59 @@ The groups are then sorted by this value, and the resulting order is used for the new factor level names. - If \code{new.order} is provided: For a numeric vector, the new factor - level names are constructed by reordering the factor levels according - to the numeric values. For vectors, \code{new.order} gives the list of - new factor level names. In either case levels omitted from - \code{new.order} will become missing (\code{NA}) values. - - If \code{sort} is provided (as it is by default): The new - factor level names are generated by applying the supplied function - to the existing factor level names. With \code{sort=mixedsort} the - factor levels are sorted so that combined numeric and character - strings are sorted in according to character rules on the character - sections (including ignoring case), and the numeric rules for the - numeric sections. See \code{\link[gtools]{mixedsort}} for details. -} -\value{ - A new factor with reordered levels + If \code{new.order} is a numeric vector, the new factor level names + are constructed by reordering the factor levels according to the + numeric values. If \code{new.order} is a character vector, + \code{new.order} gives the list of new factor level names. In either + case levels omitted from \code{new.order} will become missing + (\code{NA}) values. + + If \code{sort} is provided (as it is by default): The new factor level + names are generated by applying the function specified by \code{sort} + to the existing factor level \emph{names}. With \code{sort=mixedsort} + (the default) the factor levels are sorted so that combined numeric + and character strings are sorted according to character rules on + the character sections (including ignoring case), and the numeric + rules for the numeric sections. See \code{\link[gtools]{mixedsort}} + for details. } +\value{ A new factor with reordered levels } \author{Gregory R.
Warnes \email{greg@warnes.net}} -\seealso{\code{\link{factor}} and \code{\link[stats]{reorder}}} +\seealso{ + \code{\link{factor}} + and + \code{\link[stats]{reorder}} +} \examples{ + +\dontshow{ + set.seed(123456) +} + # Create a 4 level example factor trt <- factor( sample( c("PLACEBO", "300 MG", "600 MG", "1200 MG"), 100, replace=TRUE ) ) summary(trt) # Note that the levels are not in a meaningful order. - # Change the order to something useful - # default "mixedsort" ordering + # Change the order to something useful.. + # - default "mixedsort" ordering trt2 <- reorder(trt) summary(trt2) - # using indexes: + # - using indexes: trt3 <- reorder(trt, new.order=c(4, 2, 3, 1)) summary(trt3) - # using label names: + # - using label names: trt4 <- reorder(trt, new.order=c("PLACEBO", "300 MG", "600 MG", "1200 MG")) summary(trt4) - # using frequency - trt5 <- reorder(trt, X=as.numeric(trt), FUN=length) + # - using frequency + trt5 <- reorder(trt, X=rnorm(100), FUN=mean) summary(trt5) - # drop out the '300 MG' level + # Drop out the '300 MG' level trt6 <- reorder(trt, new.order=c("PLACEBO", "600 MG", "1200 MG")) summary(trt6) } diff -Nru gdata-2.13.3/man/upperTriangle.Rd gdata-2.17.0/man/upperTriangle.Rd --- gdata-2.13.3/man/upperTriangle.Rd 2014-01-03 19:32:43.000000000 +0000 +++ gdata-2.17.0/man/upperTriangle.Rd 2015-06-29 22:33:46.000000000 +0000 @@ -8,15 +8,18 @@ Extract or replace the upper/lower triangular portion of a matrix } \usage{ -upperTriangle(x, diag=FALSE) -upperTriangle(x, diag=FALSE) <- value -lowerTriangle(x, diag=FALSE) -lowerTriangle(x, diag=FALSE) <- value +upperTriangle(x, diag=FALSE, byrow=FALSE) +upperTriangle(x, diag=FALSE, byrow=FALSE) <- value +lowerTriangle(x, diag=FALSE, byrow=FALSE) +lowerTriangle(x, diag=FALSE, byrow=FALSE) <- value } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{Matrix} \item{diag}{Logical. If \code{TRUE}, include the matrix diagonal.} + \item{byrow}{Logical. If \code{FALSE}, return/replace elements in + column-wise order. If \code{TRUE}, return/replace elements in + row-wise order.} \item{value}{Either a single value or a vector of length equal to that of the current upper/lower triangular. Should be of a mode which can be coerced to that of \code{x}.} @@ -25,19 +28,35 @@ \value{ \code{upperTriangle(x)} and \code{lowerTriangle(x)} return the upper or lower triangle of matrix x, respectively. The assignment forms - replace the upper or lower traingular area of the - matrix with the provided value(s). + replace the upper or lower triangular area of the + matrix with the provided value(s). } +\note{ + By default, the elements are returned/replaced in R's default column-wise order. Thus + \preformatted{ lowerTriangle(x) <- upperTriangle(x)} + will not yield a symmetric matrix. Instead use: + \preformatted{ lowerTriangle(x) <- upperTriangle(x, byrow=TRUE)} + or equivalently: + \preformatted{ lowerTriangle(x, byrow=TRUE) <- upperTriangle(x)} +} + \author{Gregory R. 
Warnes \email{greg@warnes.net}} -\seealso{ \code{\link[base]{diag}} } +\seealso{ + \code{\link[base]{diag}}, + \code{\link[base]{lower.tri}}, + \code{\link[base]{upper.tri}} +} \examples{ x <- matrix( 1:25, nrow=5, ncol=5) x upperTriangle(x) upperTriangle(x, diag=TRUE) + upperTriangle(x, diag=TRUE, byrow=TRUE) + lowerTriangle(x) lowerTriangle(x, diag=TRUE) + lowerTriangle(x, diag=TRUE, byrow=TRUE) upperTriangle(x) <- NA x @@ -51,5 +70,11 @@ lowerTriangle(x, diag=TRUE) <- 1:15 x + ## Copy lower triangle into upper triangle to make + ## the matrix (diagonally) symmetric + x <- matrix(LETTERS[1:25], nrow=5, ncol=5, byrow=TRUE) + x + lowerTriangle(x) = upperTriangle(x, byrow=TRUE) + x } \keyword{array} diff -Nru gdata-2.13.3/man/write.fwf.Rd gdata-2.17.0/man/write.fwf.Rd --- gdata-2.13.3/man/write.fwf.Rd 2014-01-03 19:32:43.000000000 +0000 +++ gdata-2.17.0/man/write.fwf.Rd 2015-04-23 02:37:51.000000000 +0000 @@ -1,7 +1,7 @@ % write.fwf.Rd %-------------------------------------------------------------------------- % What: Write fixed width format man page -% $Id: write.fwf.Rd 1459 2010-11-12 19:08:12Z warnes $ +% $Id: write.fwf.Rd 1928 2015-04-14 22:02:01Z warnes $ % Time-stamp: <2008-08-05 12:40:32 ggorjan> %-------------------------------------------------------------------------- @@ -23,7 +23,7 @@ write.fwf(x, file="", append=FALSE, quote=FALSE, sep=" ", na="", rownames=FALSE, colnames=TRUE, rowCol=NULL, justify="left", formatInfo=FALSE, quoteInfo=TRUE, width=NULL, eol="\n", - qmethod=c("escape", "double"), \dots) + qmethod=c("escape", "double"), scientific=TRUE, \dots) } @@ -54,6 +54,8 @@ '"escape"' (default), in which case the quote character is escaped in C style by a backslash, or '"double"', in which case it is doubled. You can specify just the initial letter.} + \item{scientific}{logical, if TRUE, allow numeric values to be + formatted using scientific notation.} \item{\dots}{further arguments to \code{\link{format.info}} and \code{\link{format}} } @@ -61,10 +63,8 @@ \details{ -*F*ixed *w*idth *f*ormat is not used widely anymore. Use some other -format (say *c*omma *s*eparated *v*alues; see \code{\link{read.csv}}) if -you can. However, if you need fixed width format then \code{write.fwf} -can help you. +While *F*ixed *w*idth *f*ormat is no longer widely used, it remains +common in some disciplines. Output is similar to \code{print(x)} or \code{format(x)}. Formatting is done completely by \code{\link{format}} on a column basis. Columns in @@ -217,15 +217,22 @@ ## Force wider columns write.fwf(x=testData[, 1:5], width=20) + ## Show effect of 'scienfic' option + testData$num3 <- testData$num3 * 1e8 + write.fwf(testData, scientific=TRUE) + write.fwf(testData, scientific=FALSE) + testData$num3 <- testData$num3 / 1e8 + ## Write to file and report format and fixed width information file <- tempfile() formatInfo <- write.fwf(x=testData, file=file, formatInfo=TRUE) + formatInfo ## Read exported data back to R (note +1 due to separator) ## ... without header read.fwf(file=file, widths=formatInfo$width + 1, header=FALSE, skip=1, strip.white=TRUE) - + ## ... 
with header - via postimport modfication tmp <- read.fwf(file=file, widths=formatInfo$width + 1, skip=1, strip.white=TRUE) diff -Nru gdata-2.13.3/MD5 gdata-2.17.0/MD5 --- gdata-2.13.3/MD5 2014-04-06 06:00:49.000000000 +0000 +++ gdata-2.17.0/MD5 2015-07-04 07:51:58.000000000 +0000 @@ -1,11 +1,8 @@ -17f7c7f2a632f33505b7fa7436c018fc *ChangeLog -25a90c79f7e1fc0249a402d727ca2654 *DESCRIPTION +36a8c226fb2da48182140b8c2ccd925c *DESCRIPTION 905fe9c5be6e143737163bc6317e6640 *INSTALL -379df67a91f590d577221ce1002faed1 *NAMESPACE -7be64b29e9fe507512098c4f37595570 *NEWS +1383580c183d48967038864bca34c604 *NAMESPACE 92e3ca5e31d594044b8325a441142c37 *R/Args.R 94976a0bed5195b50511e200d1711280 *R/ConvertMedUnits.R -25491d8b971cbcb3a8594bb7bcaf5d07 *R/aggregate.table.R 38de1344cd5bc79f32a0b42e48b43965 *R/ans.R 78e0c21cf9e4693553a4174d9d3b3c80 *R/bindData.R df4bed53c71599dde15cbf23b4ca645a *R/case.R @@ -17,39 +14,42 @@ 3896b0c9e1e81f87adb9c876f3ecfbda *R/elem.R 0388011f483caa3314a0688a376f7cc5 *R/env.R 7f43c48b182d910961f643876fefbdf9 *R/findPerl.R +d3504262ff8d418a947fe67b33f9a188 *R/first.R 4f9c6613afb87d8360ea897bbc485fe7 *R/frameApply.R d6474a86ab986d0cbcc8712797bbb57b *R/getDateTimeParts.R -a4f086ff4289d532ac61ca2ecc5a8862 *R/installXLSXsupport.R +de896d4d3f1a2e520546d9bad8de34e0 *R/humanReadable.R +773a36e322ff8193f683a52d7027ee20 *R/installXLSXsupport.R 037266e58fb3a611b3a56bb09c898abd *R/interleave.R e9594e1749df0f487eaab95db4253e59 *R/is.what.R 67c1d25af3bb35db744d5534991d3b20 *R/keep.R +b7b1031b972faf2efe7445fb5f396132 *R/left.R b8c1e8dbe508af1afa208883503679ec *R/ll.R b75ac1b645046c8028d8014de255bd25 *R/ls.funs.R -7b718035271b60738818f9973fc656fa *R/mapLevels.R +c88c8f75673c31e0f04a8799a91d1f60 *R/mapLevels.R e7d2246f66781fe99981b2dd083dd6cb *R/matchcols.R afe62c0075aaa73974eabad0c53cf68c *R/nPairs.R 4bb20d225902c5b0a36cbb36a14f85f7 *R/nobs.R -eca2d4b7165eb8ae97e0944284120cc9 *R/object.size.R +1169ce3d329eb62aa51b036df6b24a5b *R/object.size.R c3c6e74c9238d0b99b74c09cf76b154d *R/onAttach.R 53d3ccd68f0b4b5d3e2c27d7bfaedc17 *R/read.xls.R aaace07c7a900ddec695220ad10e0d14 *R/rename.vars.R -8dbe70d52ae3d60a07aa928fc3c62413 *R/reorder.R +54fa890d6dea94a2a0a70e9a568030be *R/reorder.R cef4f8eb74136397feee111cf921676f *R/resample.R f126721102aa7f6d258e74bf6215b831 *R/sheetCount.R dffd0684af87ed05ae550eab75f0787c *R/startsWith.R -3091b03c730a8c96f8f37602315f5b42 *R/trim.R +6ff7e70b153cf189af5f517b5df071a1 *R/trim.R 05f7763f8efeef44932e4c47cfc091ff *R/trimSum.R cccb187cbc78989f5c98633224e7d01e *R/unknown.R 0c763fce55062857b133aebac8695d8c *R/unmatrix.R -d124821046808c5e8492a6db12ac0be4 *R/upperTriangle.R +24c7667dffcef1d6e327f6748a8c7bdc *R/upperTriangle.R 5a0dcdddde5d9c1ec0d215d12f323500 *R/wideByFactor.R -a568b5b2ffcd3552c2ccf06d17e88d01 *R/write.fwf.R +7fb8a64b8ed6279c97b6b38b9be73072 *R/write.fwf.R 41c99f4c80f3c613c8173f93ec9c3bc0 *R/xls2sep.R 84c93b4e26e20d6ff792637f41c71df3 *R/xlsFormats.R -8392400aabc0fd94a90a3e37b479cdba *build/vignette.rds +5f91f515b5ce0099fb8f38c82077bdea *build/vignette.rds 494447a9a3b7a389e79a556a1c356caa *data/MedUnits.rda -17f7c7f2a632f33505b7fa7436c018fc *inst/ChangeLog -7be64b29e9fe507512098c4f37595570 *inst/NEWS +987f0a271d337d959d2cb705c2c522a4 *inst/ChangeLog +c449f3a8a7472e85450b46bf30440206 *inst/NEWS ac512b1ebd4d71ac96b2a4d5288d53cb *inst/bin/xls2csv ce0b4437c51faccb3595d986e8acae80 *inst/bin/xls2csv.bat a7982b90f82857e34a253df2be42d7c1 *inst/doc/Rnews.sty @@ -57,10 +57,10 @@ f1dc90111b7898b5944c1f67abec00a2 *inst/doc/gregmisc.tex b19ba078add1b84a300ae1adb5167567 
*inst/doc/mapLevels.R afcc7e1ba12e2aab12a4349fc8470d08 *inst/doc/mapLevels.Rnw -18f86f347e7cb2a3a2b04d5c7b605ca5 *inst/doc/mapLevels.pdf +c790025817a818cec1e70377b9745f97 *inst/doc/mapLevels.pdf 788f58d8791841c0dd0a9bddfa28b8fe *inst/doc/unknown.R a968a07310286934daaea108e3f915f4 *inst/doc/unknown.Rnw -11315bfa6580f66f21115f08d114a172 *inst/doc/unknown.pdf +de4e2b52d3466be1dbf1cd478bc50d87 *inst/doc/unknown.pdf 3622c5d29d09f1a179211f22acf6cdef *inst/perl/Archive/README-Archive-Zip 013677fabc8a49480cca5c10d67dd850 *inst/perl/Archive/Zip.pm da56a4326657fda95d0de93c65ed4006 *inst/perl/Archive/Zip/Archive.pm @@ -75,6 +75,9 @@ 5b969994e19eef9b0fda0c756dd529ef *inst/perl/Archive/Zip/StringMember.pm a0680f49434e681325498f3d0ce1147f *inst/perl/Archive/Zip/Tree.pm 5ad94e7c07432859fc85cec9b215b1a1 *inst/perl/Archive/Zip/ZipFileMember.pm +a12b3df60b1790a375e98cd7e11526d9 *inst/perl/Crypt/RC4.pm +050d359c44120bd9262cd61b9a773cd6 *inst/perl/Digest/Perl/MD5.pm +dfc0b868e0becf87d19a52c24740e5e0 *inst/perl/Graphics/ColorUtils.pm f8109a53128f172d5199998a0774a982 *inst/perl/IO/AtomicFile.pm cb8bf30e73340e4eba233c51dd8b2f34 *inst/perl/IO/InnerFile.pm 5886a657d7e49b133d23f7b2dbe30c21 *inst/perl/IO/Lines.pm @@ -86,40 +89,46 @@ 3669bd450d4fc4e6b883fcd7ad604caf *inst/perl/IO/WrapTie.pm 75749fd752f9b91652bcc147694f00a1 *inst/perl/OLE/README-OLE-Storage_Lite bc2eb29f789cb0c16619b5d88e6f6410 *inst/perl/OLE/Storage_Lite.pm -2120c2a09e745f340014e2847812bcb9 *inst/perl/Spreadsheet/ParseExcel.pm -66cc990f8cf7dcc9ffc145f8e79e226d *inst/perl/Spreadsheet/ParseExcel/Cell.pm -ca067073e60f54322c42f1f697ca74e0 *inst/perl/Spreadsheet/ParseExcel/Dump.pm -e91db20e75505374857f2eb64474a9b3 *inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm -ac8dad0ae1b3fbc2dc45dd0c32d94a8c *inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm -57d3b77da3e011dd2c4b528ecbb32423 *inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm -04ef4ce775c5af18b3bf820ebd10c41b *inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm -dc6d43df24f7bdf84ea100f4fbd48717 *inst/perl/Spreadsheet/ParseExcel/Font.pm -a66b33ae7c43dd18f9ac8f16005fedb7 *inst/perl/Spreadsheet/ParseExcel/Format.pm -65807f8d9ef5542a36210b10ee3f1e3c *inst/perl/Spreadsheet/ParseExcel/SaveParser.pm -7c66383e6456e4bf274ecd32345429b9 *inst/perl/Spreadsheet/ParseExcel/SaveParser/Workbook.pm -8e4ee9fc71392d04c1a1a4553b6f580e *inst/perl/Spreadsheet/ParseExcel/SaveParser/Worksheet.pm -ff06c50a267369ebdcb35bb500f1b965 *inst/perl/Spreadsheet/ParseExcel/Utility.pm -4245f158cab85cac7d510d71c29cf3df *inst/perl/Spreadsheet/ParseExcel/Workbook.pm -ec1611346621e6f90510acb619f5252a *inst/perl/Spreadsheet/ParseExcel/Worksheet.pm +ae9cde51f8840b6299d510ddb4360591 *inst/perl/Spreadsheet/ParseExcel.pm +24a716293e90c22e90e53395a14fcb28 *inst/perl/Spreadsheet/ParseExcel/Cell.pm +1abd6d27404bd3ce1ea077bdc647956d *inst/perl/Spreadsheet/ParseExcel/Dump.pm +82f4009b80841e82942de379549527a3 *inst/perl/Spreadsheet/ParseExcel/FmtDefault.pm +efa4e5809ddb4d7041093e8115ac50d0 *inst/perl/Spreadsheet/ParseExcel/FmtJapan.pm +2b155083f756a684723fe400966ca944 *inst/perl/Spreadsheet/ParseExcel/FmtJapan2.pm +d6eba6e2c95f108a1aac09a42e0fb76c *inst/perl/Spreadsheet/ParseExcel/FmtUnicode.pm +6b8027d0201f9e69d2fb44a3846486cf *inst/perl/Spreadsheet/ParseExcel/Font.pm +dc2fcccd7889f61bf96a462a0e26ec7c *inst/perl/Spreadsheet/ParseExcel/Format.pm +797ff88759790280da306ad8fa889c1e *inst/perl/Spreadsheet/ParseExcel/SaveParser.pm +f4d6c7214ff3ac8651710960b57c992a *inst/perl/Spreadsheet/ParseExcel/SaveParser/Workbook.pm +42a270a338246bb0489cb787f538c8a4 
*inst/perl/Spreadsheet/ParseExcel/SaveParser/Worksheet.pm +b05f0f8e84a9ad9fb155af83f2d546e7 *inst/perl/Spreadsheet/ParseExcel/Utility.pm +066d26b4c80257a6016f931e6e8ae098 *inst/perl/Spreadsheet/ParseExcel/Workbook.pm +8b53d89dc8969e9840084fc34524bc55 *inst/perl/Spreadsheet/ParseExcel/Worksheet.pm +0209fe512b8c884afc8000c057723a2f *inst/perl/Spreadsheet/ParseXLSX.pm c9b8eab5a257c27463c22af1316e7241 *inst/perl/Spreadsheet/README-ParseExcel 359fb41631453c30b1c8341786d00195 *inst/perl/Spreadsheet/README-XLS -2534020557d8bfced5c2471cc74cd364 *inst/perl/Spreadsheet/XLSX.pm -d1079689d1f8f016913714bc9b3534d1 *inst/perl/Spreadsheet/XLSX/Fmt2007.pm -5e9b67c572312470f95ef4470a030adb *inst/perl/Spreadsheet/XLSX/Utility2007.pm 441b297006e72c914e99851da06d4826 *inst/perl/VERSIONS -694c5ec63d43bc79613707bd7813dc7f *inst/perl/install_modules.pl -38970d085129a053e44c2fe5f704e80e *inst/perl/module_tools.pl -95fb12c6a78ec327de63e9a04f34775f *inst/perl/sheetCount.pl -95fb12c6a78ec327de63e9a04f34775f *inst/perl/sheetNames.pl -f55aa6f304604c5f1eb96351f0dd9b82 *inst/perl/supportedFormats.pl -84e6c5093222000137198cc67f8f1da8 *inst/perl/xls2csv.pl -84e6c5093222000137198cc67f8f1da8 *inst/perl/xls2tab.pl -84e6c5093222000137198cc67f8f1da8 *inst/perl/xls2tsv.pl -59166d9d1e10ea9d544aad1d1a4ed978 *inst/xls/ExampleExcelFile.xls -59f44d54c2c09ee757af7b6724dc9d63 *inst/xls/ExampleExcelFile.xlsx +dee41d67b156f0f146020ee91157ac56 *inst/perl/XML/Twig.pm +41d922f764505fb973db5f098ece6c44 *inst/perl/XML/Twig/XPath.pm +b71548d8785cc55810b5a7d903c52012 *inst/perl/install_modules.pl +b0ff06837bf24d17a67e4da5431c3fb9 *inst/perl/module_tools.pl +44dd4baf6c1ef3999708143f3ab007fd *inst/perl/sheetCount.pl +44dd4baf6c1ef3999708143f3ab007fd *inst/perl/sheetNames.pl +f11614dea00e704edd1582714d1792e2 *inst/perl/supportedFormats.pl +4559c6f484b4e25e5cf037efef73c930 *inst/perl/xls2csv.pl +4559c6f484b4e25e5cf037efef73c930 *inst/perl/xls2tab.pl +4559c6f484b4e25e5cf037efef73c930 *inst/perl/xls2tsv.pl +799008428d5dab63e1bde17859cff753 *inst/xls/ExampleExcelFile.xls +88d5db30d66db593a854e441fcd79fca *inst/xls/ExampleExcelFile.xlsx +beadf509a237ee40d483bd0ac09672d9 *inst/xls/ExampleExcelFile_1900.xls +bdc103a6d2e0e5825c19add38ad2770b *inst/xls/ExampleExcelFile_1900.xlsx +8cfb85e7bfdc636d9b4bfb5dd251b41e *inst/xls/ExampleExcelFile_1904.xls +c2cc0620d5e325d16db0fab03afdba9e *inst/xls/ExampleExcelFile_1904.xlsx 7c16d3cfd37123f3c321c12a92b9269a *inst/xls/iris.xls 8a0467a49bfb791295925cb6a372b1ff *inst/xls/latin-1.xls 36e751188a4e3d37ce3be57d2152922a *inst/xls/latin-1.xlsx +a8cf07872660e85dab41367e2b4f08e5 *inst/xls/wide.xls +47183196217c8b121f88bfd588beffed *inst/xls/wide.xlsx 24a1020fb457c398c620457ad114e245 *man/Args.Rd b0dec88638d111fb8b0c75ebfd99cdfd *man/ConvertMedUnits.Rd 673f2150582676c574a306697b29ffa5 *man/MedUnits.Rd @@ -133,25 +142,27 @@ a471e3c2a2b1ca5550a1822d5620e848 *man/duplicated2.Rd 28b102aeb6f3c62fe6b3b40fefa4c005 *man/elem.Rd e45e3959608492f8e12298d221e605e3 *man/env.Rd +80c283bd90b24dc91a6e8216a031d733 *man/first.Rd 73aeb6a00e393012dfc4fb1e0b8fc15a *man/frameApply.Rd dc8a3653f3c70778ccce2d926194adce *man/gdata-defunct.Rd 34cfa7a16878f7d2032b49f350eec4a5 *man/gdata-package.Rd 38580f70b4b3af84ebfa4b952dd7021b *man/getDateTimePart.Rd -eb90a75f6e6d0171486d3a96626cf04b *man/humanReadable.Rd +76c13487780d8ea770544678350e3f17 *man/humanReadable.Rd 6beab4e8b711110199599f8427d1d042 *man/installXLSXsupport.Rd 26bb8febce31195f8efcf071270913bc *man/interleave.Rd 0d70b8cd533a830a68103355b1054ec5 *man/is.what.Rd 8c50e81caf14aebb11d908fbcc9fe2de 
*man/keep.Rd +c9e910926112da9792065043cec7d46d *man/left.Rd 4bfbaf0835fff3cbc3ba8f17f9823bfb *man/ll.Rd dfe069423abb32eaed74b2daad0a56d8 *man/ls.funs.Rd 3723cf974f55156ced750e3235445f1b *man/mapLevels.Rd 3ca3aeaf85340d25fc36c6a5275fce2c *man/matchcols.Rd 4789e9c9a034bc5665d93c80579729ef *man/nPairs.Rd 4e3ba1601ecb171596b609516d2e8911 *man/nobs.Rd -67467a836f6e35a897d6bee4d0a7808d *man/object.size.Rd +6604b93792d11b827aaa6c40c9a9cb62 *man/object.size.Rd 2ad3d3570252d4954bdf4d81ea3404fa *man/read.xls.Rd b73a198509b4fdcb4a24a85909309532 *man/rename.vars.Rd -dce038a56263e98621c3715f0aefecd3 *man/reorder.Rd +da64c27965060d0f35bed084a416a57a *man/reorder.Rd fc28b1b680997fd8ff2ab73478db4872 *man/resample.Rd 55d4019112a5759791610fd48c20d4b3 *man/sheetCount.Rd 1a3958cb8dfc8a3d50d73e51bd280a08 *man/startsWith.Rd @@ -159,16 +170,20 @@ 347e8e1afc135b4ce6f5f3a7face76d5 *man/trimSum.Rd 4f5e0665c2c046b93f45963387b7d44f *man/unknown.Rd 5b789bc21ee7f46f8a9b138f1ed36829 *man/unmatrix.Rd -9778a0a2d069cddda0f2e821143d691e *man/upperTriangle.Rd +7427fd1c31684c2392095c99c8fc7006 *man/upperTriangle.Rd ad219282ec6913083b82073691923f9b *man/wideByFactor.Rd -56d52a59a9c4f5132d5ae69673823ae8 *man/write.fwf.Rd +33f5856b0960a91d03807b781e664704 *man/write.fwf.Rd 8a9c1fe9d0316d0b98e6d353c2b7a6cf *man/xlsFormats.Rd 64fc1cde149ab7f61c266fdb295a6404 *tests/runRUnitTests.R -10fe383026608ee1cf2c8f1b70a02c5b *tests/test.read.xls.R -b829acdc86095be7759e0d001b8e9599 *tests/test.read.xls.Rout.save +89f2fc0a5b6dd6ff34fa2facd5d36127 *tests/test.humanReadable.R +fb097d7209dc5e20b33fa46afe861ac9 *tests/test.humanReadable.Rout.save +8d734967558d8d545de947ddd4ba06ac *tests/test.read.xls.R +3d44dd3a3a4a4fabd4c89c5c18d43644 *tests/test.read.xls.Rout.save +db19f080b3ed7525389cecf47481409f *tests/test.reorder.factor.R +78c0dd82c14a6d9d668ffb2fa775e635 *tests/test.reorder.factor.Rout.save dee3232474b92bcdf1ad75ca31080792 *tests/test.write.fwf.eol.R b32b0eb85790d71ea6025ae5eca71fb1 *tests/tests.write.fwf.R -dba6d4c18936f97e03225af5be4743ed *tests/tests.write.fwf.Rout.save +a604f7945af6600559d693d1db3abecc *tests/tests.write.fwf.Rout.save ed871fe534197367139b63b6c8309396 *tests/unitTests/Makefile 52c3dc4a6188f8db93bde320cb6bb5ed *tests/unitTests/report.html 11154df5a069efad335f1a9808b6cc01 *tests/unitTests/report.txt @@ -183,7 +198,7 @@ 42330b4f7f6bd1cd335f59088d6bf282 *tests/unitTests/runit.trimSum.R 80c6bc219e067e6c933b61dc3b3042c1 *tests/unitTests/runit.unknown.R 07c0f9fc38612b196f2d7449c133fde2 *tests/unitTests/runit.wideByFactor.R -095012d02d40b8c33c37e1c307ac19d0 *tests/unitTests/runit.write.fwf.R +8bd376ab7034b5f9016caff11eecbb92 *tests/unitTests/runit.write.fwf.R a7982b90f82857e34a253df2be42d7c1 *vignettes/Rnews.sty afcc7e1ba12e2aab12a4349fc8470d08 *vignettes/mapLevels.Rnw a968a07310286934daaea108e3f915f4 *vignettes/unknown.Rnw diff -Nru gdata-2.13.3/NAMESPACE gdata-2.17.0/NAMESPACE --- gdata-2.13.3/NAMESPACE 2014-04-05 01:30:44.000000000 +0000 +++ gdata-2.17.0/NAMESPACE 2015-07-03 19:22:18.000000000 +0000 @@ -3,7 +3,6 @@ .onAttach, ans, Args, - aggregate.table, bindData, case, cbindX, @@ -14,11 +13,14 @@ duplicated2, elem, env, + first, frameApply, installXLSXsupport, interleave, is.what, keep, + last, + left, ll, ls.funs, lowerTriangle, @@ -31,6 +33,7 @@ remove.vars, reorder.factor, resample, + right, sheetCount, sheetNames, startsWith, @@ -48,7 +51,7 @@ xlsFormats, ## Object size stuff - object.size, as.object_size, is.object_size, humanReadable, + object.size, as.object_sizes, is.object_sizes, humanReadable, ## getDateTime stuff 
getYear, getMonth, getDay, getHour, getMin, getSec, @@ -63,6 +66,8 @@ importFrom(stats, reorder) importFrom(stats, nobs) importFrom(gtools, mixedsort) +importFrom(methods, is) +importFrom(utils, data, download.file, head, read.csv, read.delim, read.table, tail, write.table) S3method(reorder, factor) @@ -125,8 +130,9 @@ S3method(nobs, lm) # now provided by stats package ## Object size stuff -S3method(print, object_size) -S3method(c, object_size) +S3method(c, object_sizes) +S3method(format, object_sizes) +S3method(print, object_sizes) ## unknown stuff S3method(isUnknown, default) @@ -151,3 +157,11 @@ S3method(trim, factor) S3method(trim, list) S3method(trim, data.frame) + +## left, right +S3method(left, data.frame) +S3method(left, matrix) +S3method(right, data.frame) +S3method(right, matrix) + + diff -Nru gdata-2.13.3/NEWS gdata-2.17.0/NEWS --- gdata-2.13.3/NEWS 2014-04-05 18:41:12.000000000 +0000 +++ gdata-2.17.0/NEWS 1970-01-01 00:00:00.000000000 +0000 @@ -1,529 +0,0 @@ -Changes in 2.13.3 (2014-04-04) ------------------------------- - -Bug Fixes - -- Unit tests were incorrectly checking for equality of optional POSIXlt - components. (Bug reported by Brian Ripley). - -Other Changes - -- 'aggregate.table' is now defunct. See '?gdata-defunct' for details. - -- Unit tests and vignettes now follow R standard practice. - -- Minor changes to clean up R CMD check warnings. - -Changes in 2.13.2 (2013-06-28) ------------------------------- - -Enhancements: - -- Simplify ll() by converting a passed list to an environment, - avoiding the need for special casing and the use of attach/detach. - -- Working of deprecation warning message in aggregate.table clarified. - -Changes in 2.13.1 (2013-03-24) ------------------------------- - -Enhancements: - -- Replaced calls to depreciated function ".path.package" with the new - public function "path.package". - - -Changes in 2.13.0 (2012-09-20) ------------------------------ - -New features: - -- New 'duplicated2' function which returns TRUE for *all* elements - that are duplicated, including the first, contributed by Liviu - Andronic. This differs from 'duplicated', which only returns the - second and following (second-to last and previous when - 'fromLast=TRUE') duplicate elements. - -- New 'ans' functon to return the value of the last evaluated - top-level function (a convenience function for accessing - .Last.value), contributed by Liviu Andonic. - -Bug Fixes: - -- On windows, warning messages printed to stdout by perl were being - included in the return value from 'system', resulting in errors in - 'sheetCount' and 'sheetNames'. Corrected. - -- The 'MedUnits' column names 'SIUnits' and 'ConventionalUnits' were - reversed and misspelled. - - -Changes in 2.12.0 (2012-09-12) ------------------------------- - -Other Changes: - -- 'stats::aggregate' was made into a generic on 27-Jan-2010, so that - attempting to call 'aggregate' on a 'table' object will now - incorrectly call 'aggregate.table'. Since 'aggregate.table' can be - replaced by a call to tapply using two index vectors, e.g. - aggregate.table(x, by1=a, by2=b, mean) - can be replaced by - tapply(x, INDEX=list(a, b), FUN=mean), - the 'aggregate.table' function will now display a warning that it - is depreciated and recommending the equivalent call to tapply. It - will be removed entirely in a future version of gdata. - -Changes in 2.11.1 (2012-08-22) ------------------------------- - -Enhancements: - -- read.xls() now supports fileEncoding argument to allow non-ascii - encoded data to be handled. 
See the manual page for an example. - -Bug Fixes: - -- The perl script utilized by read.xls() was incorrectly appending a - space character at the end of each line, causing problems with - character and NA entries in the final column. - - -Changes in 2.11.0 (2012-06-18) ------------------------------- - -New Features: - -- read.xls() and supporting functions now allow blank lines to be - preserved, rather than skipped, by supplying the argument - "blank.lines.skip=FALSE". The underlying perl function has been - extended to suppor this via an optional "-s" argument which, when - present, *preserves* blank lines during the conversion. (The - default behavior remains unchanged.) - -Other Changes: - -- Add SystemRequirements field specifying that perl is necessary for - gdata to function fully. - - -Changes in 2.10.6 (2012-06-12) ------------------------------- - -Bug fixes: - -- gdata::nobs.default() needs to handle logical vectors in addition to - numeric vectors. - -Changes in 2.10.{3,4,5} (2012-06-08) ------------------------------------- - -Bug fixes: - -- Mark example for installXLSsupport() as dontrun so R CMD check won't - fail on systems where PERL is not fully functional. - -- Correct name of installXLSsupport() in tests/test.read.xls.R. - -Other Changes: - -- Add dependency on R 2.13.0, since that is when stats::nobs appeared. - - -Changes in 2.10.2 (2012-06-06) ---------------------------------------- - -Bug fixes: - -- Fix issues in nobs.default identified in testing with the gmodels package. - - -Changes in 2.10.1 (2012-06-06) ------------------------------- - -Bug fixes: - -- Undo removal of 'nobs' and 'nobs.lm'. Instead define aliases for - 'nobs' and 'nobs.lm' to support backward compatibility for packages - depending on gdata. - - -Changes in 2.10.0 (2012-06-05) ------------------------------- - -New features: - -- New ls.funs() function to list all objects of class function in the - specified environment. - -- New startsWith() function to determine if a string "starts with" the - specified characters. - - -Enhancements: - -- Add 'na.strings' argument to read.xls() to convert Excel's '#DIV/0!' to NA. - - -Bug fixes: - -- Correct various R CMD check warnings - - -Other changes: - -- Base S3 method for nobs() and nobs.lm() method removed since these - are now provided in the stats package. - - -Changes in 2.9.0 (2011-09-30) ------------------------------ - -New features: - -- Add centerText() function to center text strings for a specified - width. - -- Add case() function, a vectorized variant of the base::switch() - function, which is useful for converting numeric codes into factors. - -Enhancements: - -- Minor improvements to xls2csv() man page. - -CHANGES IN 2.8.1 (2011-04-15) ------------------------------ - -Enhancements: - -- nPairs() gains a summary method that shows how many times each variable - is known, while the other variable of a pair is not - -Bug fixes: - -- Fix errors on windows when R or Perl install path includes spaces by properly quoting the path. - - -CHANGES IN 2.8.1 (2010-11-12) ------------------------------ - -Enhancements: - -- Minor improvement to Args(), read.xls() man page. - -Bug fixes: - -- Modify write.fwf() to capture and pass on additional arguments for - write.table(). This resolves a bug reported by Jan Wijffels. - -- Modify xls2sep.R to avoid use of file.access() which is unreliable - on Windows network shares. 
- - -CHANGES IN 2.8.0 (2010-04-03) ------------------------------ - -Enhancements: - -- When loaded, gtools (via an .onAttach() function) now checks: - 1) if perl is available - 2) whether the perl libraries for XLS support are available - 3) whether the perl libraries for XLSX support are available - - If perl is not available, an appropriate warning message is displayed. - - If necessary perl libraries are not available, a warning message is - displayed, as is a message suggesting the user run the (new) - installXLSXsupport() function to attempt to install the necessary - perl libraries. - -- The function installXLSXsupport() has been provided to install the - binary perl modules that read.xls needs to support Excel 2007+ - 'XLSX' files. - - -CHANGES IN 2.7.3 (2010-04-02) ------------------------------ - -Enhancements: - -- New xlsFormats() command to determine which Excel formats are - supported (XLS, XLSX). - -Bug Fixes: - -- No longer attempt to install perl modules Compress::Raw::Zlib and - Spreadsheet::XLSX at build/compile time. This should resolve recent - build issues, particularly on Windows. - -- All perl code can now operate (but generate warnings) when perl modules Compress::Raw::Zlib and - Spreadsheet::XLSX when are not installed. - -- Also update Greg's email address. - -CHANGES IN 2.7.1 (2010-02-19) ------------------------------ - -Enhancements: - -- on Windows attempts to locate ActiveState perl if perl= not specified and - Rtools perl would have otherwise been used in read.xls and other perl - dependent functions. - -CHANGES IN 2.7.0 (2010-01-25) ------------------------------ - -Bug Fixes: - -- Fix building of Perl libraries on Win32 - - -CHANGES IN 2.7.0 (2010-01-25) ------------------------------ - -Enhancements: - -- read.xls() now supports Excel 2007 'xlsx' files. - -- read.xls() now allows specification of worksheet by name - -- read.xls() now supports ftp URLs. - -- Improved ll() so user can limit output to specified classes - - - -New Functions: - -- sheetCount() and sheetNames() to determine the number and names of - worksheets in an Excel file, respectively. - -Bug Fixes: - -- Fix formatting warning in frameApply(). - -- Resolve crash of "ll(.GlobalEnv)" - -- - -CHANGES IN 2.6.1 (2009-07-15) ------------------------------ - -Bug Fixes - -- Modify unit tests to avoid issues related to time zones. - - -CHANGES IN 2.6.0 (2009-07-15) ------------------------------ - -Bug Fixes - -- Correct minor typos & issues in man pages for write.fwf(), - resample() (Greg Warnes) - -- Correct calculation of object sizes in env() and ll() (Gregor Gorjanc) - -New Features - -- Add support for using tab for field separator during translation - from xls format in read.xls (Greg Warnes) - -- Enhanced function object.size that returns the size of multiple - objects. There is also a handy print method that can print size of - an object in "human readable" format when - options(humanReadable=TRUE) - or - print(object.size(x), humanReadable=TRUE). - (Gregor Gorjanc) - -- New function wideByFactor that reshapes given dataset by a given - factor - it creates a "multivariate" data.frame. (Gregor Gorjanc) - -- New function nPairs that gives the number of variable pairs in a - data.frame or a matrix. (Gregor Gorjanc) - -- New functions getYear, getMonth, getDay, getHour, getMin, and getSec - for extracting the date/time parts from objects of a date/time - class. 
(Gregor Gorjanc) - -- New function bindData that binds two data frames into a multivariate - data frame in a different way than merge. (Gregor Gorjanc) - -Other Changes - -- Correct Greg's email address - - -CHANGES IN 2.5.0 ----------------- - -- New function .runRUnitTestsGdata that enables run of all RUnit tests during - the R CMD check as well as directly from within R. - -- Enhanced function object.size that returns the size of multiple objects. There - is also a handy print method that can print size of an object in "human readable" - format when options(humanReadable=TRUE) or print(x, humanReadable=TRUE). - -- New function bindData that binds two data frames into a multivariate data frame - in a different way than merge. - -- New function wideByFactor that reshapes given dataset by a given factor - - it creates a "multivariate" data.frame. - -- New functions getYear, getMonth, getDay, getHour, getMin, and getSec for - extracting the date/time parts from objects of a date/time class. - -- New function nPairs that gives the number of variable pairs in a data.frame - or a matrix. - -- New function trimSum that sums trimmed values. - -- New function cbindX that can bind objects with different number of rows. - -- write.fwf gains the width argument. The value for unknown can increase or - decrease the width of the columns. Additional tests and documentation fixes. - -CHANGES IN 2.4.2 (2008-05-11) ------------------------------ - -- Enhancements and bug fixes for read.xls() and xls2csv(): - - - More informative log messages when verbose=TRUE - - - File paths containing spaces or other non-traditional characters - are now properly handled - - - Better error messages, particularly when perl fails to generate - an output .csv file. - - - The 'shortcut' character "~" (meaning user's home directory) is - now properly handled in file paths. - - - XLS files created by OpenOffice are now properly handled. Thanks to - Robert Burns for pointing out the patch - (http://rt.cpan.org/Public/Bug/Display.html?id=7206) - -CHANGES IN 2.4.1 (2008-03-24) ------------------------------ - -- Update perl libraries needed by xls2csv() and read.xls() - to latest available versions on CRAN. - -- Add read.xls() to exported function list - -- Correct iris.xls example file. It didn't contain the complete - & properly formatted iris data set. Fixed. - -- Fix typo in win32 example for read.xls() - -CHANGES IN 2.4.0 (2008-01-30) ------------------------------ - -- The keep() function now includes an 'all' argument to specify how - objects with names starting with '.' are handled. - -- keep() now shows an informative warning message when a requested - object does not exist - -- New vignette "Mapping Levels of a Factor" describing the use of - mapLevels(). - -- New vignette "Working with Unknown Values" describing the use of - isUnknown() and unknownToNA(). - -- Several enhancements to read.xls() (thanks to Gabor Grothendieck): - - - New function xls2csv(), which handles converting an xls file - to a csv file and returns a connection to the - temporary csv file - - - xls2csv() and read.xls() both allow a file or a url to be specified - - - read.xls() has a new 'pattern' argument which, if supplied, - will ignore everything prior to the first line in th csv file - that matches the pattern. This is typically used if there - are a variable number of comment lines prior to the header - in which case one can specify one of the column - headings as the pattern. read.xls should - be compatible with the old read.xls. 
- -- Minor fixes to drop.levels(), is.what(). - -- Implementation of unit tests for most functions. - -CHANGES IN 2.3.1 (2006-10-29) ------------------------------ - -- Arguments as well as their position of reorder.factor have been changed - to conform with reorder.factor method in stats package, due to collision - bug. Argument 'make.ordered' is now 'order' and old argument 'order' is - now 'new.order'! Therefore, you have to implicitly specify new.order i.e. - - reorder(trt, new.order=c("PLACEBO", "300 MG", "600 MG", "1200 MG")) - -- trim() gains ... argument. - -- Added "unknown" methods for matrices. - -- Added c() method for factors based on mapLevels() functions. - -- Added write.fwf, which writes file in *F*ixed *W*idth *F*ormat. - -CHANGES FROM 2.1.X to 2.3.0 (2006-09-19) ---------------------------------------- - -- Added mapLevels(), which produces a map with information on levels and/or - internal integer codes. Contributed by Gregor Gorjanc. - -- Extended dropLevels() to work on the factors contained in a data - frame, as well as individual factors. - -- Add unknown(), which changes given unknown value to NA and vice - versa. Contributed by Gregor Gorjanc. - -- Extended trim() to handle a variety of data types data.frames, - lists, factors, etc. Code changes contributed by Gregor Gorjanc. - -- Added resample() command that acts like sample() except that it - _always_ samples from the arguments provided, even if only a single - argument is present. This differs from sample() which behaves - differently in this case. - -- Updated my email address. - -CHANGES IN GDATA 2.1.2 ------------------------ - - - Fixed bug in interleave.R - option to covert 1-column matrices to - vector (based on Andrew Burgess's suggestion) - - - Updated Greg and Jim's email adresses - - - ll.R: Suppressed warning message in attach() call. - - - frameApply.Rd, reorder.Rd: Remove explicit loading of - gtools in examples, so that failure to import functions from - gtools gets properly caught by running the examples. - - - upperTriangle.R, man/upperTriangle.Rd: Add functions for - extracting and modifying the upper and lower trianglular components of - matrices. - - - is.what.R: Replaced the "not.using" vector with a more robust - try(get(test)) to find out whether a particular is.* function - returns a logical of length one. - -- DESCRIPTION: Added Suggests field - -- Updated the example in frameApply - - -CHANGES IN GDATA 2.0.8 ------------------------ - - - Added DESCRIPTION and removed DESCRIPTION.in - - - Updated ll.Rd documentation - - - Fixed bug in Args.R, is.what.R, ll.R diff -Nru gdata-2.13.3/R/aggregate.table.R gdata-2.17.0/R/aggregate.table.R --- gdata-2.13.3/R/aggregate.table.R 2014-04-05 13:57:11.000000000 +0000 +++ gdata-2.17.0/R/aggregate.table.R 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -# $Id: aggregate.table.R 1788 2014-04-05 13:57:10Z warnes $ - -aggregate.table <- function(x, by1, by2, FUN=mean, ...) - { - .Defunct( - new=paste( - "tapply(X=", - deparse(substitute(x)), - ", INDEX=list(", - deparse(substitute(by1)), - ", ", - deparse(substitute(by2)), - "), FUN=", - deparse(substitute(FUN)), - if(length(list(...))>0) - { - l <- list(...) - paste(", ", - paste(names(l),"=", - deparse(substitute(...)), - sep="", - collapse=", ") - ) - }, - ")", sep=""), - package="gdata" - ) - } - -## aggregate.table <- function(x, by1, by2, FUN=mean, ... 
) -## { -## -## tab <- matrix( nrow=nlevels(by1), ncol=nlevels(by2) ) -## dimnames(tab) <- list(levels(by1),levels(by2)) -## -## for(i in 1:nrow(ag)) -## tab[ as.character(ag[i,1]), as.character(ag[i,2]) ] <- ag[i,3] -## tab -## } diff -Nru gdata-2.13.3/R/first.R gdata-2.17.0/R/first.R --- gdata-2.13.3/R/first.R 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/R/first.R 2015-04-28 02:17:04.000000000 +0000 @@ -0,0 +1,3 @@ +# Simply call 'first' or 'last' with a different default value for 'n'. +first <- function(x, n=1, ...) head(x, n=n, ...) +last <- function(x, n=1, ...) tail(x, n=n, ...) diff -Nru gdata-2.13.3/R/humanReadable.R gdata-2.17.0/R/humanReadable.R --- gdata-2.13.3/R/humanReadable.R 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/R/humanReadable.R 2015-04-25 06:33:47.000000000 +0000 @@ -0,0 +1,88 @@ +humanReadable <- function(x, + units="auto", + standard=c("IEC", "SI", "Unix"), + digits=1, + width=NULL, + sep=" ", + justify = c("right", "left") + ) +{ + ## --- Setup --- + + suffix.SI <- c("B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + suffix.IEC <- c("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB") + suffix.Unix <- c("B" , "K", "M", "G", "T", "P", "E", "Z", "Y") + + standard <- match.arg(standard) + if(length(justify)==1) justfy <- c(justify, justify) + + ## --- Functions --- + + .applyHuman <- function(x, base, suffix, digits, width, sep) + { + ## Which suffix should we use? + n <- length(suffix) + i <- pmax(pmin(floor(log(x, base)), n-1),0) + if(!is.finite(i)) i <- 0 + x <- x / base^i + ## Formatting + if(is.null(width)) + ## the same formatting for all + x <- format(round(x=x, digits=digits), nsmall=digits) + else + { + ## similar to ls, du, and df + lenX <- nchar(x) + if(lenX > width) { + digits <- pmax( width - nchar(round(x)) - 1, 0) + } + if(i == 0) digits <- 0 + x <- round(x, digits=digits) + } + c(x, suffix[i+1]) + } + + ## -- Work + + if(any(x < 0)) stop("'x' must be positive") + if(standard == "SI") + { + suffix <- suffix.SI + base <- 10^3 + } + else if (standard=="IEC") + { + suffix <- suffix.IEC + base <- 2^10 + } + else # (standard=="Unix) + { + suffix <- suffix.Unix + base <- 2^10 + } + + if(!missing(units) && units=="bytes") + { + retval <- rbind(x, "bytes") + } + else if(!missing(units) && units!="auto") + { + units <- suffix[match( toupper(units), toupper(suffix) )] + power <- match(units, suffix ) -1 + X <- x/(base^power) + X <- format.default(x=X, digits=digits, nsmall=digits) + retval <- rbind(X, rep(units, length(X))) + } + else + retval <- sapply(X=x, FUN=".applyHuman", base=base, suffix=suffix, + digits=digits, width=width, sep=sep) + + if(all(justify == "none")) + paste(trim(retval[1,]), trim(retval[2,]), sep=sep) + else + paste(format(trim(retval[1,]), justify=justify[1]), + format(trim(retval[2,]), justify=justify[2]), + sep=sep) + +} + diff -Nru gdata-2.13.3/R/installXLSXsupport.R gdata-2.17.0/R/installXLSXsupport.R --- gdata-2.13.3/R/installXLSXsupport.R 2014-01-03 19:32:42.000000000 +0000 +++ gdata-2.17.0/R/installXLSXsupport.R 2015-04-28 21:31:48.000000000 +0000 @@ -7,17 +7,18 @@ findPerl(verbose = verbose) else findPerl(perl, verbose = verbose) - + ## ## directories package.dir <- find.package('gdata') perl.dir <- file.path(package.dir,'perl') + temp.dir <- tempdir() ## ## cmd <- "install_modules.pl" sc <- file.path(perl.dir, cmd) - + ## ## @@ -53,6 +54,6 @@ else { stop("\nUnable to install Perl XLSX support libraries.\n\n") - invisible(FALSE) - } + invisible(FALSE) + } } diff -Nru gdata-2.13.3/R/left.R 
gdata-2.17.0/R/left.R --- gdata-2.13.3/R/left.R 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/R/left.R 2015-04-28 02:28:27.000000000 +0000 @@ -0,0 +1,38 @@ +left <- function(x, n=6L) UseMethod("left") +right <- function(x, n=6L) UseMethod("left") + +left.data.frame <- function(x, n=6) +{ + stopifnot(length(n) == 1L) + n <- if (n < 0L) + max(ncol(x) + n, 0L) + else min(n, ncol(x)) + x[, seq_len(n), drop = FALSE] +} +left.matrix <- left.data.frame + + +right.data.frame <- function (x, n = 6L, ...) +{ + stopifnot(length(n) == 1L) + ncx <- ncol(x) + n <- if (n < 0L) + max(ncx + n, 0L) + else min(n, ncx) + x[, seq.int(to = ncx, length.out = n), drop = FALSE] +} + +right.matrix <- function (x, n = 6L, addcolnums = TRUE, ...) +{ + stopifnot(length(n) == 1L) + ncx <- ncol(x) + n <- if (n < 0L) + max(ncx + n, 0L) + else min(n, ncx) + sel <- seq.int(to = ncx, length.out = n) + ans <- x[, sel, drop = FALSE] + if (addcolnums && is.null(colnames(x))) + colnames(ans) <- paste0("[", sel, ",]") + ans +} + diff -Nru gdata-2.13.3/R/mapLevels.R gdata-2.17.0/R/mapLevels.R --- gdata-2.13.3/R/mapLevels.R 2014-04-05 18:19:53.000000000 +0000 +++ gdata-2.17.0/R/mapLevels.R 2015-04-29 03:27:54.000000000 +0000 @@ -1,7 +1,7 @@ ### mapLevels.R ###------------------------------------------------------------------------ ### What: Mapping levels -### $Id: mapLevels.R 1797 2014-04-05 18:19:49Z warnes $ +### $Id: mapLevels.R 1991 2015-04-29 03:27:50Z warnes $ ### Time-stamp: <2007-04-26 13:16:18 ggorjan> ###------------------------------------------------------------------------ @@ -300,8 +300,8 @@ ## --- Mapping levels in x --- - char <- all(lapply(value, is.character)) - int <- all(lapply(value, is.integer)) + char <- all(sapply(value, is.character)) + int <- all(sapply(value, is.integer)) if(int) { # codes=TRUE if(is.integer(x)) x <- factor(x) diff -Nru gdata-2.13.3/R/object.size.R gdata-2.17.0/R/object.size.R --- gdata-2.13.3/R/object.size.R 2014-01-03 19:32:42.000000000 +0000 +++ gdata-2.17.0/R/object.size.R 2015-04-25 06:47:06.000000000 +0000 @@ -1,92 +1,93 @@ -### object.size.R ###------------------------------------------------------------------------ ### What: Print object size in human readable format - code -### $Id$ -### Time-stamp: <2008-12-30 08:05:43 ggorjan> ###------------------------------------------------------------------------ - -object.size <- function(...) +object.size <- function(...) { structure(sapply(list(...), utils::object.size), - class=c("object_size", "numeric")) + class=c("object_sizes", "numeric")) } -print.object_size <- function(x, quote=FALSE, humanReadable, ...) -{ - xOrig <- x - if(missing(humanReadable)) { - opt <- getOption("humanReadable") - humanReadable <- ifelse(!is.null(opt), opt, FALSE) - } - if(humanReadable) { - print(humanReadable(x), quote=quote, ...) - } else { - class(x) <- "numeric" - NextMethod() - } - invisible(xOrig) +print.object_sizes <- function(x, + quote=FALSE, + humanReadable=getOption("humanReadable"), + standard="IEC", + units, + digits=1, + width=NULL, + sep=" ", + justify = c("right", "left"), + ...) +{ + print(format(x, + humanReadable=humanReadable, + standard=standard, + units=units, + digits=digits, + width=width, + sep=sep, + justify=justify), + quote=quote, + ...) 
+ + + invisible(x) } -is.object_size <- function(x) inherits(x, what="object_size") - -as.object_size <- function(x) +format.object_sizes <- function(x, + humanReadable=getOption("humanReadable"), + standard="IEC", + units, + digits=1, + width=NULL, + sep=" ", + justify = c("right", "left"), + ...) { - if(!is.numeric(x)) stop("'x' must be numeric/integer") - class(x) <- c("object_size", "numeric") - x + if( !missing(units) ) + { + if (units=="bytes") + paste(x, "bytes") + else + humanReadable(x, + standard=standard, + units=units, + digits=digits, + width=width, + sep=sep, + justify=justify + ) + } + else if( is.null(humanReadable) || humanReadable==FALSE ) + paste(x, "bytes") + else + humanReadable(x, + standard=standard, + units=units, + digits=digits, + width=width, + sep=sep, + justify=justify) + } -c.object_size <- function(..., recursive=FALSE) + + +is.object_sizes <- function(x) inherits(x, what="object_sizes") + +as.object_sizes <- function(x) { - x <- NextMethod() - if(is.numeric(x)) class(x) <- c("object_size", "numeric") + if(!is.numeric(x) || any(x<0)) stop("'x' must be a positive numeric vector") + + class(x) <- c("object_sizes", "numeric") x } -humanReadable <- function(x, standard="SI", digits=1, width=3, sep=" ") +c.object_sizes <- function(..., recursive=FALSE) { - ## --- Setup --- - - if(any(x < 0)) stop("'x' must be positive") - if(standard == "SI") { - suffix <- c("B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") - base <- 1000 - } else { - suffix <- c("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB") - base <- 1024 - } - - ## --- Apply --- - - .applyHuman <- function(x, base, suffix, digits, width, sep) - { - ## Which suffix should we use? - n <- length(suffix) - for(i in 1:n) { - if(x >= base) { - if(i < n) x <- x / base - } else { - break - } - } - ## Formatting - if(is.null(width)) { ## the same formatting for all - x <- format(round(x=x, digits=digits), nsmall=digits) - } else { ## similar to ls, du, and df - lenX <- nchar(x) - if(lenX > width) { - digitsMy <- width - (lenX - (lenX - (nchar(round(x)) + 1))) - digits <- ifelse(digitsMy > digits, digits, digitsMy) - } - if(i == 1) digits <- 0 - x <- round(x, digits=digits) - } - paste(x, suffix[i], sep=sep) - } - - sapply(X=x, FUN=".applyHuman", base=base, suffix=suffix, digits=digits, - width=width, sep=sep) + x <- NextMethod() + if(is.numeric(x)) class(x) <- c("object_sizes", "numeric") + x } ###------------------------------------------------------------------------ diff -Nru gdata-2.13.3/R/reorder.R gdata-2.17.0/R/reorder.R --- gdata-2.13.3/R/reorder.R 2014-01-03 19:32:42.000000000 +0000 +++ gdata-2.17.0/R/reorder.R 2015-04-28 06:03:14.000000000 +0000 @@ -1,7 +1,3 @@ -# $Id: reorder.R 988 2006-10-29 12:55:08Z ggorjan $ - -# Reorder the levels of a factor. - reorder.factor <- function(x, X, FUN, @@ -12,15 +8,31 @@ { constructor <- if (order) ordered else factor - if (!missing(new.order)) + if(!missing(X) || !missing(FUN)) + { + if(missing(FUN)) FUN <- 'mean' + + ## I would prefer to call stats::reorder.default directly, + ## but it exported from stats, so the relevant code is + ## replicated here: + ## --> + scores <- tapply(X = X, INDEX = x, FUN = FUN, ...) 
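With the changes above, object.size() returns a vector of class "object_sizes", and its format()/print() methods delegate to humanReadable() when units are requested or options(humanReadable=TRUE) is set; otherwise plain byte counts are shown. A brief sketch, assuming gdata 2.17.0 (the 124e8 value mirrors an assertion in the new test file below):

library(gdata)

sz <- object.size(iris, letters)   # numeric vector of class "object_sizes"
is.object_sizes(sz)                # TRUE
format(sz)                         # "... bytes" for each object (humanReadable option unset)
format(sz, units="auto")           # IEC-scaled, e.g. something like "7.1 KiB" for iris

format(as.object_sizes(124e8), units="auto")   # "11.5 GiB"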
+ levels <- names(base::sort(scores, na.last = TRUE)) + if(order) + ans <- ordered(x, levels=levels) + else + ans <- factor(x, levels=levels) + attr(ans, "scores") <- scores + ## <-- + return(ans) + } + else if (!missing(new.order)) { if (is.numeric(new.order)) new.order <- levels(x)[new.order] else new.order <- new.order } - else if (!missing(FUN)) - new.order <- names(sort(tapply(X, x, FUN, ...))) else new.order <- sort(levels(x)) diff -Nru gdata-2.13.3/R/trim.R gdata-2.17.0/R/trim.R --- gdata-2.13.3/R/trim.R 2014-01-03 19:32:42.000000000 +0000 +++ gdata-2.17.0/R/trim.R 2015-04-28 04:30:06.000000000 +0000 @@ -1,5 +1,3 @@ -# $Id: trim.R 1757 2013-12-16 19:58:56Z warnes $ - trim <- function(s, recode.factor=TRUE, ...) UseMethod("trim", s) @@ -19,7 +17,7 @@ if(recode.factor) { dots <- list(x=s, ...) if(is.null(dots$sort)) dots$sort <- sort - s <- do.call(what=reorder.factor, args=dots) + s <- do.call(what="reorder.factor", args=dots) } s } diff -Nru gdata-2.13.3/R/upperTriangle.R gdata-2.17.0/R/upperTriangle.R --- gdata-2.13.3/R/upperTriangle.R 2014-01-03 19:32:42.000000000 +0000 +++ gdata-2.17.0/R/upperTriangle.R 2015-06-29 22:33:46.000000000 +0000 @@ -1,22 +1,42 @@ -upperTriangle <- function(x, diag=FALSE) +upperTriangle <- function(x, diag=FALSE, byrow=FALSE) { - x[upper.tri(x, diag=diag)] + if(byrow) + t(x)[rev(upper.tri(x, diag=diag))] + else + x[upper.tri(x, diag=diag)] } -"upperTriangle<-" <- function(x, diag=FALSE, value) +"upperTriangle<-" <- function(x, diag=FALSE, byrow=FALSE, value) { - x[upper.tri(x, diag=diag)] <- value - x + if(byrow) { + ret <- t(x) + ret[rev(upper.tri(x, diag=diag))] <- value + t(ret) + } + else { + x[upper.tri(x, diag=diag)] <- value + x + } } -lowerTriangle <- function(x, diag=FALSE) +lowerTriangle <- function(x, diag=FALSE, byrow=FALSE) { + if(byrow) + t(x)[rev(lower.tri(x, diag=diag))] + else x[lower.tri(x, diag=diag)] } -"lowerTriangle<-" <- function(x, diag=FALSE, value) +"lowerTriangle<-" <- function(x, diag=FALSE, byrow=FALSE, value) { + if(byrow) { + ret <- t(x) + ret[rev(lower.tri(x, diag=diag))] <- value + t(ret) + } + else { x[lower.tri(x, diag=diag)] <- value x } +} diff -Nru gdata-2.13.3/R/write.fwf.R gdata-2.17.0/R/write.fwf.R --- gdata-2.13.3/R/write.fwf.R 2014-01-03 19:32:42.000000000 +0000 +++ gdata-2.17.0/R/write.fwf.R 2015-04-25 16:24:11.000000000 +0000 @@ -1,7 +1,7 @@ ### write.fwf.R ###------------------------------------------------------------------------ ### What: Write fixed width format - code -### $Id: write.fwf.R 1459 2010-11-12 19:08:12Z warnes $ +### $Id: write.fwf.R 1959 2015-04-25 07:56:23Z warnes $ ### Time-stamp: <2008-08-05 12:11:27 ggorjan> ###------------------------------------------------------------------------ @@ -20,21 +20,43 @@ width=NULL, eol="\n", qmethod=c("escape", "double"), + scientific=TRUE, ...) 
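The byrow argument added to upperTriangle()/lowerTriangle() above returns (or assigns) the triangle in row-major rather than column-major order, implemented by transposing the matrix and reversing the upper.tri()/lower.tri() mask. A small sketch on a hypothetical 4x4 matrix, assuming gdata 2.17.0; the last two lines show the common idiom of symmetrising a matrix:

library(gdata)

m <- matrix(1:16, nrow=4)

upperTriangle(m)               # column-major: 5 9 10 13 14 15
upperTriangle(m, byrow=TRUE)   # row-major:    5 9 13 10 14 15

## copy the upper triangle, read by row, into the lower triangle
lowerTriangle(m) <- upperTriangle(m, byrow=TRUE)
isSymmetric(m)                 # TRUE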
{ ## --- Setup --- + dapply <- function(x, FUN, ..., simplify=TRUE) + { + if(is.data.frame(x)) + return(sapply(x, FUN, ..., simplify=simplify)) + else if(is.matrix(x)) + return(apply(x, 2, FUN, ...)) + else + stop("x must be a data.frame or a matrix") + } + if(!(is.data.frame(x) || is.matrix(x))) stop("'x' must be a data.frame or matrix") if(length(na) > 1) stop("only single value can be defined for 'na'") + if(!scientific) + { + option.scipen <- getOption("scipen") + on.exit( function() options("scipen"=option.scipen) ) + options("scipen"=100) + } + + if(rownames) { + x <- as.data.frame(x) x <- cbind(rownames(x), x) rowColVal <- ifelse(!is.null(rowCol), rowCol, "row") colnames(x)[1] <- rowColVal } colnamesMy <- colnames(x) + if(length(colnamesMy)==0) + colnamesMy <- paste( "V", 1:ncol(x), sep="") nRow <- nrow(x) nCol <- length(colnamesMy) @@ -58,22 +80,26 @@ stringsAsFactors=FALSE) ## Which columns are numeric like - isNum <- sapply(x, is.numeric) + isNum <- dapply(x, is.numeric) ## is.numeric picks also Date and POSIXt - isNum <- isNum & !(sapply(x, inherits, what="Date") | - sapply(x, inherits, what="POSIXt")) + isNum <- isNum & !(dapply(x, inherits, what="Date") | + dapply(x, inherits, what="POSIXt")) ## Which columns are factors --> convert them to character - isFac <- sapply(x, is.factor) - x[, isFac] <- lapply(x[, isFac, drop=FALSE], as.character) + isFac <- dapply(x, is.factor) + if(any(isFac)) + ## This conditional is necessary because if x is a matrix, even if + ## all(isFAC==FALSE), this assignment will coerce it to mode + ## character. This isn't a problem for dataframes. + x[, isFac] <- sapply(x[, isFac, drop=FALSE], as.character) ## Collect information about how format() will format columns. ## We need to get this info now, since format will turn all columns to character - tmp <- lapply(x, format.info, ...) + tmp <- dapply(x, format.info, ..., simplify=FALSE) + if(is.matrix(x)) tmp <- as.data.frame(tmp) tmp1 <- sapply(tmp, length) tmp <- t(as.data.frame(tmp)) retFormat$width <- tmp[, 1] - ## Collect other details for numeric columns if(any(isNum)) { ## Numeric columns with digits @@ -89,6 +115,9 @@ ## --- Format --- + ## store original object in 'y' + y <- x + ## Formatting (to character) for(i in 1:nCol) { if(widthNULL) { @@ -101,17 +130,17 @@ ## following test to "fiddle" with the value in 'na' argument since - ## NA should not increase the width of column with width 1, while wider ## value for 'na' should increase the width - test <- is.na(x[, i]) + test <- is.na(y[, i]) ## Make a copy to make sure we get character after first format() - Date class caused problems x2 <- character(length=nRow) ## Add formatted values - x2[!test] <- format(x[!test, i], justify=justify, width=tmp, ...) + x2[!test] <- format(y[!test, i], justify=justify, width=tmp, ...) 
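Two details in the write.fwf() changes above are easy to overlook: the new dapply() helper routes matrices through sapply()/apply() so that matrix input works alongside data frames, and scientific=FALSE suppresses scientific notation by temporarily setting options(scipen=100). (As written, on.exit() is handed a function object rather than a call, so the raised scipen value is not actually restored when write.fwf() returns.) A minimal sketch of the scientific argument, reusing the numbers from the package's unit tests:

library(gdata)

x <- data.frame(a=123, b=pi, c=1e8, d=1e222)

write.fwf(x)                    # c and d typically appear as 1e+08 and 1e+222
write.fwf(x, scientific=FALSE)  # forces fixed notation via options(scipen=100)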
## Add 'na' value x2[test] <- na ## Replace the original x[, i] <- x2 ## Collect width (again) - tmp2 <- format.info(x[, i], ...)[1] + tmp2 <- format.info(x2, ...)[1] ## Reformat if 'na' value change the width of the column if(tmp2 != retFormat[i, "width"]) { retFormat[i, "width"] <- tmp2 @@ -128,7 +157,7 @@ ## Number of levels for "non-numeric"" columns if(any(!isNum)) { - retFormat[!isNum, "nlevels"] <- sapply(x[, !isNum, drop=FALSE], + retFormat[!isNum, "nlevels"] <- dapply(x[, !isNum, drop=FALSE], function(z) length(unique(z))) } @@ -158,7 +187,7 @@ na=na, row.names=FALSE, col.names=FALSE, - qmethod=qmethod) + qmethod=qmethod) } write.table(x=x, diff -Nru gdata-2.13.3/tests/test.humanReadable.R gdata-2.17.0/tests/test.humanReadable.R --- gdata-2.13.3/tests/test.humanReadable.R 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/tests/test.humanReadable.R 2015-04-25 16:40:38.000000000 +0000 @@ -0,0 +1,93 @@ +library(gdata) + +options(humanReadable=FALSE) + +set.seed(123456) + +baseSI <- 10 +powerSI <- seq(from=0, to=27, by=3) +SI0 <- (baseSI)^powerSI +k <- length(SI0) - 1 +SI1 <- SI0 - SI0 / c(2, runif(n=k, min=1.01, max=5.99)) +SI2 <- SI0 + SI0 / c(2, runif(n=k, min=1.01, max=5.99)) + +baseIEC <- 2 +powerIEC <- seq(from=0, to=90, by=10) +IEC0 <- (baseIEC)^powerIEC +IEC1 <- IEC0 - IEC0 / c(2, runif(n=k, min=1.01, max=5.99)) +IEC2 <- IEC0 + IEC0 / c(2, runif(n=k, min=1.01, max=5.99)) + +# Auto units, specify width +cbind(humanReadable(x=SI2, standard="SI", width=7), + humanReadable(x=SI2, standard="SI", width=5), + humanReadable(x=SI2, standard="SI", width=3), + humanReadable(x=IEC2, standard="IEC", width=7), + humanReadable(x=IEC2, standard="IEC", width=5), + humanReadable(x=IEC2, standard="IEC", width=3), + humanReadable(x=IEC2, standard="Unix", width=7), + humanReadable(x=IEC2, standard="Unix", width=5), + humanReadable(x=IEC2, standard="Unix", width=3)) + +# Auto units, specify digits +cbind(humanReadable(x=SI2, standard="SI", width=NULL, digits=7), + humanReadable(x=SI2, standard="SI", width=NULL, digits=3), + humanReadable(x=SI2, standard="SI", width=NULL, digits=2), + humanReadable(x=SI2, standard="SI", width=NULL, digits=1), + humanReadable(x=IEC2, standard="IEC", width=NULL, digits=7), + humanReadable(x=IEC2, standard="IEC", width=NULL, digits=3), + humanReadable(x=IEC2, standard="IEC", width=NULL, digits=2), + humanReadable(x=IEC2, standard="IEC", width=NULL, digits=1), + humanReadable(x=IEC2, standard="Unix", width=NULL, digits=7), + humanReadable(x=IEC2, standard="Unix", width=NULL, digits=3), + humanReadable(x=IEC2, standard="Unix", width=NULL, digits=2), + humanReadable(x=IEC2, standard="Unix", width=NULL, digits=1)) + +# Single unit, specify width +cbind(humanReadable(x=SI1, units="GB", standard="SI", width=7), + humanReadable(x=SI1, units="GB", standard="SI", width=5), + humanReadable(x=SI1, units="GB", standard="SI", width=3), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=7), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=5), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=3), + humanReadable(x=IEC1, units="G", standard="Unix", width=7), + humanReadable(x=IEC1, units="G", standard="Unix", width=5), + humanReadable(x=IEC1, units="G", standard="Unix", width=3) + ) + +# Single unit, specify digits +cbind(humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=7), + humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=3), + humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=2), + 
humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=1), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=7), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=3), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=2), + humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=1), + humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=7), + humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=3), + humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=2), + humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=1) + ) + + +stopifnot( is.object_sizes(as.object_sizes( 2^(1:30) ) ) ) +stopifnot( format(as.object_sizes(124)) == "124 bytes") +stopifnot( format(as.object_sizes(124e8), units="auto") == "11.5 GiB") +stopifnot( format(as.object_sizes(124e8), humanReadable=TRUE) == "11.5 GiB") +stopifnot( format(as.object_sizes(124e8), units="bytes") == "1.24e+10 bytes") + +tools::assertError( as.object_sizes(-1) ) +tools::assertError( as.object_sizes('a') ) +tools::assertError( as.object_sizes(list()) ) +tools::assertError( as.object_sizes(NULL) ) +tools::assertError( as.object_sizes(0+1i) ) + +stopifnot( format(as.object_sizes(1e40) ) == "1e+40 bytes" ) +stopifnot( format(as.object_sizes(1e40), units="auto" ) == "8.271806e+15 YiB") +stopifnot( format(as.object_sizes(1e40), units="bytes") == "1e+40 bytes" ) +stopifnot( format(as.object_sizes(1e40), humanReadable=TRUE) == "8.271806e+15 YiB") +stopifnot( format(as.object_sizes(1e40), humanReadable=FALSE) == "1e+40 bytes") + +options(humanReadable=TRUE) +stopifnot( format(as.object_sizes(1e40) ) == "8.271806e+15 YiB") +options(humanReadable=FALSE) diff -Nru gdata-2.13.3/tests/test.humanReadable.Rout.save gdata-2.17.0/tests/test.humanReadable.Rout.save --- gdata-2.13.3/tests/test.humanReadable.Rout.save 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/tests/test.humanReadable.Rout.save 2015-07-03 20:05:05.000000000 +0000 @@ -0,0 +1,238 @@ + +R version 3.2.0 (2015-04-16) -- "Full of Ingredients" +Copyright (C) 2015 The R Foundation for Statistical Computing +Platform: x86_64-apple-darwin13.4.0 (64-bit) + +R is free software and comes with ABSOLUTELY NO WARRANTY. +You are welcome to redistribute it under certain conditions. +Type 'license()' or 'licence()' for distribution details. + +R is a collaborative project with many contributors. +Type 'contributors()' for more information and +'citation()' on how to cite R or R packages in publications. + +Type 'demo()' for some demos, 'help()' for on-line help, or +'help.start()' for an HTML browser interface to help. +Type 'q()' to quit R. + +> library(gdata) +gdata: read.xls support for 'XLS' (Excel 97-2004) files ENABLED. + +gdata: read.xls support for 'XLSX' (Excel 2007+) files ENABLED. 
+ +Attaching package: 'gdata' + +The following object is masked from 'package:stats': + + nobs + +The following object is masked from 'package:utils': + + object.size + +> +> options(humanReadable=FALSE) +> +> set.seed(123456) +> +> baseSI <- 10 +> powerSI <- seq(from=0, to=27, by=3) +> SI0 <- (baseSI)^powerSI +> k <- length(SI0) - 1 +> SI1 <- SI0 - SI0 / c(2, runif(n=k, min=1.01, max=5.99)) +> SI2 <- SI0 + SI0 / c(2, runif(n=k, min=1.01, max=5.99)) +> +> baseIEC <- 2 +> powerIEC <- seq(from=0, to=90, by=10) +> IEC0 <- (baseIEC)^powerIEC +> IEC1 <- IEC0 - IEC0 / c(2, runif(n=k, min=1.01, max=5.99)) +> IEC2 <- IEC0 + IEC0 / c(2, runif(n=k, min=1.01, max=5.99)) +> +> # Auto units, specify width +> cbind(humanReadable(x=SI2, standard="SI", width=7), ++ humanReadable(x=SI2, standard="SI", width=5), ++ humanReadable(x=SI2, standard="SI", width=3), ++ humanReadable(x=IEC2, standard="IEC", width=7), ++ humanReadable(x=IEC2, standard="IEC", width=5), ++ humanReadable(x=IEC2, standard="IEC", width=3), ++ humanReadable(x=IEC2, standard="Unix", width=7), ++ humanReadable(x=IEC2, standard="Unix", width=5), ++ humanReadable(x=IEC2, standard="Unix", width=3)) + [,1] [,2] [,3] [,4] [,5] [,6] + [1,] " 2 B " " 2 B " " 2 B " " 2 B " " 2 B " " 2 B " + [2,] "1.54215 kB" "1.542 kB" " 1.5 kB" "1.18582 KiB" "1.186 KiB" " 1.2 KiB" + [3,] "1.20064 MB" "1.201 MB" " 1.2 MB" "1.19003 MiB" " 1.19 MiB" " 1.2 MiB" + [4,] "1.25207 GB" "1.252 GB" " 1.3 GB" "1.54448 GiB" "1.544 GiB" " 1.5 GiB" + [5,] "1.18121 TB" "1.181 TB" " 1.2 TB" "1.27667 TiB" "1.277 TiB" " 1.3 TiB" + [6,] " 1.1853 PB" "1.185 PB" " 1.2 PB" "1.18733 PiB" "1.187 PiB" " 1.2 PiB" + [7,] " 1.1678 EB" "1.168 EB" " 1.2 EB" "1.46271 EiB" "1.463 EiB" " 1.5 EiB" + [8,] "1.18275 ZB" "1.183 ZB" " 1.2 ZB" "1.62382 ZiB" "1.624 ZiB" " 1.6 ZiB" + [9,] "1.18568 YB" "1.186 YB" " 1.2 YB" "1.19557 YiB" "1.196 YiB" " 1.2 YiB" +[10,] "1501.49 YB" " 1501 YB" "1501 YB" "1750.35 YiB" " 1750 YiB" "1750 YiB" + [,7] [,8] [,9] + [1,] " 2 B" " 2 B" " 2 B" + [2,] "1.18582 K" "1.186 K" " 1.2 K" + [3,] "1.19003 M" " 1.19 M" " 1.2 M" + [4,] "1.54448 G" "1.544 G" " 1.5 G" + [5,] "1.27667 T" "1.277 T" " 1.3 T" + [6,] "1.18733 P" "1.187 P" " 1.2 P" + [7,] "1.46271 E" "1.463 E" " 1.5 E" + [8,] "1.62382 Z" "1.624 Z" " 1.6 Z" + [9,] "1.19557 Y" "1.196 Y" " 1.2 Y" +[10,] "1750.35 Y" " 1750 Y" "1750 Y" +> +> # Auto units, specify digits +> cbind(humanReadable(x=SI2, standard="SI", width=NULL, digits=7), ++ humanReadable(x=SI2, standard="SI", width=NULL, digits=3), ++ humanReadable(x=SI2, standard="SI", width=NULL, digits=2), ++ humanReadable(x=SI2, standard="SI", width=NULL, digits=1), ++ humanReadable(x=IEC2, standard="IEC", width=NULL, digits=7), ++ humanReadable(x=IEC2, standard="IEC", width=NULL, digits=3), ++ humanReadable(x=IEC2, standard="IEC", width=NULL, digits=2), ++ humanReadable(x=IEC2, standard="IEC", width=NULL, digits=1), ++ humanReadable(x=IEC2, standard="Unix", width=NULL, digits=7), ++ humanReadable(x=IEC2, standard="Unix", width=NULL, digits=3), ++ humanReadable(x=IEC2, standard="Unix", width=NULL, digits=2), ++ humanReadable(x=IEC2, standard="Unix", width=NULL, digits=1)) + [,1] [,2] [,3] [,4] + [1,] " 1.5000000 B " " 1.500 B " " 1.50 B " " 1.5 B " + [2,] " 1.5421535 kB" " 1.542 kB" " 1.54 kB" " 1.5 kB" + [3,] " 1.2006426 MB" " 1.201 MB" " 1.20 MB" " 1.2 MB" + [4,] " 1.2520737 GB" " 1.252 GB" " 1.25 GB" " 1.3 GB" + [5,] " 1.1812105 TB" " 1.181 TB" " 1.18 TB" " 1.2 TB" + [6,] " 1.1853010 PB" " 1.185 PB" " 1.19 PB" " 1.2 PB" + [7,] " 1.1678048 EB" " 1.168 EB" " 1.17 EB" " 1.2 
EB" + [8,] " 1.1827531 ZB" " 1.183 ZB" " 1.18 ZB" " 1.2 ZB" + [9,] " 1.1856788 YB" " 1.186 YB" " 1.19 YB" " 1.2 YB" +[10,] "1501.4852409 YB" "1501.485 YB" "1501.49 YB" "1501.5 YB" + [,5] [,6] [,7] [,8] + [1,] " 1.5000000 B " " 1.500 B " " 1.50 B " " 1.5 B " + [2,] " 1.1858248 KiB" " 1.186 KiB" " 1.19 KiB" " 1.2 KiB" + [3,] " 1.1900302 MiB" " 1.190 MiB" " 1.19 MiB" " 1.2 MiB" + [4,] " 1.5444791 GiB" " 1.544 GiB" " 1.54 GiB" " 1.5 GiB" + [5,] " 1.2766723 TiB" " 1.277 TiB" " 1.28 TiB" " 1.3 TiB" + [6,] " 1.1873270 PiB" " 1.187 PiB" " 1.19 PiB" " 1.2 PiB" + [7,] " 1.4627144 EiB" " 1.463 EiB" " 1.46 EiB" " 1.5 EiB" + [8,] " 1.6238214 ZiB" " 1.624 ZiB" " 1.62 ZiB" " 1.6 ZiB" + [9,] " 1.1955693 YiB" " 1.196 YiB" " 1.20 YiB" " 1.2 YiB" +[10,] "1750.3547972 YiB" "1750.355 YiB" "1750.35 YiB" "1750.4 YiB" + [,9] [,10] [,11] [,12] + [1,] " 1.5000000 B" " 1.500 B" " 1.50 B" " 1.5 B" + [2,] " 1.1858248 K" " 1.186 K" " 1.19 K" " 1.2 K" + [3,] " 1.1900302 M" " 1.190 M" " 1.19 M" " 1.2 M" + [4,] " 1.5444791 G" " 1.544 G" " 1.54 G" " 1.5 G" + [5,] " 1.2766723 T" " 1.277 T" " 1.28 T" " 1.3 T" + [6,] " 1.1873270 P" " 1.187 P" " 1.19 P" " 1.2 P" + [7,] " 1.4627144 E" " 1.463 E" " 1.46 E" " 1.5 E" + [8,] " 1.6238214 Z" " 1.624 Z" " 1.62 Z" " 1.6 Z" + [9,] " 1.1955693 Y" " 1.196 Y" " 1.20 Y" " 1.2 Y" +[10,] "1750.3547972 Y" "1750.355 Y" "1750.35 Y" "1750.4 Y" +> +> # Single unit, specify width +> cbind(humanReadable(x=SI1, units="GB", standard="SI", width=7), ++ humanReadable(x=SI1, units="GB", standard="SI", width=5), ++ humanReadable(x=SI1, units="GB", standard="SI", width=3), ++ humanReadable(x=IEC1, units="GiB", standard="IEC", width=7), ++ humanReadable(x=IEC1, units="GiB", standard="IEC", width=5), ++ humanReadable(x=IEC1, units="GiB", standard="IEC", width=3), ++ humanReadable(x=IEC1, units="G", standard="Unix", width=7), ++ humanReadable(x=IEC1, units="G", standard="Unix", width=5), ++ humanReadable(x=IEC1, units="G", standard="Unix", width=3) ++ ) + [,1] [,2] [,3] [,4] [,5] [,6] + [1,] "5e-10 GB" "5e-10 GB" "5e-10 GB" "5e-10 GiB" "5e-10 GiB" "5e-10 GiB" + [2,] "8e-07 GB" "8e-07 GB" "8e-07 GB" "6e-07 GiB" "6e-07 GiB" "6e-07 GiB" + [3,] "8e-04 GB" "8e-04 GB" "8e-04 GB" "8e-04 GiB" "8e-04 GiB" "8e-04 GiB" + [4,] "7e-01 GB" "7e-01 GB" "7e-01 GB" "4e-01 GiB" "4e-01 GiB" "4e-01 GiB" + [5,] "6e+02 GB" "6e+02 GB" "6e+02 GB" "3e+02 GiB" "3e+02 GiB" "3e+02 GiB" + [6,] "6e+05 GB" "6e+05 GB" "6e+05 GB" "4e+05 GiB" "4e+05 GiB" "4e+05 GiB" + [7,] "5e+08 GB" "5e+08 GB" "5e+08 GB" "5e+08 GiB" "5e+08 GiB" "5e+08 GiB" + [8,] "7e+11 GB" "7e+11 GB" "7e+11 GB" "8e+11 GiB" "8e+11 GiB" "8e+11 GiB" + [9,] "3e+14 GB" "3e+14 GB" "3e+14 GB" "9e+14 GiB" "9e+14 GiB" "9e+14 GiB" +[10,] "8e+17 GB" "8e+17 GB" "8e+17 GB" "9e+17 GiB" "9e+17 GiB" "9e+17 GiB" + [,7] [,8] [,9] + [1,] "5e-10 G" "5e-10 G" "5e-10 G" + [2,] "6e-07 G" "6e-07 G" "6e-07 G" + [3,] "8e-04 G" "8e-04 G" "8e-04 G" + [4,] "4e-01 G" "4e-01 G" "4e-01 G" + [5,] "3e+02 G" "3e+02 G" "3e+02 G" + [6,] "4e+05 G" "4e+05 G" "4e+05 G" + [7,] "5e+08 G" "5e+08 G" "5e+08 G" + [8,] "8e+11 G" "8e+11 G" "8e+11 G" + [9,] "9e+14 G" "9e+14 G" "9e+14 G" +[10,] "9e+17 G" "9e+17 G" "9e+17 G" +> +> # Single unit, specify digits +> cbind(humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=7), ++ humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=3), ++ humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=2), ++ humanReadable(x=SI1, units="GB", standard="SI", width=NULL, digits=1), ++ humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, 
digits=7), ++ humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=3), ++ humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=2), ++ humanReadable(x=IEC1, units="GiB", standard="IEC", width=NULL, digits=1), ++ humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=7), ++ humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=3), ++ humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=2), ++ humanReadable(x=IEC1, units="G", standard="Unix", width=NULL, digits=1) ++ ) + [,1] [,2] [,3] [,4] + [1,] "5.000000e-10 GB" "5.00e-10 GB" "5.0e-10 GB" "5e-10 GB" + [2,] "7.993163e-07 GB" "7.99e-07 GB" "8.0e-07 GB" "8e-07 GB" + [3,] "7.900375e-04 GB" "7.90e-04 GB" "7.9e-04 GB" "8e-04 GB" + [4,] "6.619855e-01 GB" "6.62e-01 GB" "6.6e-01 GB" "7e-01 GB" + [5,] "6.311259e+02 GB" "6.31e+02 GB" "6.3e+02 GB" "6e+02 GB" + [6,] "6.440324e+05 GB" "6.44e+05 GB" "6.4e+05 GB" "6e+05 GB" + [7,] "4.994386e+08 GB" "4.99e+08 GB" "5.0e+08 GB" "5e+08 GB" + [8,] "7.277869e+11 GB" "7.28e+11 GB" "7.3e+11 GB" "7e+11 GB" + [9,] "3.291745e+14 GB" "3.29e+14 GB" "3.3e+14 GB" "3e+14 GB" +[10,] "8.313511e+17 GB" "8.31e+17 GB" "8.3e+17 GB" "8e+17 GB" + [,5] [,6] [,7] [,8] + [1,] "4.656613e-10 GiB" "4.66e-10 GiB" "4.7e-10 GiB" "5e-10 GiB" + [2,] "5.975956e-07 GiB" "5.98e-07 GiB" "6.0e-07 GiB" "6e-07 GiB" + [3,] "7.764672e-04 GiB" "7.76e-04 GiB" "7.8e-04 GiB" "8e-04 GiB" + [4,] "4.459146e-01 GiB" "4.46e-01 GiB" "4.5e-01 GiB" "4e-01 GiB" + [5,] "2.985889e+02 GiB" "2.99e+02 GiB" "3.0e+02 GiB" "3e+02 GiB" + [6,] "4.209112e+05 GiB" "4.21e+05 GiB" "4.2e+05 GiB" "4e+05 GiB" + [7,] "4.983449e+08 GiB" "4.98e+08 GiB" "5.0e+08 GiB" "5e+08 GiB" + [8,] "7.751081e+11 GiB" "7.75e+11 GiB" "7.8e+11 GiB" "8e+11 GiB" + [9,] "8.756173e+14 GiB" "8.76e+14 GiB" "8.8e+14 GiB" "9e+14 GiB" +[10,] "9.390947e+17 GiB" "9.39e+17 GiB" "9.4e+17 GiB" "9e+17 GiB" + [,9] [,10] [,11] [,12] + [1,] "4.656613e-10 G" "4.66e-10 G" "4.7e-10 G" "5e-10 G" + [2,] "5.975956e-07 G" "5.98e-07 G" "6.0e-07 G" "6e-07 G" + [3,] "7.764672e-04 G" "7.76e-04 G" "7.8e-04 G" "8e-04 G" + [4,] "4.459146e-01 G" "4.46e-01 G" "4.5e-01 G" "4e-01 G" + [5,] "2.985889e+02 G" "2.99e+02 G" "3.0e+02 G" "3e+02 G" + [6,] "4.209112e+05 G" "4.21e+05 G" "4.2e+05 G" "4e+05 G" + [7,] "4.983449e+08 G" "4.98e+08 G" "5.0e+08 G" "5e+08 G" + [8,] "7.751081e+11 G" "7.75e+11 G" "7.8e+11 G" "8e+11 G" + [9,] "8.756173e+14 G" "8.76e+14 G" "8.8e+14 G" "9e+14 G" +[10,] "9.390947e+17 G" "9.39e+17 G" "9.4e+17 G" "9e+17 G" +> +> +> stopifnot( is.object_sizes(as.object_sizes( 2^(1:30) ) ) ) +> stopifnot( format(as.object_sizes(124)) == "124 bytes") +> stopifnot( format(as.object_sizes(124e8), units="auto") == "11.5 GiB") +> stopifnot( format(as.object_sizes(124e8), humanReadable=TRUE) == "11.5 GiB") +> stopifnot( format(as.object_sizes(124e8), units="bytes") == "1.24e+10 bytes") +> +> tools::assertError( as.object_sizes(-1) ) +> tools::assertError( as.object_sizes('a') ) +> tools::assertError( as.object_sizes(list()) ) +> tools::assertError( as.object_sizes(NULL) ) +> tools::assertError( as.object_sizes(0+1i) ) +> +> stopifnot( format(as.object_sizes(1e40) ) == "1e+40 bytes" ) +> stopifnot( format(as.object_sizes(1e40), units="auto" ) == "8.271806e+15 YiB") +> stopifnot( format(as.object_sizes(1e40), units="bytes") == "1e+40 bytes" ) +> stopifnot( format(as.object_sizes(1e40), humanReadable=TRUE) == "8.271806e+15 YiB") +> stopifnot( format(as.object_sizes(1e40), humanReadable=FALSE) == "1e+40 bytes") +> +> options(humanReadable=TRUE) +> stopifnot( 
format(as.object_sizes(1e40) ) == "8.271806e+15 YiB") +> options(humanReadable=FALSE) +> +> proc.time() + user system elapsed + 0.421 0.052 0.464 diff -Nru gdata-2.13.3/tests/test.read.xls.R gdata-2.17.0/tests/test.read.xls.R --- gdata-2.13.3/tests/test.read.xls.R 2014-01-03 19:32:42.000000000 +0000 +++ gdata-2.17.0/tests/test.read.xls.R 2015-07-03 20:04:21.000000000 +0000 @@ -23,19 +23,35 @@ exampleFile <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile.xls') -exampleFile2007 <- file.path(path.package('gdata'),'xls', +exampleFileX <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile.xlsx') # see the number and names of sheets: sheetCount(exampleFile) -if( 'XLSX' %in% xlsFormats() ) - sheetCount(exampleFile2007) +if(! 'XLSX' %in% xlsFormats() ) + { + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + sheetCount(exampleFileX) + } + sheetNames(exampleFile) -if( 'XLSX' %in% xlsFormats() ) - sheetNames(exampleFile2007) +if(! 'XLSX' %in% xlsFormats() ) + { + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + sheetNames(exampleFileX) + } + example.1 <- read.xls(exampleFile, sheet=1) # default is first worksheet example.1 @@ -46,54 +62,140 @@ example.3 <- read.xls(exampleFile, sheet=3, header=FALSE) # third worksheet by number example.3 -example.4 <- read.xls(exampleFile, sheet=3, header=FALSE) # third worksheet by number +example.4 <- read.xls(exampleFile, sheet=4, header=FALSE) # fourth worksheet by number example.4 -if( 'XLSX' %in% xlsFormats() ) +if(! 'XLSX' %in% xlsFormats() ) { - example.x.1 <- read.xls(exampleFile2007, sheet=1) # default is first worksheet - print(example.x.1) - - example.x.2 <- read.xls(exampleFile2007, sheet=2) # second worksheet by number - print(example.x.2) - - example.x.3 <- read.xls(exampleFile2007, sheet=3, header=FALSE) # third worksheet by number - print(example.x.3) - - example.x.4 <- read.xls(exampleFile2007, sheet=3, header=FALSE) # third worksheet by number - print(example.x.4) - - data <- read.xls(exampleFile2007, sheet="Sheet Second") # and by name - print(data) - - # load the third worksheet, skipping the first two non-data lines... - data <- read.xls(exampleFile2007, sheet="Sheet with initial text", skip=2) - print(data) -} - + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + example.x.1 <- read.xls(exampleFileX, sheet=1) # default is first worksheet + print(example.x.1) + + example.x.2 <- read.xls(exampleFileX, sheet=2) # second worksheet by number + print(example.x.2) + + example.x.3 <- read.xls(exampleFileX, sheet=3, header=FALSE) # third worksheet by number + print(example.x.3) + + example.x.4 <- read.xls(exampleFileX, sheet=4, header=FALSE) # fourth worksheet by number + print(example.x.4) + + data <- read.xls(exampleFileX, sheet="Sheet Second") # and by name + print(data) + + # load the third worksheet, skipping the first two non-data lines... 
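The rewritten read.xls tests above no longer silently skip the XLSX cases: when the Perl XLSX modules are missing they print an explanatory banner, so a diff against the saved output is expected rather than treated as a failure. The same capability check can be done interactively (a sketch assuming gdata 2.17.0 with its bundled example workbook):

library(gdata)

xlsFormats()    # typically "XLS", plus "XLSX" when the Perl modules are installed

if ("XLSX" %in% xlsFormats()) {
    exampleFileX <- file.path(path.package("gdata"), "xls", "ExampleExcelFile.xlsx")
    sheetCount(exampleFileX)    # 4 sheets in the shipped example workbook
    sheetNames(exampleFileX)
}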
+ data <- read.xls(exampleFileX, sheet="Sheet with initial text", skip=2) + print(data) + } ## Check handling of skip.blank.lines=FALSE example.skip <- read.xls(exampleFile, sheet=2, blank.lines.skip=FALSE) example.skip -if( 'XLSX' %in% xlsFormats() ) +if(! 'XLSX' %in% xlsFormats() ) { - example.x.skip <- read.xls(exampleFile2007, sheet=2, blank.lines.skip=FALSE) + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + example.x.skip <- read.xls(exampleFileX, sheet=2, blank.lines.skip=FALSE) example.x.skip } + ## Check handing of fileEncoding for latin-1 characters -latin1File <- file.path(path.package('gdata'),'xls', 'latin-1.xls') +latin1File <- file.path(path.package('gdata'),'xls', 'latin-1.xls' ) latin1FileX <- file.path(path.package('gdata'),'xls', 'latin-1.xlsx') -example.latin1 <- read.xls(latin1File, fileEncoding='latin1') +if(.Platform$OS.type=="unix") + { + example.latin1 <- read.xls(latin1File, + fileEncoding='latin1', + encoding='latin1', + stringsAsFactors=FALSE) + } else { + example.latin1 <- read.xls(latin1File, + #fileEncoding='latin1', + encoding='latin1', + stringsAsFactors=FALSE) + } -if( 'XLSX' %in% xlsFormats() ) +if(! 'XLSX' %in% xlsFormats() ) { - example.latin1.x <- read.xls(latin1FileX, fileEncoding='latin1') + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + if(.Platform$OS.type=="unix") + { + example.latin1.x <- read.xls(latin1FileX, + fileEncoding='latin1', + encoding='latin1', + stringsAsFactors=FALSE) + } else { + example.latin1.x <- read.xls(latin1FileX, + #fileEncoding='latin1', + encoding='latin1', + stringsAsFactors=FALSE) + } } +## Check handling of very wide file + +wideFile <- file.path(path.package('gdata'),'xls', 'wide.xls' ) +wideFileX <- file.path(path.package('gdata'),'xls', 'wide.xlsx') + +example.wide <- read.xls(wideFile) +stopifnot(dim(example.wide)==c(0,256)) + +if( !'XLSX' %in% xlsFormats() ) + { + cat("************************************************************\n") + cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") + cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") + cat("************************************************************\n") + } else { + example.wide.x <- read.xls(wideFileX) + stopifnot(dim(example.wide.x)==c(0,16384)) + } + +## Check handling of files with dates calulcated relative to +## 1900-01-01 and 1904-01-01 + +file.1900 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1900.xls' ) +file.1904 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1904.xls' ) +fileX.1900 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1900.xlsx') +fileX.1904 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1904.xlsx') + +example.1900 <- read.xls(file.1900, sheet=3, header=FALSE) +example.1900 + +example.1904 <- read.xls(file.1904, sheet=3, header=FALSE) +example.1904 + +exampleX.1900 <- read.xls(file.1900, sheet=3, header=FALSE) +exampleX.1900 + +exampleX.1904 <- read.xls(file.1904, sheet=3, header=FALSE) +exampleX.1904 + +# all colmns should be identical +stopifnot( na.omit(example.1900 == exampleX.1900) ) +stopifnot( 
na.omit(example.1904 == exampleX.1904) ) + +# column 8 will differ by 1462 due to different date baselines (1900 vs 1904) +stopifnot( na.omit(example.1900 [,-8] == example.1904 [,-8]) ) +stopifnot( na.omit(exampleX.1900[,-8] == exampleX.1904[,-8]) ) + +stopifnot( na.omit(example.1900 [,8] - example.1904 [,8]) == 1462 ) +stopifnot( na.omit(exampleX.1900[,8] - exampleX.1904[,8]) == 1462 ) diff -Nru gdata-2.13.3/tests/test.read.xls.Rout.save gdata-2.17.0/tests/test.read.xls.Rout.save --- gdata-2.13.3/tests/test.read.xls.Rout.save 2014-01-03 19:32:42.000000000 +0000 +++ gdata-2.17.0/tests/test.read.xls.Rout.save 2015-07-03 20:05:20.000000000 +0000 @@ -1,7 +1,7 @@ -R version 3.0.1 (2013-05-16) -- "Good Sport" -Copyright (C) 2013 The R Foundation for Statistical Computing -Platform: i686-pc-linux-gnu (32-bit) +R version 3.2.0 (2015-04-16) -- "Full of Ingredients" +Copyright (C) 2015 The R Foundation for Statistical Computing +Platform: x86_64-apple-darwin13.4.0 (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. @@ -507,26 +507,42 @@ > exampleFile <- file.path(path.package('gdata'),'xls', + 'ExampleExcelFile.xls') > -> exampleFile2007 <- file.path(path.package('gdata'),'xls', +> exampleFileX <- file.path(path.package('gdata'),'xls', + 'ExampleExcelFile.xlsx') > > # see the number and names of sheets: > sheetCount(exampleFile) [1] 4 > -> if( 'XLSX' %in% xlsFormats() ) -+ sheetCount(exampleFile2007) +> if(! 'XLSX' %in% xlsFormats() ) ++ { ++ cat("************************************************************\n") ++ cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") ++ cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") ++ cat("************************************************************\n") ++ } else { ++ sheetCount(exampleFileX) ++ } [1] 4 > +> > sheetNames(exampleFile) [1] "Sheet First" "Sheet Second" [3] "Sheet with a very long name!" "Sheet with initial text" > -> if( 'XLSX' %in% xlsFormats() ) -+ sheetNames(exampleFile2007) +> if(! 'XLSX' %in% xlsFormats() ) ++ { ++ cat("************************************************************\n") ++ cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") ++ cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") ++ cat("************************************************************\n") ++ } else { ++ sheetNames(exampleFileX) ++ } [1] "Sheet First" "Sheet Second" [3] "Sheet with a very long name!" 
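The 1462-day offset asserted just above follows from how the two Excel date systems count serial numbers: the 1900 system starts at 1 on 1900-01-01 and includes a phantom 1900-02-29, while the 1904 system starts at 0 on 1904-01-01. A quick check of the arithmetic in R:

## real calendar days between the two epochs (1900 is not a leap year)
as.numeric(as.Date("1904-01-01") - as.Date("1900-01-01"))   # 1460

## + 1 for Excel's phantom 1900-02-29, + 1 because the 1900 system
## starts counting at 1 while the 1904 system starts at 0
1460 + 1 + 1   # 1462, matching the stopifnot() checks above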
"Sheet with initial text" > +> > example.1 <- read.xls(exampleFile, sheet=1) # default is first worksheet > example.1 A B C @@ -548,53 +564,74 @@ > > example.3 <- read.xls(exampleFile, sheet=3, header=FALSE) # third worksheet by number > example.3 - V1 V2 V3 V4 V5 V6 -1 1 2001-01-01 1:01 0.2058182 NA A -2 2 2002-02-02 2:02 0.2910708 NA B -3 3 2003-03-03 3:03 0.3564875 -0.84147098 C -4 4 2004-04-04 4:04 0.4116363 0.70807342 -5 5 2005-05-05 5:05 0.4602234 0.50136797 A -6 6 2006-06-06 6:06 NA 0.25136984 B -7 7 2007-07-07 7:07 0.5445436 0.06318679 B -8 8 2008-08-08 8:08 0.5821416 NA C -9 9 2009-09-09 9:09 0.6174545 0.00000000 A -10 10 2010-10-10 10:10 0.6508541 0.00000000 A + V1 V2 V3 V4 V5 V6 V7 +1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 +2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 +3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 +4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 +5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 +6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 +7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 +8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 +9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 +10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 + V8 +1 36892.04 +2 37289.08 +3 37683.13 +4 38081.17 +5 38477.21 +6 38874.25 +7 39270.30 +8 39668.34 +9 40065.38 +10 40461.42 > -> example.4 <- read.xls(exampleFile, sheet=3, header=FALSE) # third worksheet by number +> example.4 <- read.xls(exampleFile, sheet=4, header=FALSE) # fourth worksheet by number > example.4 - V1 V2 V3 V4 V5 V6 -1 1 2001-01-01 1:01 0.2058182 NA A -2 2 2002-02-02 2:02 0.2910708 NA B -3 3 2003-03-03 3:03 0.3564875 -0.84147098 C -4 4 2004-04-04 4:04 0.4116363 0.70807342 -5 5 2005-05-05 5:05 0.4602234 0.50136797 A -6 6 2006-06-06 6:06 NA 0.25136984 B -7 7 2007-07-07 7:07 0.5445436 0.06318679 B -8 8 2008-08-08 8:08 0.5821416 NA C -9 9 2009-09-09 9:09 0.6174545 0.00000000 A -10 10 2010-10-10 10:10 0.6508541 0.00000000 A + V1 +1 This line contains text that would need to be skipped to get to the data +2 +3 +4 +5 +6 +7 + V2 V3 V4 V5 V6 V7 +1 +2 This line too! +3 D E F G Factor +4 FirstRow 1 <NA> <NA> <NA> Red +5 SecondRow 2 1 <NA> <NA> Green +6 ThirdRow 3 2 1 <NA> Red +7 FourthRow 4 3 2 1 Black > -> if( 'XLSX' %in% xlsFormats() ) +> if(! 
'XLSX' %in% xlsFormats() ) + { -+ example.x.1 <- read.xls(exampleFile2007, sheet=1) # default is first worksheet -+ print(example.x.1) ++ cat("************************************************************\n") ++ cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") ++ cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") ++ cat("************************************************************\n") ++ } else { ++ example.x.1 <- read.xls(exampleFileX, sheet=1) # default is first worksheet ++ print(example.x.1) + -+ example.x.2 <- read.xls(exampleFile2007, sheet=2) # second worksheet by number -+ print(example.x.2) ++ example.x.2 <- read.xls(exampleFileX, sheet=2) # second worksheet by number ++ print(example.x.2) + -+ example.x.3 <- read.xls(exampleFile2007, sheet=3, header=FALSE) # third worksheet by number -+ print(example.x.3) ++ example.x.3 <- read.xls(exampleFileX, sheet=3, header=FALSE) # third worksheet by number ++ print(example.x.3) + -+ example.x.4 <- read.xls(exampleFile2007, sheet=3, header=FALSE) # third worksheet by number -+ print(example.x.4) ++ example.x.4 <- read.xls(exampleFileX, sheet=4, header=FALSE) # fourth worksheet by number ++ print(example.x.4) + -+ data <- read.xls(exampleFile2007, sheet="Sheet Second") # and by name -+ print(data) ++ data <- read.xls(exampleFileX, sheet="Sheet Second") # and by name ++ print(data) + -+ # load the third worksheet, skipping the first two non-data lines... -+ data <- read.xls(exampleFile2007, sheet="Sheet with initial text", skip=2) -+ print(data) -+ } ++ # load the third worksheet, skipping the first two non-data lines... ++ data <- read.xls(exampleFileX, sheet="Sheet with initial text", skip=2) ++ print(data) ++ } A B C 1 1 1 1 2 2 4 8 @@ -608,28 +645,44 @@ 2 SecondRow 2 1 NA NA Green 3 ThirdRow 3 2 1 NA Red 4 FourthRow 4 3 2 1 Black - V1 V2 V3 V4 V5 V6 -1 1 2001-01-01 1:01 0.2058182 NA A -2 2 2002-02-02 2:02 0.2910708 NA B -3 3 2003-03-03 3:03 0.3564875 -0.84147098 C -4 4 2004-04-04 4:04 0.4116363 0.70807342 -5 5 2005-05-05 5:05 0.4602234 0.50136797 A -6 6 2006-06-06 6:06 NA 0.25136984 B -7 7 2007-07-07 7:07 0.5445436 0.06318679 B -8 8 2008-08-08 8:08 0.5821416 NA C -9 9 2009-09-09 9:09 0.6174545 0.00000000 A -10 10 2010-10-10 10:10 0.6508541 0.00000000 A - V1 V2 V3 V4 V5 V6 -1 1 2001-01-01 1:01 0.2058182 NA A -2 2 2002-02-02 2:02 0.2910708 NA B -3 3 2003-03-03 3:03 0.3564875 -0.84147098 C -4 4 2004-04-04 4:04 0.4116363 0.70807342 -5 5 2005-05-05 5:05 0.4602234 0.50136797 A -6 6 2006-06-06 6:06 NA 0.25136984 B -7 7 2007-07-07 7:07 0.5445436 0.06318679 B -8 8 2008-08-08 8:08 0.5821416 NA C -9 9 2009-09-09 9:09 0.6174545 0.00000000 A -10 10 2010-10-10 10:10 0.6508541 0.00000000 A + V1 V2 V3 V4 V5 V6 V7 +1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 +2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 +3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 +4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 +5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 +6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 +7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 +8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 +9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 +10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 + V8 +1 36892.04 +2 37289.08 +3 37683.13 +4 38081.17 +5 38477.21 +6 38874.25 +7 39270.30 +8 39668.34 +9 40065.38 +10 40461.42 
+ V1 +1 This line contains text that would need to be skipped to get to the data +2 +3 +4 +5 +6 +7 + V2 V3 V4 V5 V6 V7 +1 +2 This line too! +3 D E F G Factor +4 FirstRow 1 <NA> <NA> <NA> Red +5 SecondRow 2 1 <NA> <NA> Green +6 ThirdRow 3 2 1 <NA> Red +7 FourthRow 4 3 2 1 Black X D E. F G Factor 1 FirstRow 1 NA NA NA Red 2 SecondRow 2 1 NA NA Green @@ -641,7 +694,6 @@ 3 NA ThirdRow 3 2 1 NA Red 4 NA FourthRow 4 3 2 1 Black > -> > ## Check handling of skip.blank.lines=FALSE > > example.skip <- read.xls(exampleFile, sheet=2, blank.lines.skip=FALSE) @@ -653,9 +705,14 @@ 4 ThirdRow 3 2 1 NA Red 5 FourthRow 4 3 2 1 Black > -> if( 'XLSX' %in% xlsFormats() ) +> if(! 'XLSX' %in% xlsFormats() ) + { -+ example.x.skip <- read.xls(exampleFile2007, sheet=2, blank.lines.skip=FALSE) ++ cat("************************************************************\n") ++ cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") ++ cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") ++ cat("************************************************************\n") ++ } else { ++ example.x.skip <- read.xls(exampleFileX, sheet=2, blank.lines.skip=FALSE) + example.x.skip + } X D E. F G Factor @@ -666,20 +723,185 @@ 5 FourthRow 4 3 2 1 Black > > +> > ## Check handing of fileEncoding for latin-1 characters > -> latin1File <- file.path(path.package('gdata'),'xls', 'latin-1.xls') +> latin1File <- file.path(path.package('gdata'),'xls', 'latin-1.xls' ) > latin1FileX <- file.path(path.package('gdata'),'xls', 'latin-1.xlsx') > -> example.latin1 <- read.xls(latin1File, fileEncoding='latin1') +> if(.Platform$OS.type=="unix") ++ { ++ example.latin1 <- read.xls(latin1File, ++ fileEncoding='latin1', ++ encoding='latin1', ++ stringsAsFactors=FALSE) ++ } else { ++ example.latin1 <- read.xls(latin1File, ++ #fileEncoding='latin1', ++ encoding='latin1', ++ stringsAsFactors=FALSE) ++ } > -> if( 'XLSX' %in% xlsFormats() ) +> if(! 
'XLSX' %in% xlsFormats() ) + { -+ example.latin1.x <- read.xls(latin1FileX, fileEncoding='latin1') ++ cat("************************************************************\n") ++ cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") ++ cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") ++ cat("************************************************************\n") ++ } else { ++ if(.Platform$OS.type=="unix") ++ { ++ example.latin1.x <- read.xls(latin1FileX, ++ fileEncoding='latin1', ++ encoding='latin1', ++ stringsAsFactors=FALSE) ++ } else { ++ example.latin1.x <- read.xls(latin1FileX, ++ #fileEncoding='latin1', ++ encoding='latin1', ++ stringsAsFactors=FALSE) ++ } + } > > +> ## Check handling of very wide file +> +> wideFile <- file.path(path.package('gdata'),'xls', 'wide.xls' ) +> wideFileX <- file.path(path.package('gdata'),'xls', 'wide.xlsx') +> +> example.wide <- read.xls(wideFile) +> stopifnot(dim(example.wide)==c(0,256)) +> +> if( !'XLSX' %in% xlsFormats() ) ++ { ++ cat("************************************************************\n") ++ cat("** DIFF IN THIS SECTION IS EXPECTED BECAUSE PERL PACKAGES **\n") ++ cat("** FOR SUPPORTING XLSX ARE NOT INSTALLED **\n") ++ cat("************************************************************\n") ++ } else { ++ example.wide.x <- read.xls(wideFileX) ++ stopifnot(dim(example.wide.x)==c(0,16384)) ++ } +> +> ## Check handling of files with dates calulcated relative to +> ## 1900-01-01 and 1904-01-01 +> +> file.1900 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1900.xls' ) +> file.1904 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1904.xls' ) +> fileX.1900 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1900.xlsx') +> fileX.1904 <- file.path(path.package('gdata'),'xls', 'ExampleExcelFile_1904.xlsx') +> +> example.1900 <- read.xls(file.1900, sheet=3, header=FALSE) +> example.1900 + V1 V2 V3 V4 V5 V6 V7 +1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 +2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 +3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 +4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 +5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 +6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 +7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 +8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 +9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 +10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 + V8 +1 36892.04 +2 37289.08 +3 37683.13 +4 38081.17 +5 38477.21 +6 38874.25 +7 39270.30 +8 39668.34 +9 40065.38 +10 40461.42 +> +> example.1904 <- read.xls(file.1904, sheet=3, header=FALSE) +> example.1904 + V1 V2 V3 V4 V5 V6 V7 +1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 +2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 +3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 +4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 +5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 +6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 +7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 +8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 +9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 +10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 
2010-10-10 10:10:10.10 + V8 +1 35430.04 +2 35827.08 +3 36221.13 +4 36619.17 +5 37015.21 +6 37412.25 +7 37808.30 +8 38206.34 +9 38603.38 +10 38999.42 +> +> exampleX.1900 <- read.xls(file.1900, sheet=3, header=FALSE) +> exampleX.1900 + V1 V2 V3 V4 V5 V6 V7 +1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 +2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 +3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 +4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 +5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 +6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 +7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 +8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 +9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 +10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 + V8 +1 36892.04 +2 37289.08 +3 37683.13 +4 38081.17 +5 38477.21 +6 38874.25 +7 39270.30 +8 39668.34 +9 40065.38 +10 40461.42 +> +> exampleX.1904 <- read.xls(file.1904, sheet=3, header=FALSE) +> exampleX.1904 + V1 V2 V3 V4 V5 V6 V7 +1 1 2001-01-01 01:01:00.00 0.2058182 NA A 2001-01-01 01:01:01.01 +2 2 2002-02-02 02:02:00.00 0.2910708 NA B 2002-02-02 02:02:02.02 +3 3 2003-03-03 03:03:00.00 0.3564875 -0.84147098 C 2003-03-03 03:03:03.03 +4 4 2004-04-04 04:04:00.00 0.4116363 0.70807342 2004-04-04 04:04:04.04 +5 5 2005-05-05 05:05:00.00 0.4602234 0.50136797 A 2005-05-05 05:05:05.05 +6 6 2006-06-06 06:06:00.00 NA 0.25136984 B 2006-06-06 06:06:06.06 +7 7 2007-07-07 07:07:00.00 0.5445436 0.06318679 B 2007-07-07 07:07:07.07 +8 8 2008-08-08 08:08:00.00 0.5821416 NA C 2008-08-08 08:08:08.08 +9 9 2009-09-09 09:09:00.00 0.6174545 0.00000000 A 2009-09-09 09:09:09.09 +10 10 2010-10-10 10:10:00.00 0.6508541 0.00000000 A 2010-10-10 10:10:10.10 + V8 +1 35430.04 +2 35827.08 +3 36221.13 +4 36619.17 +5 37015.21 +6 37412.25 +7 37808.30 +8 38206.34 +9 38603.38 +10 38999.42 +> +> # all colmns should be identical +> stopifnot( na.omit(example.1900 == exampleX.1900) ) +> stopifnot( na.omit(example.1904 == exampleX.1904) ) +> +> # column 8 will differ by 1462 due to different date baselines (1900 vs 1904) +> stopifnot( na.omit(example.1900 [,-8] == example.1904 [,-8]) ) +> stopifnot( na.omit(exampleX.1900[,-8] == exampleX.1904[,-8]) ) +> +> stopifnot( na.omit(example.1900 [,8] - example.1904 [,8]) == 1462 ) +> stopifnot( na.omit(exampleX.1900[,8] - exampleX.1904[,8]) == 1462 ) > > proc.time() user system elapsed - 10.072 1.468 12.094 + 13.495 0.834 14.549 diff -Nru gdata-2.13.3/tests/test.reorder.factor.R gdata-2.17.0/tests/test.reorder.factor.R --- gdata-2.13.3/tests/test.reorder.factor.R 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/tests/test.reorder.factor.R 2015-04-23 02:37:51.000000000 +0000 @@ -0,0 +1,11 @@ +## Test results before and after loading gdata + +m <- factor(c('a','b','c')) + +( m1 <- reorder(m, X=c(3, 2, 1)) ) + +library(gdata) + +( m2 <- reorder(m, X=c(3, 2, 1)) ) + +stopifnot(identical(m1,m2)) diff -Nru gdata-2.13.3/tests/test.reorder.factor.Rout.save gdata-2.17.0/tests/test.reorder.factor.Rout.save --- gdata-2.13.3/tests/test.reorder.factor.Rout.save 1970-01-01 00:00:00.000000000 +0000 +++ gdata-2.17.0/tests/test.reorder.factor.Rout.save 2015-07-03 20:05:20.000000000 +0000 @@ -0,0 +1,56 @@ + +R version 3.2.0 (2015-04-16) -- "Full of Ingredients" +Copyright (C) 2015 The R Foundation for Statistical Computing +Platform: 
x86_64-apple-darwin13.4.0 (64-bit) + +R is free software and comes with ABSOLUTELY NO WARRANTY. +You are welcome to redistribute it under certain conditions. +Type 'license()' or 'licence()' for distribution details. + +R is a collaborative project with many contributors. +Type 'contributors()' for more information and +'citation()' on how to cite R or R packages in publications. + +Type 'demo()' for some demos, 'help()' for on-line help, or +'help.start()' for an HTML browser interface to help. +Type 'q()' to quit R. + +> ## Test results before and after loading gdata +> +> m <- factor(c('a','b','c')) +> +> ( m1 <- reorder(m, X=c(3, 2, 1)) ) +[1] a b c +attr(,"scores") +a b c +3 2 1 +Levels: c b a +> +> library(gdata) +gdata: read.xls support for 'XLS' (Excel 97-2004) files ENABLED. + +gdata: read.xls support for 'XLSX' (Excel 2007+) files ENABLED. + +Attaching package: 'gdata' + +The following object is masked from 'package:stats': + + nobs + +The following object is masked from 'package:utils': + + object.size + +> +> ( m2 <- reorder(m, X=c(3, 2, 1)) ) +[1] a b c +attr(,"scores") +a b c +3 2 1 +Levels: c b a +> +> stopifnot(identical(m1,m2)) +> +> proc.time() + user system elapsed + 0.341 0.046 0.381 diff -Nru gdata-2.13.3/tests/tests.write.fwf.Rout.save gdata-2.17.0/tests/tests.write.fwf.Rout.save --- gdata-2.13.3/tests/tests.write.fwf.Rout.save 2014-01-03 19:32:42.000000000 +0000 +++ gdata-2.17.0/tests/tests.write.fwf.Rout.save 2015-07-03 20:05:21.000000000 +0000 @@ -1,7 +1,7 @@ -R version 3.0.1 (2013-05-16) -- "Good Sport" -Copyright (C) 2013 The R Foundation for Statistical Computing -Platform: i686-pc-linux-gnu (32-bit) +R version 3.2.0 (2015-04-16) -- "Full of Ingredients" +Copyright (C) 2015 The R Foundation for Statistical Computing +Platform: x86_64-apple-darwin13.4.0 (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. 
@@ -231,4 +231,4 @@ > > proc.time() user system elapsed - 1.464 0.152 1.631 + 0.426 0.048 0.462 diff -Nru gdata-2.13.3/tests/unitTests/runit.write.fwf.R gdata-2.17.0/tests/unitTests/runit.write.fwf.R --- gdata-2.13.3/tests/unitTests/runit.write.fwf.R 2014-04-05 02:23:56.000000000 +0000 +++ gdata-2.17.0/tests/unitTests/runit.write.fwf.R 2015-04-25 16:23:33.000000000 +0000 @@ -1,7 +1,7 @@ ### runit.write.fwf.R ###------------------------------------------------------------------------ ### What: Unit tests for write.fwf -### $Id: runit.write.fwf.R 1784 2014-04-05 02:23:45Z warnes $ +### $Id: runit.write.fwf.R 1966 2015-04-25 16:23:31Z warnes $ ### Time-stamp: <2008-08-05 11:58:50 ggorjan> ###------------------------------------------------------------------------ @@ -59,9 +59,17 @@ digits=c(0, 1), exp=c(0, 0), stringsAsFactors=FALSE) - formatInfo <- write.fwf(testData[, c("num1", "num2")], formatInfo=TRUE) + + testData1 <- testData[, c("num1", "num2")] + testData1M <- as.matrix(testData1) + + formatInfo <- write.fwf(testData1, formatInfo=TRUE) checkEquals(formatInfo, formatInfoT) + formatInfoM <- write.fwf(testData1M, formatInfo=TRUE) + checkEquals(formatInfoM, formatInfoT) + + ## scientific notation dd <- options("digits"); options(digits = 7) testData2 <- data.frame(a=123, b=pi, c=1e8, d=1e222) @@ -91,39 +99,66 @@ digits=c(0, 0, 1), exp=c(0, 0, 0), stringsAsFactors=FALSE) - formatInfoR <- write.fwf(testData[, c("num1", "num2")], formatInfo=TRUE, - rownames=TRUE, rowCol="row") + testData3 <- testData[, c("num1", "num2")] + testData3M <- as.matrix(testData3) + + formatInfoR <- write.fwf(testData3, formatInfo=TRUE, rownames=TRUE, + rowCol="row") checkEquals(formatInfoR, formatInfoTR) + formatInfoR <- write.fwf(testData3M, formatInfo=TRUE, rownames=TRUE, + rowCol="row") + checkEquals(formatInfoR, formatInfoTR) + + ## quoteInfo alone does not have any effect - formatInfoI <- write.fwf(testData[, c("num1", "num2")], formatInfo=TRUE, - quoteInfo=TRUE) + formatInfoI <- write.fwf(testData3, formatInfo=TRUE, quoteInfo=TRUE) + checkEquals(formatInfoI, formatInfoT) + + formatInfoI <- write.fwf(testData3M, formatInfo=TRUE, quoteInfo=TRUE) checkEquals(formatInfoI, formatInfoT) ## quote - formatInfoQ <- write.fwf(testData[, c("num1", "num2")], formatInfo=TRUE, - quote=TRUE) formatInfoTQ <- formatInfoT formatInfoTQ$position <- c(1, 6) formatInfoTQ$width <- c(4, 5) + + formatInfoQ <- write.fwf(testData3, formatInfo=TRUE, quote=TRUE) + checkEquals(formatInfoQ, formatInfoTQ) + + formatInfoQ <- write.fwf(testData3M, formatInfo=TRUE, quote=TRUE) checkEquals(formatInfoQ, formatInfoTQ) ## quote without quoteInfo - formatInfoQI <- write.fwf(testData[, c("num1", "num2")], formatInfo=TRUE, - quote=TRUE, quoteInfo=FALSE) formatInfoTQI <- formatInfoT formatInfoTQI$position <- c(2, 6) + + formatInfoQI <- write.fwf(testData3, formatInfo=TRUE, quote=TRUE, + quoteInfo=FALSE) + checkEquals(formatInfoQI, formatInfoTQI) + + formatInfoQI <- write.fwf(testData3M, formatInfo=TRUE, quote=TRUE, + quoteInfo=FALSE) checkEquals(formatInfoQI, formatInfoTQI) ## width ## --> default width for num1 is 2 - formatInfo <- write.fwf(testData[, "num1", drop=FALSE], width=10, formatInfo=TRUE) + testData4 <- testData[, "num1", drop=FALSE] + testData4M <- as.matrix(testData[, "num1", drop=FALSE]) + + formatInfo <- write.fwf(testData4, width=10, formatInfo=TRUE) + checkEquals(formatInfo$width, 10) + + formatInfo <- write.fwf(testData4M, width=10, formatInfo=TRUE) checkEquals(formatInfo$width, 10) ## too small value in width (this also tests 
recycling) ## --> proper width for num1 is 2, while for num2 it is 3 checkException(write.fwf(testData[, c("num1", "num2")], width=2)) checkException(write.fwf(testData[, c("num1", "num2")], width=c(2, 1))) + + ## Done + cat("\nDONE.\n\n") } ### }}}
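The updated unit tests above run each write.fwf() formatInfo check twice, once on a data frame and once on as.matrix() of the same data, to confirm that both input types yield the same column positions and widths. The same comparison can be made interactively (a sketch with hypothetical data, assuming gdata 2.17.0):

library(gdata)

df <- data.frame(num1=c(1, 2), num2=c(1, 10.5))
m  <- as.matrix(df)

fi.df <- write.fwf(df, formatInfo=TRUE)   # format info for the data frame
fi.m  <- write.fwf(m,  formatInfo=TRUE)   # matrix input now takes the same code path
fi.df
fi.m                                      # should agree on positions and widths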