diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/debian/changelog cabal-install-3.2-3.2+git20200127.2.b84fd4f/debian/changelog --- cabal-install-3.2-3.2+git20191216.2.e076113/debian/changelog 2019-12-17 14:09:00.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/debian/changelog 2020-01-30 20:21:19.000000000 +0000 @@ -1,5 +1,5 @@ -cabal-install-3.2 (3.2+git20191216.2.e076113-6~19.10) eoan; urgency=medium +cabal-install-3.2 (3.2+git20200127.2.b84fd4f-6~19.10) eoan; urgency=medium * Initial release - -- Herbert Valerio Riedel Tue, 17 Dec 2019 15:09:00 +0100 + -- Herbert Valerio Riedel Thu, 30 Jan 2020 21:21:19 +0100 diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/debian/control cabal-install-3.2-3.2+git20200127.2.b84fd4f/debian/control --- cabal-install-3.2-3.2+git20191216.2.e076113/debian/control 2019-12-17 14:07:37.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/debian/control 2020-01-30 20:20:00.000000000 +0000 @@ -2,7 +2,7 @@ Section: universe/haskell Priority: extra Maintainer: Herbert Valerio Riedel -Build-Depends: debhelper (>= 8.0.0), zlib1g-dev, ghc-8.4.4 +Build-Depends: debhelper (>= 8.0.0), zlib1g-dev, ghc-8.6.5 Standards-Version: 3.9.4 Homepage: http://www.haskell.org/ghc/ diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/debian/rules cabal-install-3.2-3.2+git20200127.2.b84fd4f/debian/rules --- cabal-install-3.2-3.2+git20191216.2.e076113/debian/rules 2019-12-17 14:07:37.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/debian/rules 2020-01-30 20:20:00.000000000 +0000 @@ -6,7 +6,7 @@ dh $@ override_dh_auto_install: - PATH=/opt/ghc/8.4.4/bin:$$PATH PREFIX=$(CURDIR)/debian/cabal-install-3.2/opt/cabal/3.2 $(CURDIR)/build.sh + PATH=/opt/ghc/8.6.5/bin:$$PATH PREFIX=$(CURDIR)/debian/cabal-install-3.2/opt/cabal/3.2 $(CURDIR)/build.sh mkdir -p $(CURDIR)/debian/cabal-install-3.2/opt/ghc/bin mkdir -p $(CURDIR)/debian/cabal-install-3.2/opt/cabal/bin rm -rf $(CURDIR)/debian/cabal-install-3.2/opt/cabal/3.2/lib diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/buildplan.lst cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/buildplan.lst --- cabal-install-3.2-3.2+git20191216.2.e076113/src/buildplan.lst 2019-12-17 14:07:21.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/buildplan.lst 2020-01-30 20:19:47.000000000 +0000 @@ -1,23 +1,19 @@ +Cabal-3.2.0.0 base16-bytestring-0.1.1.6 base64-bytestring-1.0.0.2 cryptohash-sha256-0.11.101.0 echo-0.1.3 ed25519-0.0.5.0 +hashable-1.3.0.0 lukko-0.1.1.1 -mtl-2.2.2 network-3.1.1.1 +network-uri-2.6.1.0 random-1.1 -stm-2.5.0.0 tar-0.5.1.1 -text-1.2.4.0 zlib-0.6.2.1 resolv-0.1.1.2 -edit-distance-0.2.2.1 -parsec-3.1.14.0 -hashable-1.3.0.0 -network-uri-2.6.1.0 -Cabal-3.2.0.0 async-2.2.2 HTTP-4000.3.14 +edit-distance-0.2.2.1 hackage-security-0.6.0.0 cabal-install-3.2.0.0 diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/ChangeLog.md cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/ChangeLog.md --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/ChangeLog.md 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/ChangeLog.md 2020-01-30 20:19:40.000000000 +0000 @@ -1,10 +1,40 @@ -# 3.1.0.0 (current development version) +# 3.2.0.0 [Someone](mailto:somewhere@example.com) February 2020 * `cabal check` verifies `cpp-options` more pedantically, allowing only options starting with `-D` and `-U`. * Don’t rebuild world when new ghc flags that affect how error messages are presented is specified. 
+ * Fix multilib build-depends parsing (#5846) + * Change free text `String` fields to use `ShortText` in package description + and installed package info. + * Split `Distribution.Types.Flag` and `Distribution.Types.ConfVar` out of + `Distribution.Types.GenericPackageDescription` + * Add GHC-8.10 support, including new extensions to + `Language.Haskell.Extension` + * Use more `NonEmpty` instead of ordinary lists + * Add `Distribution.Utils.Structured` for fingerprinting `Binary` blobs + * Add `null`, `length` and `unsafeFromUTF8BS` to `Distribution.Utils.ShortText` + * Refactor `Distribution.Utils.IOData` module + * Rename `Distribution.Compat.MD5` to `Distribution.Utils.MD5` + * Add `safeHead`, `safeTail`, `safeLast` to `Distribution.Utils.Generic` + * Add `unsnoc` and `unsnocNE` to `Distribution.Utils.Generic` + * Add `Set'` modifier to `Distribution.Parsec.Newtypes` + * Add `Distribution.Compat.Async` - ---- +# 3.0.1.0 TBW + * Add GHC-8.8 flags to normaliseGhcFlags + ([#6379](https://github.com/haskell/cabal/pull/6379)) + * Typo fixes + ([#6372](https://github.com/haskell/cabal/pull/6372)) + * Limit version number parts to contain at most 9 digits + ([#6386](https://github.com/haskell/cabal/pull/6386)) + * Fix boundless sublibrary dependency parse failure + ([#5846](https://github.com/haskell/cabal/issues/5846)) + * `cabal check` verifies `cpp-options` more pedantically, allowing only + options starting with `-D` and `-U`. + * Don’t rebuild world when new ghc flags that affect how error + messages are presented is specified. + * Fix dropExeExtension behaviour on Windows + ([#6287](https://github.com/haskell/cabal/pull/6287)) # 3.0.0.0 [Mikhail Glushenkov](mailto:mikhail.glushenkov@gmail.com) August 2019 * The 3.0 migration guide gives advice on adapting Custom setup diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Compat/Async.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Compat/Async.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Compat/Async.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Compat/Async.hs 2020-01-30 20:19:39.000000000 +0000 @@ -6,6 +6,8 @@ -- Copyright (c) 2012, Simon Marlow -- Licensed under BSD-3-Clause -- +-- @since 3.2.0.0 +-- module Distribution.Compat.Async ( AsyncM, withAsync, waitCatch, diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Fields/Field.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Fields/Field.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Fields/Field.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Fields/Field.hs 2020-01-30 20:19:39.000000000 +0000 @@ -73,7 +73,7 @@ -- | Section arguments, e.g. name of the library data SectionArg ann = SecArgName !ann !ByteString - -- ^ identifier, or omething which loos like number. Also many dot numbers, i.e. "7.6.3" + -- ^ identifier, or something which looks like number. Also many dot numbers, i.e.
"7.6.3" | SecArgStr !ann !ByteString -- ^ quoted string | SecArgOther !ann !ByteString diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/PackageDescription/Check.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/PackageDescription/Check.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/PackageDescription/Check.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/PackageDescription/Check.hs 2020-01-30 20:19:38.000000000 +0000 @@ -56,7 +56,7 @@ import Distribution.Types.ExeDependency import Distribution.Types.LibraryName import Distribution.Types.UnqualComponentName -import Distribution.Utils.Generic (isAscii, safeInit) +import Distribution.Utils.Generic (isAscii) import Distribution.Verbosity import Distribution.Version import Language.Haskell.Extension diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Parsec/Newtypes.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Parsec/Newtypes.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Parsec/Newtypes.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Parsec/Newtypes.hs 2020-01-30 20:19:39.000000000 +0000 @@ -122,6 +122,8 @@ pretty = prettySep (Proxy :: Proxy sep) . map (pretty . (pack :: a -> b)) . unpack -- | Like 'List', but for 'Set'. +-- +-- @since 3.2.0.0 newtype Set' sep b a = Set' { _getSet :: Set a } -- | 'alaSet' and 'alaSet'' are simply 'Set'' constructor, with additional phantom @@ -136,10 +138,13 @@ -- >>> unpack' (alaSet' FSep Token) <$> eitherParsec "foo bar foo" -- Right (fromList ["bar","foo"]) -- +-- @since 3.2.0.0 alaSet :: sep -> Set a -> Set' sep (Identity a) a alaSet _ = Set' -- | More general version of 'alaSet'. 
+-- +-- @since 3.2.0.0 alaSet' :: sep -> (a -> b) -> Set a -> Set' sep b a alaSet' _ _ = Set' diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Simple/Compiler.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Simple/Compiler.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Simple/Compiler.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Simple/Compiler.hs 2020-01-30 20:19:39.000000000 +0000 @@ -73,7 +73,6 @@ import Prelude () import Distribution.Compat.Prelude -import Distribution.Utils.Generic(safeLast) import Distribution.Pretty import Distribution.Compiler diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Simple/Configure.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Simple/Configure.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Simple/Configure.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Simple/Configure.hs 2020-01-30 20:19:39.000000000 +0000 @@ -102,7 +102,6 @@ import Distribution.Backpack.PreExistingComponent import Distribution.Backpack.ConfiguredComponent (newPackageDepsBehaviour) import Distribution.Backpack.Id -import Distribution.Utils.Generic import Distribution.Utils.LogProgress import qualified Distribution.Simple.GHC as GHC @@ -972,7 +971,7 @@ -- Reinterpret the "package name" as an unqualified component -- name = LSubLibName $ packageNameToUnqualComponentName depName - -- Check whether a libray exists and is visible. + -- Check whether a library exists and is visible. -- We don't disambiguate between dependency on non-existent or private -- library yet, so we just return a bool and later report a generic error. visible lib = maybe diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Simple/HaskellSuite.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Simple/HaskellSuite.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Simple/HaskellSuite.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Simple/HaskellSuite.hs 2020-01-30 20:19:39.000000000 +0000 @@ -26,7 +26,6 @@ import Distribution.Simple.LocalBuildInfo import Distribution.System (Platform) import Distribution.Compat.Exception -import Distribution.Utils.Generic import Language.Haskell.Extension import Distribution.Simple.Program.Builtin diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Simple/Test/Log.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Simple/Test/Log.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Simple/Test/Log.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Simple/Test/Log.hs 2020-01-30 20:19:39.000000000 +0000 @@ -143,7 +143,7 @@ where addTriple (p1, f1, e1) (p2, f2, e2) = (p1 + p2, f1 + f2, e1 + e2) --- | Print a summary of a single test case's result to the console, supressing +-- | Print a summary of a single test case's result to the console, suppressing -- output for certain verbosity or test filter levels. 
summarizeTest :: Verbosity -> TestShowDetails -> TestLogs -> IO () summarizeTest _ _ (GroupLogs {}) = return () diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Simple/Utils.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Simple/Utils.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Simple/Utils.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Simple/Utils.hs 2020-01-30 20:19:39.000000000 +0000 @@ -154,6 +154,8 @@ ordNubRight, safeHead, safeTail, + safeLast, + safeInit, unintersperse, wrapText, wrapLine, @@ -248,7 +250,7 @@ #elif defined(CABAL_VERSION) cabalVersion = mkVersion [CABAL_VERSION] #else -cabalVersion = mkVersion [1,9999] --used when bootstrapping +cabalVersion = mkVersion [3,0] --used when bootstrapping #endif -- ---------------------------------------------------------------------------- diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Types/BuildInfo.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Types/BuildInfo.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Types/BuildInfo.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Types/BuildInfo.hs 2020-01-30 20:19:39.000000000 +0000 @@ -82,7 +82,7 @@ -- Example 2: a library that is being built by a foreing tool (e.g. rust) -- and copied and registered together with this library. The -- logic on how this library is built will have to be encoded in a - -- custom Setup for now. Oherwise cabal would need to lear how to + -- custom Setup for now. Otherwise cabal would need to lear how to -- call arbitrary library builders. extraLibFlavours :: [String], -- ^ Hidden Flag. This set of strings, will be appended to all libraries when -- copying. E.g. [libHS_ | flavour <- extraLibFlavours]. This diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Types/PackageDescription.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Types/PackageDescription.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Types/PackageDescription.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Types/PackageDescription.hs 2020-01-30 20:19:39.000000000 +0000 @@ -475,6 +475,6 @@ <*> (traverse . L.buildInfo) f x6 -- benchmarks <*> pure a20 -- data files <*> pure a21 -- data dir - <*> pure a22 -- exta src files + <*> pure a22 -- extra src files <*> pure a23 -- extra temp files <*> pure a24 -- extra doc files diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Utils/Generic.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Utils/Generic.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Utils/Generic.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Utils/Generic.hs 2020-01-30 20:19:39.000000000 +0000 @@ -369,21 +369,29 @@ bSet = Set.fromList b -- | A total variant of 'head'. +-- +-- @since 3.2.0.0 safeHead :: [a] -> Maybe a safeHead [] = Nothing safeHead (x:_) = Just x -- | A total variant of 'tail'. +-- +-- @since 3.2.0.0 safeTail :: [a] -> [a] safeTail [] = [] safeTail (_:xs) = xs -- | A total variant of 'last'. 
+-- +-- @since 3.2.0.0 safeLast :: [a] -> Maybe a safeLast [] = Nothing safeLast (x:xs) = Just (foldl (\_ a -> a) x xs) -- | A total variant of 'init'. +-- +-- @since 3.2.0.0 safeInit :: [a] -> [a] safeInit [] = [] safeInit [_] = [] @@ -485,6 +493,8 @@ -- >>> unsnoc [] -- Nothing -- +-- @since 3.2.0.0 +-- unsnoc :: [a] -> Maybe ([a], a) unsnoc [] = Nothing unsnoc (x:xs) = Just (unsnocNE (x :| xs)) @@ -499,6 +509,8 @@ -- >>> unsnocNE (1 :| []) -- ([],1) -- +-- @since 3.2.0.0 +-- unsnocNE :: NonEmpty a -> ([a], a) unsnocNE (x:|xs) = go x xs where go y [] = ([], y) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Utils/IOData.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Utils/IOData.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Utils/IOData.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Utils/IOData.hs 2020-01-30 20:19:38.000000000 +0000 @@ -78,7 +78,7 @@ -- This is the dual operation ot 'hGetIODataContents', -- and consequently the handle is closed with `hClose`. -- --- /Note:/ this performes lazy-IO. +-- /Note:/ this performs lazy-IO. -- -- @since 2.2 hPutContents :: System.IO.Handle -> IOData -> Prelude.IO () diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Utils/MD5.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Utils/MD5.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Utils/MD5.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Utils/MD5.hs 2020-01-30 20:19:38.000000000 +0000 @@ -28,21 +28,25 @@ -- >>> showMD5 $ md5 $ BS.pack [0..127] -- "37eff01866ba3f538421b30b7cbefcac" -- +-- @since 3.2.0.0 showMD5 :: MD5 -> String showMD5 (Fingerprint a b) = pad a' ++ pad b' where a' = showHex a "" b' = showHex b "" pad s = replicate (16 - length s) '0' ++ s +-- | @since 3.2.0.0 md5 :: BS.ByteString -> MD5 md5 bs = unsafeDupablePerformIO $ BS.unsafeUseAsCStringLen bs $ \(ptr, len) -> fingerprintData (castPtr ptr) len +-- | @since 3.2.0.0 binaryPutMD5 :: MD5 -> Put binaryPutMD5 (Fingerprint a b) = do putWord64le a putWord64le b +-- | @since 3.2.0.0 binaryGetMD5 :: Get MD5 binaryGetMD5 = do a <- getWord64le diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Utils/ShortText.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Utils/ShortText.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Utils/ShortText.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Utils/ShortText.hs 2020-01-30 20:19:39.000000000 +0000 @@ -71,9 +71,13 @@ fromShortText :: ShortText -> String -- | Convert from UTF-8 encoded strict 'ByteString'. +-- +-- @since 3.2.0.0 unsafeFromUTF8BS :: BS.ByteString -> ShortText -- | Text whether 'ShortText' is empty. +-- +-- @since 3.2.0.0 null :: ShortText -> Bool -- | Compact representation of short 'Strings' @@ -148,6 +152,8 @@ fromString = toShortText -- | /O(n)/. Length in characters. /Slow/ as converts to string. +-- +-- @since 3.2.0.0 length :: ShortText -> Int length = List.length . fromShortText -- Note: avoid using it, we use it @cabal check@ implementation, where it's ok. 
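As a quick, hedged illustration of the helpers touched in the Cabal-3.2.0.0 hunks above (the total list functions in Distribution.Utils.Generic and the renamed Distribution.Utils.MD5 module), the following standalone sketch is not part of the packaging diff; the module name and example values are placeholders, and it assumes only that the Cabal-3.2.0.0 library and bytestring are available:

    -- Sketch only: exercises the helpers shown in the hunks above.
    module Main (main) where

    import qualified Data.ByteString as BS
    import Distribution.Utils.Generic (safeHead, safeInit, safeLast, unsnoc)
    import Distribution.Utils.MD5     (md5, showMD5)

    main :: IO ()
    main = do
      -- The total variants return Maybe/[] instead of crashing on [].
      print (safeHead ([] :: [Int]))     -- Nothing
      print (safeLast [1, 2, 3 :: Int])  -- Just 3
      print (safeInit [1, 2, 3 :: Int])  -- [1,2]
      print (unsnoc   [1, 2, 3 :: Int])  -- Just ([1,2],3)
      -- md5/showMD5 wrap GHC's fingerprintData; the expected output below
      -- matches the doctest in the Distribution.Utils.MD5 hunk.
      putStrLn (showMD5 (md5 (BS.pack [0 .. 127])))  -- 37eff01866ba3f538421b30b7cbefcac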
diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Utils/Structured.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Utils/Structured.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/Distribution/Utils/Structured.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/Distribution/Utils/Structured.hs 2020-01-30 20:19:38.000000000 +0000 @@ -49,8 +49,10 @@ -- | These functions operate like @binary@'s counterparts, -- but the serialised version has a structure hash in front. structuredEncode, + structuredEncodeFile, structuredDecode, structuredDecodeOrFailIO, + structuredDecodeFileOrFail, -- * Structured class Structured (structure), MD5, @@ -178,7 +180,7 @@ -- | Flatten 'Structure' into something we can calculate hash of. -- -- As 'Structure' can be potentially infinite. For mutually recursive types, --- we keep track of 'TypeRep's, and put just 'TypeRep' name when it's occured +-- we keep track of 'TypeRep's, and put just 'TypeRep' name when it's occurred -- another time. structureBuilder :: Structure -> Builder.Builder structureBuilder s0 = State.evalState (go s0) Map.empty where @@ -234,6 +236,8 @@ -- instance 'Structured' Record -- @ -- +-- @since 3.2.0.0 +-- class Typeable a => Structured a where structure :: Proxy a -> Structure default structure :: (Generic a, GStructured (Rep a)) => Proxy a -> Structure @@ -262,6 +266,10 @@ => a -> LBS.ByteString structuredEncode x = Binary.encode (Tag :: Tag a, x) +-- | Lazily serialise a value to a file +structuredEncodeFile :: (Binary.Binary a, Structured a) => FilePath -> a -> IO () +structuredEncodeFile f = LBS.writeFile f . structuredEncode + -- | Structured 'Binary.decode'. -- Decode a value from a lazy 'LBS.ByteString', reconstructing the original structure. -- Throws pure exception on invalid inputs. @@ -280,6 +288,10 @@ handler (ErrorCall str) = return $ Left str #endif +-- | Lazily reconstruct a value previously written to a file. +structuredDecodeFileOrFail :: (Binary.Binary a, Structured a) => FilePath -> IO (Either String a) +structuredDecodeFileOrFail f = structuredDecodeOrFailIO =<< LBS.readFile f + ------------------------------------------------------------------------------- -- Helper data ------------------------------------------------------------------------------- diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/doc/installing-packages.rst cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/doc/installing-packages.rst --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/doc/installing-packages.rst 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/doc/installing-packages.rst 2020-01-30 20:19:40.000000000 +0000 @@ -57,7 +57,7 @@ anything; packages downloaded from this repository will be cached under ``~/.cabal/packages/hackage.haskell.org`` (or whatever name you specify; you can change the prefix by changing the value of -``remote-repo-cache``). If you want, you can configure multiple +:cfg-field:`remote-repo-cache`). If you want, you can configure multiple repositories, and ``cabal`` will combine them and be able to download packages from any of them. @@ -90,14 +90,39 @@ ``cabal`` will download the ``root.json`` field and use it without verification. Although this bootstrapping step is then unsafe, all subsequent access is secure (provided that the downloaded ``root.json`` -was not tempered with). 
Of course, adding ``root-keys`` and ``key-threshold`` to your repository specification only shifts the problem, because now you somehow need to make sure that the key IDs you received were the right ones. How that is done is however outside the scope of ``cabal`` proper. More information about the security infrastructure can be found at -https://github.com/well-typed/hackage-security. +https://github.com/haskell/hackage-security. + +Local no-index repositories +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +It's possible to use a directory of ``.tar.gz`` package files as a local package +repository. + +:: + + repository my-local-repository + url: file+noindex:///absolute/path/to/directory + +``cabal`` will construct the index automatically from the +``package-name-version.tar.gz`` files in the directory, and will use optional +corresponding ``package-name-version.cabal`` files as new revisions. + +The index is cached inside the given directory. If the directory is not +writable, you can append the ``#shared-cache`` fragment to the URI; +the cache will then be stored inside the :cfg-field:`remote-repo-cache` directory. +The repository path is used to determine the cache key. + +.. note:: + The URI scheme ``file:`` is interpreted as a remote repository, + as described in the previous sections, thus requiring manual construction + of the ``01-index.tar`` file. Legacy repositories ^^^^^^^^^^^^^^^^^^^ @@ -120,7 +145,7 @@ ``http://hackage.haskell.org/packages/archive`` will be silently translated to ``http://hackage.haskell.org/``. -The second kind of legacy repositories are so-called “local” +The second kind of legacy repositories are so-called “(legacy) local” repositories: :: diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/doc/nix-local-build.rst cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/doc/nix-local-build.rst --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/doc/nix-local-build.rst 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/doc/nix-local-build.rst 2020-01-30 20:19:40.000000000 +0000 @@ -51,7 +51,7 @@ :: - $ cabal v2-build + $ cabal v2-build all To build a specific package, you can either run ``v2-build`` from the directory of the package in question: @@ -2115,6 +2115,23 @@ The command line variant of this field is ``--(no-)count-conflicts``. +.. cfg-field:: fine-grained-conflicts: boolean + --fine-grained-conflicts + --no-fine-grained-conflicts + :synopsis: Skip a version of a package if it does not resolve any conflicts encountered in the last version (solver optimization). + + :default: True + + When enabled, the solver will skip a version of a package if it does not + resolve any of the conflicts encountered in the last version of that + package. For example, if ``foo-1.2`` depended on ``bar``, and the solver + couldn't find consistent versions for ``bar``'s dependencies, then the + solver would skip ``foo-1.1`` if it also depended on ``bar``. + + The command line variant of this field is + ``--(no-)fine-grained-conflicts``. + ..
cfg-field:: minimize-conflict-set: boolean --minimize-conflict-set --no-minimize-conflict-set diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/tests/ParserTests/errors/big-version.cabal cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/tests/ParserTests/errors/big-version.cabal --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/tests/ParserTests/errors/big-version.cabal 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/tests/ParserTests/errors/big-version.cabal 2020-01-30 20:19:40.000000000 +0000 @@ -1,5 +1,5 @@ cabal-version: 3.0 -name: big-vesion +name: big-version -- 10 digits version: 1234567890 diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.cabal cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.cabal --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.cabal 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.cabal 2020-01-30 20:19:40.000000000 +0000 @@ -1,5 +1,5 @@ cabal-version: 3.0 -name: big-vesion +name: big-version -- 9 digits version: 123456789 diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.expr cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.expr --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.expr 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.expr 2020-01-30 20:19:40.000000000 +0000 @@ -82,7 +82,7 @@ licenseRaw = Left NONE, maintainer = "", package = PackageIdentifier - {pkgName = `PackageName "big-vesion"`, + {pkgName = `PackageName "big-version"`, pkgVersion = `mkVersion [123456789]`}, pkgUrl = "", setupBuildInfo = Nothing, diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.format cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.format --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.format 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/tests/ParserTests/regressions/big-version.format 2020-01-30 20:19:40.000000000 +0000 @@ -1,5 +1,5 @@ cabal-version: 3.0 -name: big-vesion +name: big-version version: 123456789 library diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/tests/Test/Laws.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/tests/Test/Laws.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/Cabal-3.2.0.0/tests/Test/Laws.hs 2019-12-17 14:07:17.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/Cabal-3.2.0.0/tests/Test/Laws.hs 2020-01-30 20:19:40.000000000 +0000 @@ -53,7 +53,7 @@ monoid_2 x y z = (x `mappend` y) `mappend` z == x `mappend` (y `mappend` z) --- | The 'mconcat' definition. It can be overidden for the sake of effeciency +-- | The 'mconcat' definition. 
It can be overidden for the sake of efficiency -- but it must still satisfy the property given by the default definition: -- -- > mconcat = foldr mappend mempty diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/cabal-install.cabal cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/cabal-install.cabal --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/cabal-install.cabal 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/cabal-install.cabal 2020-01-30 20:19:45.000000000 +0000 @@ -177,6 +177,7 @@ Distribution.Client.CmdInstall.ClientInstallFlags Distribution.Client.CmdRepl Distribution.Client.CmdRun + Distribution.Client.CmdRun.ClientRunFlags Distribution.Client.CmdTest Distribution.Client.CmdLegacy Distribution.Client.CmdSdist @@ -203,6 +204,7 @@ Distribution.Client.Glob Distribution.Client.GlobalFlags Distribution.Client.Haddock + Distribution.Client.HashValue Distribution.Client.HttpUtils Distribution.Client.IndexUtils Distribution.Client.IndexUtils.Timestamp diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/changelog cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/changelog --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/changelog 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/changelog 2020-01-30 20:19:45.000000000 +0000 @@ -1,15 +1,45 @@ -*-change-log-*- -3.1.0.0 (current development version) +3.2.0.0 Someone February 2020 * `v2-build` (and other `v2-`prefixed commands) now accept the `--benchmark-option(s)` flags, which pass options to benchmark executables (analogous to how `--test-option(s)` works). (#6209) + * Add solver optimization to skip a version of a package if it does not resolve + any conflicts encountered in the last version, controlled by flag + '--fine-grained-conflicts'. (#5918) + * `cabal v2-exec` doesn't fail in clean package (#6479) + * Show full ABI hash for installed packages in solver log (#5892) + * Create incoming directory even for empty packages (#4130) + * Start GHCi with `main-is` module in scope (#6311) + * Implement `--benchmark-options` for `v2-bench` (#6224) + * Fix store-dir in ghc env files generated by `cabal install --lib + --package-env` (#6298) + * `cabal v2-run` works with `.lhs` files (#6134) + * `subdir` in source-repository-package accepts multiple entries (#5472) + +3.0.1.0 TBW December 2019 + * Create store incoming directory + ([#4130](https://github.com/haskell/cabal/issues/4130)) + * `fetchRepoTarball` output is not marked + ([#6385](https://github.com/haskell/cabal/pull/6385)) + * Update `setupMinCabalVersionConstraint` for GHC-8.8 + ([#6217](https://github.com/haskell/cabal/pull/6217)) + * Implement `cabal install --ignore-project` + ([#5919](https://github.com/haskell/cabal/issues/5919)) + * `cabal install executable` solver isn't affected by default + environment contents + ([#6410](https://github.com/haskell/cabal/issues/6410)) + * Use `lukko` for file locking + ([#6345](https://github.com/haskell/cabal/pull/6345)) + * Use `hackage-security-0.6` + ([#6388](https://github.com/haskell/cabal/pull/6388)) + * Other dependency upgrades 3.0.0.0 Mikhail Glushenkov August 2019 - * Parse comma-separated lists for extra-prog-path, extra-lib-dirs, extra-framework-dirs, - and extra-include-dirs as actual lists. 
(#5420) * `v2-haddock` fails on `haddock` failures (#5977) * `v2-run` works when given `File.lhs` literate file. (#6134) + * Parse comma-separated lists for extra-prog-path, extra-lib-dirs, extra-framework-dirs, + and extra-include-dirs as actual lists. (#5420) * `v2-repl` no longer changes directory to a randomized temporary folder when used outside of a project. (#5544) * `install-method` and `overwrite-policy` in `.cabal/config` now actually work. (#5942) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/CmdExec.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/CmdExec.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/CmdExec.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/CmdExec.hs 2020-01-30 20:19:44.000000000 +0000 @@ -82,6 +82,7 @@ import Distribution.Simple.Utils ( die' , info + , createDirectoryIfMissingVerbose , withTempDirectory , wrapText ) @@ -223,11 +224,8 @@ -> PostBuildProjectStatus -> ([(String, Maybe String)] -> IO a) -> IO a -withTempEnvFile verbosity - baseCtx - buildCtx - buildStatus - action = +withTempEnvFile verbosity baseCtx buildCtx buildStatus action = do + createDirectoryIfMissingVerbose verbosity True (distTempDirectory (distDirLayout baseCtx)) withTempDirectory verbosity (distTempDirectory (distDirLayout baseCtx)) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/CmdRun/ClientRunFlags.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/CmdRun/ClientRunFlags.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/CmdRun/ClientRunFlags.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/CmdRun/ClientRunFlags.hs 2020-01-30 20:19:44.000000000 +0000 @@ -0,0 +1,39 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE LambdaCase #-} +module Distribution.Client.CmdRun.ClientRunFlags +( ClientRunFlags(..) 
+, defaultClientRunFlags +, clientRunOptions +) where + +import Distribution.Client.Compat.Prelude + +import Distribution.Simple.Command (OptionField (..), ShowOrParseArgs (..), option) +import Distribution.Simple.Setup (Flag (..), toFlag, trueArg) + +data ClientRunFlags = ClientRunFlags + { crunIgnoreProject :: Flag Bool + } deriving (Eq, Show, Generic) + +instance Monoid ClientRunFlags where + mempty = gmempty + mappend = (<>) + +instance Semigroup ClientRunFlags where + (<>) = gmappend + +instance Binary ClientRunFlags +instance Structured ClientRunFlags + +defaultClientRunFlags :: ClientRunFlags +defaultClientRunFlags = ClientRunFlags + { crunIgnoreProject = toFlag False + } + +clientRunOptions :: ShowOrParseArgs -> [OptionField ClientRunFlags] +clientRunOptions _ = + [ option "z" ["ignore-project"] + "Ignore local project configuration" + crunIgnoreProject (\v flags -> flags { crunIgnoreProject = v }) + trueArg + ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/CmdRun.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/CmdRun.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/CmdRun.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/CmdRun.hs 2020-01-30 20:19:44.000000000 +0000 @@ -23,15 +23,20 @@ import Distribution.Client.ProjectOrchestration import Distribution.Client.CmdErrorMessages +import Distribution.Client.CmdRun.ClientRunFlags + import Distribution.Client.Setup - ( GlobalFlags(..), ConfigFlags(..), ConfigExFlags, InstallFlags ) + ( GlobalFlags(..), ConfigFlags(..), ConfigExFlags, InstallFlags(..) + , configureExOptions, haddockOptions, installOptions, testOptions + , benchmarkOptions, configureOptions, liftOptions ) +import Distribution.Solver.Types.ConstraintSource + ( ConstraintSource(..) ) import Distribution.Client.GlobalFlags ( defaultGlobalFlags ) -import qualified Distribution.Client.Setup as Client import Distribution.Simple.Setup ( HaddockFlags, TestFlags, BenchmarkFlags, fromFlagOrDefault ) import Distribution.Simple.Command - ( CommandUI(..), usageAlternatives ) + ( CommandUI(..), OptionField (..), usageAlternatives ) import Distribution.Types.ComponentName ( showComponentName ) import Distribution.Deprecated.Text @@ -45,7 +50,7 @@ ( establishDummyProjectBaseContext ) import Distribution.Client.ProjectConfig ( ProjectConfig(..), ProjectConfigShared(..) - , withProjectOrGlobalConfig ) + , withProjectOrGlobalConfigIgn ) import Distribution.Client.ProjectPlanning ( ElaboratedConfiguredPackage(..) , ElaboratedInstallPlan, binDirectoryFor ) @@ -109,43 +114,74 @@ runCommand :: CommandUI ( ConfigFlags, ConfigExFlags, InstallFlags , HaddockFlags, TestFlags, BenchmarkFlags + , ClientRunFlags ) -runCommand = Client.installCommand { - commandName = "v2-run", - commandSynopsis = "Run an executable.", - commandUsage = usageAlternatives "v2-run" - [ "[TARGET] [FLAGS] [-- EXECUTABLE_FLAGS]" ], - commandDescription = Just $ \pname -> wrapText $ - "Runs the specified executable-like component (an executable, a test, " - ++ "or a benchmark), first ensuring it is up to date.\n\n" - - ++ "Any executable-like component in any package in the project can be " - ++ "specified. A package can be specified if contains just one " - ++ "executable-like. 
The default is to use the package in the current " - ++ "directory if it contains just one executable-like.\n\n" - - ++ "Extra arguments can be passed to the program, but use '--' to " - ++ "separate arguments for the program from arguments for " ++ pname - ++ ". The executable is run in an environment where it can find its " - ++ "data files inplace in the build tree.\n\n" - - ++ "Dependencies are built or rebuilt as necessary. Additional " - ++ "configuration flags can be specified on the command line and these " - ++ "extend the project configuration from the 'cabal.project', " - ++ "'cabal.project.local' and other files.", - commandNotes = Just $ \pname -> - "Examples:\n" - ++ " " ++ pname ++ " v2-run\n" - ++ " Run the executable-like in the package in the current directory\n" - ++ " " ++ pname ++ " v2-run foo-tool\n" - ++ " Run the named executable-like (in any package in the project)\n" - ++ " " ++ pname ++ " v2-run pkgfoo:foo-tool\n" - ++ " Run the executable-like 'foo-tool' in the package 'pkgfoo'\n" - ++ " " ++ pname ++ " v2-run foo -O2 -- dothing --fooflag\n" - ++ " Build with '-O2' and run the program, passing it extra arguments.\n\n" - - ++ cmdCommonHelpTextNewBuildBeta +runCommand = CommandUI + { commandName = "v2-run" + , commandSynopsis = "Run an executable." + , commandUsage = usageAlternatives "v2-run" + [ "[TARGET] [FLAGS] [-- EXECUTABLE_FLAGS]" ] + , commandDescription = Just $ \pname -> wrapText $ + "Runs the specified executable-like component (an executable, a test, " + ++ "or a benchmark), first ensuring it is up to date.\n\n" + + ++ "Any executable-like component in any package in the project can be " + ++ "specified. A package can be specified if contains just one " + ++ "executable-like. The default is to use the package in the current " + ++ "directory if it contains just one executable-like.\n\n" + + ++ "Extra arguments can be passed to the program, but use '--' to " + ++ "separate arguments for the program from arguments for " ++ pname + ++ ". The executable is run in an environment where it can find its " + ++ "data files inplace in the build tree.\n\n" + + ++ "Dependencies are built or rebuilt as necessary. Additional " + ++ "configuration flags can be specified on the command line and these " + ++ "extend the project configuration from the 'cabal.project', " + ++ "'cabal.project.local' and other files." + , commandNotes = Just $ \pname -> + "Examples:\n" + ++ " " ++ pname ++ " v2-run\n" + ++ " Run the executable-like in the package in the current directory\n" + ++ " " ++ pname ++ " v2-run foo-tool\n" + ++ " Run the named executable-like (in any package in the project)\n" + ++ " " ++ pname ++ " v2-run pkgfoo:foo-tool\n" + ++ " Run the executable-like 'foo-tool' in the package 'pkgfoo'\n" + ++ " " ++ pname ++ " v2-run foo -O2 -- dothing --fooflag\n" + ++ " Build with '-O2' and run the program, passing it extra arguments.\n\n" + + ++ cmdCommonHelpTextNewBuildBeta + , commandDefaultFlags = (mempty, mempty, mempty, mempty, mempty, mempty, mempty) + , commandOptions = \showOrParseArgs -> + liftOptions get1 set1 + -- Note: [Hidden Flags] + -- hide "constraint", "dependency", and + -- "exact-configuration" from the configure options. + (filter ((`notElem` ["constraint", "dependency" + , "exact-configuration"]) + . optionName) $ + configureOptions showOrParseArgs) + ++ liftOptions get2 set2 (configureExOptions showOrParseArgs ConstraintSourceCommandlineFlag) + ++ liftOptions get3 set3 + -- hide "target-package-db" flag from the + -- install options. 
+ (filter ((`notElem` ["target-package-db"]) + . optionName) $ + installOptions showOrParseArgs) + ++ liftOptions get4 set4 (haddockOptions showOrParseArgs) + ++ liftOptions get5 set5 (testOptions showOrParseArgs) + ++ liftOptions get6 set6 (benchmarkOptions showOrParseArgs) + ++ liftOptions get7 set7 (clientRunOptions showOrParseArgs) } + where + get1 (a,_,_,_,_,_,_) = a; set1 a (_,b,c,d,e,f,g) = (a,b,c,d,e,f,g) + get2 (_,b,_,_,_,_,_) = b; set2 b (a,_,c,d,e,f,g) = (a,b,c,d,e,f,g) + get3 (_,_,c,_,_,_,_) = c; set3 c (a,b,_,d,e,f,g) = (a,b,c,d,e,f,g) + get4 (_,_,_,d,_,_,_) = d; set4 d (a,b,c,_,e,f,g) = (a,b,c,d,e,f,g) + get5 (_,_,_,_,e,_,_) = e; set5 e (a,b,c,d,_,f,g) = (a,b,c,d,e,f,g) + get6 (_,_,_,_,_,f,_) = f; set6 f (a,b,c,d,e,_,g) = (a,b,c,d,e,f,g) + get7 (_,_,_,_,_,_,g) = g; set7 g (a,b,c,d,e,f,_) = (a,b,c,d,e,f,g) + -- | The @run@ command runs a specified executable-like component, building it -- first if necessary. The component can be either an executable, a test, @@ -156,10 +192,12 @@ -- "Distribution.Client.ProjectOrchestration" -- runAction :: ( ConfigFlags, ConfigExFlags, InstallFlags - , HaddockFlags, TestFlags, BenchmarkFlags ) + , HaddockFlags, TestFlags, BenchmarkFlags + , ClientRunFlags ) -> [String] -> GlobalFlags -> IO () runAction ( configFlags, configExFlags, installFlags - , haddockFlags, testFlags, benchmarkFlags ) + , haddockFlags, testFlags, benchmarkFlags + , clientRunFlags ) targetStrings globalFlags = do globalTmp <- getTemporaryDirectory tempDir <- createTempDirectory globalTmp "cabal-repl." @@ -170,7 +208,10 @@ without config = establishDummyProjectBaseContext verbosity (config <> cliConfig) tempDir [] OtherCommand - baseCtx <- withProjectOrGlobalConfig verbosity globalConfigFlag with without + let + ignoreProject = fromFlagOrDefault False (crunIgnoreProject clientRunFlags) + + baseCtx <- withProjectOrGlobalConfigIgn ignoreProject verbosity globalConfigFlag with without let scriptOrError script err = do diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/CmdUpdate.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/CmdUpdate.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/CmdUpdate.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/CmdUpdate.hs 2020-01-30 20:19:44.000000000 +0000 @@ -186,7 +186,8 @@ updateRepo verbosity _updateFlags repoCtxt (repo, indexState) = do transport <- repoContextGetTransport repoCtxt case repo of - RepoLocal{..} -> return () + RepoLocal{} -> return () + RepoLocalNoIndex{} -> return () RepoRemote{..} -> do downloadResult <- downloadIndex transport verbosity repoRemote repoLocalDir diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Config.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Config.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Config.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Config.hs 2020-01-30 20:19:44.000000000 +0000 @@ -41,7 +41,8 @@ userConfigUpdate, createDefaultConfigFile, - remoteRepoFields + remoteRepoFields, + postProcessRepo, ) where import Language.Haskell.Extension ( Language(Haskell2010) ) @@ -50,7 +51,7 @@ ( viewAsFieldDescr ) import Distribution.Client.Types - ( 
RemoteRepo(..), Username(..), Password(..), emptyRemoteRepo + ( RemoteRepo(..), LocalRepo (..), Username(..), Password(..), emptyRemoteRepo , AllowOlder(..), AllowNewer(..), RelaxDeps(..), isRelaxDeps ) import Distribution.Client.BuildReports.Types @@ -64,7 +65,7 @@ , InstallFlags(..), installOptions, defaultInstallFlags , UploadFlags(..), uploadCommand , ReportFlags(..), reportCommand - , showRepo, parseRepo, readRepo ) + , showRemoteRepo, parseRemoteRepo, readRemoteRepo ) import Distribution.Client.CmdInstall.ClientInstallFlags ( ClientInstallFlags(..), defaultClientInstallFlags , clientInstallOptions ) @@ -92,7 +93,7 @@ , locatedErrorMsg, showPWarning , readFields, warning, lineNo , simpleField, listField, spaceListField - , parseFilePathQ, parseOptCommaList, parseTokenQ ) + , parseFilePathQ, parseOptCommaList, parseTokenQ, syntaxError) import Distribution.Client.ParseUtils ( parseFields, ppFields, ppSection ) import Distribution.Client.HttpUtils @@ -252,6 +253,7 @@ globalRemoteRepos = lastNonEmptyNL globalRemoteRepos, globalCacheDir = combine globalCacheDir, globalLocalRepos = lastNonEmptyNL globalLocalRepos, + globalLocalNoIndexRepos = lastNonEmptyNL globalLocalNoIndexRepos, globalLogsDir = combine globalLogsDir, globalWorldFile = combine globalWorldFile, globalRequireSandbox = combine globalRequireSandbox, @@ -310,6 +312,7 @@ installMaxBackjumps = combine installMaxBackjumps, installReorderGoals = combine installReorderGoals, installCountConflicts = combine installCountConflicts, + installFineGrainedConflicts = combine installFineGrainedConflicts, installMinimizeConflictSet = combine installMinimizeConflictSet, installIndependentGoals = combine installIndependentGoals, installShadowPkgs = combine installShadowPkgs, @@ -1034,7 +1037,7 @@ deprecatedFieldDescriptions = [ liftGlobalFlag $ listField "repos" - (Disp.text . showRepo) parseRepo + (Disp.text . showRemoteRepo) parseRemoteRepo (fromNubList . globalRemoteRepos) (\rs cfg -> cfg { globalRemoteRepos = toNubList rs }) , liftGlobalFlag $ @@ -1117,9 +1120,9 @@ let init0 = savedInitFlags config user0 = savedUserInstallDirs config global0 = savedGlobalInstallDirs config - (remoteRepoSections0, haddockFlags, initFlags, user, global, paths, args) <- + (remoteRepoSections0, localRepoSections0, haddockFlags, initFlags, user, global, paths, args) <- foldM parseSections - ([], savedHaddockFlags config, init0, user0, global0, [], []) + ([], [], savedHaddockFlags config, init0, user0, global0, [], []) knownSections let remoteRepoSections = @@ -1127,9 +1130,15 @@ . nubBy ((==) `on` remoteRepoName) $ remoteRepoSections0 + let localRepoSections = + reverse + . nubBy ((==) `on` localRepoName) + $ localRepoSections0 + return . 
fixConfigMultilines $ config { savedGlobalFlags = (savedGlobalFlags config) { globalRemoteRepos = toNubList remoteRepoSections, + globalLocalNoIndexRepos = toNubList localRepoSections, -- the global extra prog path comes from the configure flag prog path globalProgPathExtra = configProgramPathExtra (savedConfigureFlags config) }, @@ -1185,61 +1194,57 @@ parse = parseFields (configFieldDescriptions src ++ deprecatedFieldDescriptions) initial - parseSections (rs, h, i, u, g, p, a) - (ParseUtils.Section _ "repository" name fs) = do + parseSections (rs, ls, h, i, u, g, p, a) + (ParseUtils.Section lineno "repository" name fs) = do r' <- parseFields remoteRepoFields (emptyRemoteRepo name) fs - when (remoteRepoKeyThreshold r' > length (remoteRepoRootKeys r')) $ - warning $ "'key-threshold' for repository " ++ show (remoteRepoName r') - ++ " higher than number of keys" - when (not (null (remoteRepoRootKeys r')) - && remoteRepoSecure r' /= Just True) $ - warning $ "'root-keys' for repository " ++ show (remoteRepoName r') - ++ " non-empty, but 'secure' not set to True." - return (r':rs, h, i, u, g, p, a) + r'' <- postProcessRepo lineno name r' + case r'' of + Left local -> return (rs, local:ls, h, i, u, g, p, a) + Right remote -> return (remote:rs, ls, h, i, u, g, p, a) - parseSections (rs, h, i, u, g, p, a) + parseSections (rs, ls, h, i, u, g, p, a) (ParseUtils.F lno "remote-repo" raw) = do - let mr' = readRepo raw + let mr' = readRemoteRepo raw r' <- maybe (ParseFailed $ NoParse "remote-repo" lno) return mr' - return (r':rs, h, i, u, g, p, a) + return (r':rs, ls, h, i, u, g, p, a) - parseSections accum@(rs, h, i, u, g, p, a) + parseSections accum@(rs, ls, h, i, u, g, p, a) (ParseUtils.Section _ "haddock" name fs) | name == "" = do h' <- parseFields haddockFlagsFields h fs - return (rs, h', i, u, g, p, a) + return (rs, ls, h', i, u, g, p, a) | otherwise = do warning "The 'haddock' section should be unnamed" return accum - parseSections accum@(rs, h, i, u, g, p, a) + parseSections accum@(rs, ls, h, i, u, g, p, a) (ParseUtils.Section _ "init" name fs) | name == "" = do i' <- parseFields initFlagsFields i fs - return (rs, h, i', u, g, p, a) + return (rs, ls, h, i', u, g, p, a) | otherwise = do warning "The 'init' section should be unnamed" return accum - parseSections accum@(rs, h, i, u, g, p, a) + parseSections accum@(rs, ls, h, i, u, g, p, a) (ParseUtils.Section _ "install-dirs" name fs) | name' == "user" = do u' <- parseFields installDirsFields u fs - return (rs, h, i, u', g, p, a) + return (rs, ls, h, i, u', g, p, a) | name' == "global" = do g' <- parseFields installDirsFields g fs - return (rs, h, i, u, g', p, a) + return (rs, ls, h, i, u, g', p, a) | otherwise = do warning "The 'install-paths' section should be for 'user' or 'global'" return accum where name' = lowercase name - parseSections accum@(rs, h, i, u, g, p, a) + parseSections accum@(rs, ls, h, i, u, g, p, a) (ParseUtils.Section _ "program-locations" name fs) | name == "" = do p' <- parseFields withProgramsFields p fs - return (rs, h, i, u, g, p', a) + return (rs, ls, h, i, u, g, p', a) | otherwise = do warning "The 'program-locations' section should be unnamed" return accum - parseSections accum@(rs, h, i, u, g, p, a) + parseSections accum@(rs, ls, h, i, u, g, p, a) (ParseUtils.Section _ "program-default-options" name fs) | name == "" = do a' <- parseFields withProgramOptionsFields a fs - return (rs, h, i, u, g, p, a') + return (rs, ls, h, i, u, g, p, a') | otherwise = do warning "The 'program-default-options' section should be 
unnamed" return accum @@ -1247,6 +1252,34 @@ warning $ "Unrecognized stanza on line " ++ show (lineNo f) return accum +postProcessRepo :: Int -> String -> RemoteRepo -> ParseResult (Either LocalRepo RemoteRepo) +postProcessRepo lineno reponame repo0 = do + when (null reponame) $ + syntaxError lineno $ "a 'repository' section requires the " + ++ "repository name as an argument" + + case uriScheme (remoteRepoURI repo0) of + -- TODO: check that there are no authority, query or fragment + -- Note: the trailing colon is important + "file+noindex:" -> do + let uri = remoteRepoURI repo0 + return $ Left $ LocalRepo reponame (uriPath uri) (uriFragment uri == "#shared-cache") + + _ -> do + let repo = repo0 { remoteRepoName = reponame } + + when (remoteRepoKeyThreshold repo > length (remoteRepoRootKeys repo)) $ + warning $ "'key-threshold' for repository " + ++ show (remoteRepoName repo) + ++ " higher than number of keys" + + when (not (null (remoteRepoRootKeys repo)) && remoteRepoSecure repo /= Just True) $ + warning $ "'root-keys' for repository " + ++ show (remoteRepoName repo) + ++ " non-empty, but 'secure' not set to True." + + return $ Right repo + showConfig :: SavedConfig -> String showConfig = showConfigWithComments mempty @@ -1297,7 +1330,7 @@ ppRemoteRepoSection :: RemoteRepo -> RemoteRepo -> Doc ppRemoteRepoSection def vals = ppSection "repository" (remoteRepoName vals) - remoteRepoFields (Just def) vals + remoteRepoFields (Just def) vals remoteRepoFields :: [FieldDescr RemoteRepo] remoteRepoFields = diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Dependency.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Dependency.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Dependency.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Dependency.hs 2020-01-30 20:19:44.000000000 +0000 @@ -47,6 +47,7 @@ setPreferenceDefault, setReorderGoals, setCountConflicts, + setFineGrainedConflicts, setMinimizeConflictSet, setIndependentGoals, setAvoidReinstalls, @@ -159,6 +160,7 @@ depResolverSourcePkgIndex :: PackageIndex.PackageIndex UnresolvedSourcePackage, depResolverReorderGoals :: ReorderGoals, depResolverCountConflicts :: CountConflicts, + depResolverFineGrainedConflicts :: FineGrainedConflicts, depResolverMinimizeConflictSet :: MinimizeConflictSet, depResolverIndependentGoals :: IndependentGoals, depResolverAvoidReinstalls :: AvoidReinstalls, @@ -197,6 +199,7 @@ ++ "\nstrategy: " ++ show (depResolverPreferenceDefault p) ++ "\nreorder goals: " ++ show (asBool (depResolverReorderGoals p)) ++ "\ncount conflicts: " ++ show (asBool (depResolverCountConflicts p)) + ++ "\nfine grained conflicts: " ++ show (asBool (depResolverFineGrainedConflicts p)) ++ "\nminimize conflict set: " ++ show (asBool (depResolverMinimizeConflictSet p)) ++ "\nindependent goals: " ++ show (asBool (depResolverIndependentGoals p)) ++ "\navoid reinstalls: " ++ show (asBool (depResolverAvoidReinstalls p)) @@ -254,6 +257,7 @@ depResolverSourcePkgIndex = sourcePkgIndex, depResolverReorderGoals = ReorderGoals False, depResolverCountConflicts = CountConflicts True, + depResolverFineGrainedConflicts = FineGrainedConflicts True, depResolverMinimizeConflictSet = MinimizeConflictSet False, depResolverIndependentGoals = IndependentGoals False, depResolverAvoidReinstalls = AvoidReinstalls False, @@ -310,6 +314,12 @@ 
depResolverCountConflicts = count } +setFineGrainedConflicts :: FineGrainedConflicts -> DepResolverParams -> DepResolverParams +setFineGrainedConflicts fineGrained params = + params { + depResolverFineGrainedConflicts = fineGrained + } + setMinimizeConflictSet :: MinimizeConflictSet -> DepResolverParams -> DepResolverParams setMinimizeConflictSet minimize params = params { @@ -755,7 +765,8 @@ Step (showDepResolverParams finalparams) $ fmap (validateSolverResult platform comp indGoals) - $ runSolver solver (SolverConfig reordGoals cntConflicts minimize indGoals noReinstalls + $ runSolver solver (SolverConfig reordGoals cntConflicts fineGrained minimize + indGoals noReinstalls shadowing strFlags allowBootLibs onlyConstrained_ maxBkjumps enableBj solveExes order verbosity (PruneAfterFirstSuccess False)) platform comp installedPkgIndex sourcePkgIndex @@ -769,6 +780,7 @@ sourcePkgIndex reordGoals cntConflicts + fineGrained minimize indGoals noReinstalls @@ -1015,9 +1027,9 @@ -> Either [ResolveNoDepsError] [UnresolvedSourcePackage] resolveWithoutDependencies (DepResolverParams targets constraints prefs defpref installedPkgIndex sourcePkgIndex - _reorderGoals _countConflicts _minimizeConflictSet - _indGoals _avoidReinstalls _shadowing _strFlags - _maxBjumps _enableBj _solveExes + _reorderGoals _countConflicts _fineGrained + _minimizeConflictSet _indGoals _avoidReinstalls + _shadowing _strFlags _maxBjumps _enableBj _solveExes _allowBootLibInstalls _onlyConstrained _order _verbosity) = collectEithers $ map selectPackage (Set.toList targets) where diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/DistDirLayout.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/DistDirLayout.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/DistDirLayout.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/DistDirLayout.hs 2020-01-30 20:19:44.000000000 +0000 @@ -155,11 +155,11 @@ -- | Information about the root directory of the project. -- --- It can either be an implict project root in the current dir if no +-- It can either be an implicit project root in the current dir if no -- @cabal.project@ file is found, or an explicit root if the file is found. -- data ProjectRoot = - -- | -- ^ An implict project root. It contains the absolute project + -- | -- ^ An implicit project root. It contains the absolute project -- root dir. ProjectRootImplicit FilePath diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Fetch.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Fetch.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Fetch.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Fetch.hs 2020-01-30 20:19:44.000000000 +0000 @@ -162,6 +162,8 @@ . setCountConflicts countConflicts + . setFineGrainedConflicts fineGrainedConflicts + . setMinimizeConflictSet minimizeConflictSet . 
setShadowPkgs shadowPkgs @@ -199,6 +201,7 @@ reorderGoals = fromFlag (fetchReorderGoals fetchFlags) countConflicts = fromFlag (fetchCountConflicts fetchFlags) + fineGrainedConflicts = fromFlag (fetchFineGrainedConflicts fetchFlags) minimizeConflictSet = fromFlag (fetchMinimizeConflictSet fetchFlags) independentGoals = fromFlag (fetchIndependentGoals fetchFlags) shadowPkgs = fromFlag (fetchShadowPkgs fetchFlags) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/FetchUtils.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/FetchUtils.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/FetchUtils.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/FetchUtils.hs 2020-01-30 20:19:44.000000000 +0000 @@ -177,6 +177,7 @@ downloadRepoPackage = case repo of RepoLocal{..} -> return (packageFile repo pkgid) + RepoLocalNoIndex{..} -> return (packageFile repo pkgid) RepoRemote{..} -> do transport <- repoContextGetTransport repoCtxt @@ -292,6 +293,7 @@ -- the tarball for a given @PackageIdentifer@ is stored. -- packageDir :: Repo -> PackageId -> FilePath +packageDir (RepoLocalNoIndex (LocalRepo _ dir _) _) _pkgid = dir packageDir repo pkgid = repoLocalDir repo </> display (packageName pkgid) </> display (packageVersion pkgid) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Freeze.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Freeze.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Freeze.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Freeze.hs 2020-01-30 20:19:44.000000000 +0000 @@ -175,6 +175,8 @@ . setCountConflicts countConflicts + . setFineGrainedConflicts fineGrainedConflicts + . setMinimizeConflictSet minimizeConflictSet . setShadowPkgs shadowPkgs @@ -207,6 +209,7 @@ reorderGoals = fromFlag (freezeReorderGoals freezeFlags) countConflicts = fromFlag (freezeCountConflicts freezeFlags) + fineGrainedConflicts = fromFlag (freezeFineGrainedConflicts freezeFlags) minimizeConflictSet = fromFlag (freezeMinimizeConflictSet freezeFlags) independentGoals = fromFlag (freezeIndependentGoals freezeFlags) shadowPkgs = fromFlag (freezeShadowPkgs freezeFlags) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/GlobalFlags.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/GlobalFlags.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/GlobalFlags.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/GlobalFlags.hs 2020-01-30 20:19:44.000000000 +0000 @@ -17,7 +17,7 @@ import Distribution.Client.Compat.Prelude import Distribution.Client.Types - ( Repo(..), RemoteRepo(..)
) + ( Repo(..), RemoteRepo(..), LocalRepo (..), localRepoCacheKey ) import Distribution.Simple.Setup ( Flag(..), fromFlag, flagToMaybe ) import Distribution.Utils.NubList @@ -27,7 +27,7 @@ import Distribution.Verbosity ( Verbosity ) import Distribution.Simple.Utils - ( info ) + ( info, warn ) import Control.Concurrent ( MVar, newMVar, modifyMVar ) @@ -48,6 +48,8 @@ import qualified Distribution.Client.Security.HTTP as Sec.HTTP import qualified Distribution.Client.Security.DNS as Sec.DNS +import qualified System.FilePath.Posix as FilePath.Posix + -- ------------------------------------------------------------ -- * Global flags -- ------------------------------------------------------------ @@ -62,6 +64,7 @@ globalRemoteRepos :: NubList RemoteRepo, -- ^ Available Hackage servers. globalCacheDir :: Flag FilePath, globalLocalRepos :: NubList FilePath, + globalLocalNoIndexRepos :: NubList LocalRepo, globalLogsDir :: Flag FilePath, globalWorldFile :: Flag FilePath, globalRequireSandbox :: Flag Bool, @@ -83,6 +86,7 @@ globalRemoteRepos = mempty, globalCacheDir = mempty, globalLocalRepos = mempty, + globalLocalNoIndexRepos = mempty, globalLogsDir = mempty, globalWorldFile = mempty, globalRequireSandbox = Flag False, @@ -141,20 +145,25 @@ withRepoContext verbosity globalFlags = withRepoContext' verbosity - (fromNubList (globalRemoteRepos globalFlags)) - (fromNubList (globalLocalRepos globalFlags)) - (fromFlag (globalCacheDir globalFlags)) - (flagToMaybe (globalHttpTransport globalFlags)) - (flagToMaybe (globalIgnoreExpiry globalFlags)) - (fromNubList (globalProgPathExtra globalFlags)) + (fromNubList (globalRemoteRepos globalFlags)) + (fromNubList (globalLocalRepos globalFlags)) + (fromNubList (globalLocalNoIndexRepos globalFlags)) + (fromFlag (globalCacheDir globalFlags)) + (flagToMaybe (globalHttpTransport globalFlags)) + (flagToMaybe (globalIgnoreExpiry globalFlags)) + (fromNubList (globalProgPathExtra globalFlags)) -withRepoContext' :: Verbosity -> [RemoteRepo] -> [FilePath] +withRepoContext' :: Verbosity -> [RemoteRepo] -> [FilePath] -> [LocalRepo] -> FilePath -> Maybe String -> Maybe Bool -> [FilePath] -> (RepoContext -> IO a) -> IO a -withRepoContext' verbosity remoteRepos localRepos +withRepoContext' verbosity remoteRepos localRepos localNoIndexRepos sharedCacheDir httpTransport ignoreExpiry extraPaths = \callback -> do + for_ localNoIndexRepos $ \local -> + unless (FilePath.Posix.isAbsolute (localRepoPath local)) $ + warn verbosity $ "file+noindex " ++ localRepoName local ++ " repository path is not absolute; this is fragile, and not recommended" + transportRef <- newMVar Nothing let httpLib = Sec.HTTP.transportAdapter verbosity @@ -162,6 +171,7 @@ initSecureRepos verbosity httpLib secureRemoteRepos $ \secureRepos' -> callback RepoContext { repoContextRepos = allRemoteRepos + ++ allLocalNoIndexRepos ++ map RepoLocal localRepos , repoContextGetTransport = getTransport transportRef , repoContextWithSecureRepo = withSecureRepo secureRepos' @@ -170,6 +180,8 @@ where secureRemoteRepos = [ (remote, cacheDir) | RepoSecure remote cacheDir <- allRemoteRepos ] + + allRemoteRepos :: [Repo] allRemoteRepos = [ (if isSecure then RepoSecure else RepoRemote) remote cacheDir | remote <- remoteRepos @@ -177,6 +189,14 @@ isSecure = remoteRepoSecure remote == Just True ] + allLocalNoIndexRepos :: [Repo] + allLocalNoIndexRepos = + [ RepoLocalNoIndex local cacheDir + | local <- localNoIndexRepos + , let cacheDir | localRepoSharedCache local = sharedCacheDir </> localRepoCacheKey local | otherwise = localRepoPath
local + ] + getTransport :: MVar (Maybe HttpTransport) -> IO HttpTransport getTransport transportRef = modifyMVar transportRef $ \mTransport -> do diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/HashValue.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/HashValue.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/HashValue.hs 1970-01-01 00:00:00.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/HashValue.hs 2020-01-30 20:19:44.000000000 +0000 @@ -0,0 +1,86 @@ +{-# LANGUAGE DeriveDataTypeable #-} +{-# LANGUAGE DeriveGeneric #-} +module Distribution.Client.HashValue ( + HashValue, + hashValue, + truncateHash, + showHashValue, + readFileHashValue, + hashFromTUF, + ) where + +import Distribution.Client.Compat.Prelude +import Prelude () + +import qualified Hackage.Security.Client as Sec + +import qualified Crypto.Hash.SHA256 as SHA256 +import qualified Data.ByteString.Base16 as Base16 +import qualified Data.ByteString.Char8 as BS +import qualified Data.ByteString.Lazy.Char8 as LBS + +import Control.Exception (evaluate) +import System.IO (IOMode (..), withBinaryFile) + +----------------------------------------------- +-- The specific choice of hash implementation +-- + +-- Is a crypto hash necessary here? One thing to consider is who controls the +-- inputs and what's the result of a hash collision. Obviously we should not +-- install packages we don't trust because they can run all sorts of code, but +-- if I've checked there's no TH, no custom Setup etc, is there still a +-- problem? If someone provided us a tarball that hashed to the same value as +-- some other package and we installed it, we could end up re-using that +-- installed package in place of another one we wanted. So yes, in general +-- there is some value in preventing intentional hash collisions in installed +-- package ids. + +newtype HashValue = HashValue BS.ByteString + deriving (Eq, Generic, Show, Typeable) + +-- Cannot do any sensible validation here. Although we use SHA256 +-- for stuff we hash ourselves, we can also get hashes from TUF +-- and that can in principle use different hash functions in future. +-- +-- Therefore, we simply derive this structurally. +instance Binary HashValue +instance Structured HashValue + +-- | Hash some data. Currently uses SHA256. +-- +hashValue :: LBS.ByteString -> HashValue +hashValue = HashValue . SHA256.hashlazy + +showHashValue :: HashValue -> String +showHashValue (HashValue digest) = BS.unpack (Base16.encode digest) + +-- | Hash the content of a file. Uses SHA256. +-- +readFileHashValue :: FilePath -> IO HashValue +readFileHashValue tarball = + withBinaryFile tarball ReadMode $ \hnd -> + evaluate . hashValue =<< LBS.hGetContents hnd + +-- | Convert a hash from TUF metadata into a 'PackageSourceHash'. +-- +-- Note that TUF hashes don't neessarily have to be SHA256, since it can +-- support new algorithms in future. +-- +hashFromTUF :: Sec.Hash -> HashValue +hashFromTUF (Sec.Hash hashstr) = + --TODO: [code cleanup] either we should get TUF to use raw bytestrings or + -- perhaps we should also just use a base16 string as the internal rep. 
+ case Base16.decode (BS.pack hashstr) of + (hash, trailing) | not (BS.null hash) && BS.null trailing + -> HashValue hash + _ -> error "hashFromTUF: cannot decode base16 hash" + + +-- | Truncate a 32 byte SHA256 hash to +-- +-- For example 20 bytes render as 40 hex chars, which we use for unit-ids. +-- Or even 4 bytes for 'hashedInstalledPackageIdShort' +-- +truncateHash :: Int -> HashValue -> HashValue +truncateHash n (HashValue h) = HashValue (BS.take n h) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/IndexUtils.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/IndexUtils.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/IndexUtils.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/IndexUtils.hs 2020-01-30 20:19:44.000000000 +0000 @@ -3,6 +3,7 @@ {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE BangPatterns #-} {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE GADTs #-} ----------------------------------------------------------------------------- @@ -50,6 +51,8 @@ import Distribution.Client.IndexUtils.Timestamp import Distribution.Client.Types import Distribution.Verbosity +import Distribution.Pretty (prettyShow) +import Distribution.Parsec (simpleParsec) import Distribution.Package ( PackageId, PackageIdentifier(..), mkPackageName @@ -70,7 +73,7 @@ import Distribution.Deprecated.Text ( display, simpleParse ) import Distribution.Simple.Utils - ( die', warn, info ) + ( die', warn, info, createDirectoryIfMissingVerbose ) import Distribution.Client.Setup ( RepoContext(..) ) @@ -83,9 +86,11 @@ import Distribution.Solver.Types.SourcePackage import qualified Data.Map as Map +import qualified Data.Set as Set import Control.DeepSeq import Control.Monad import Control.Exception +import Data.List (stripPrefix) import qualified Data.ByteString.Lazy as BS import qualified Data.ByteString.Lazy.Char8 as BS.Char8 import qualified Data.ByteString.Char8 as BSS @@ -93,17 +98,19 @@ import Distribution.Client.GZipUtils (maybeDecompress) import Distribution.Client.Utils ( byteStringToFilePath , tryFindAddSourcePackageDesc ) -import Distribution.Compat.Binary +import Distribution.Utils.Structured (Structured (..), nominalStructure, structuredEncodeFile, structuredDecodeFileOrFail) import Distribution.Compat.Exception (catchIO) import Distribution.Compat.Time (getFileAge, getModTime) import System.Directory (doesFileExist, doesDirectoryExist) import System.FilePath - ( (</>), (<.>), takeExtension, replaceExtension, splitDirectories, normalise ) -import System.FilePath.Posix as FilePath.Posix - ( takeFileName ) + ( (</>), (<.>), takeFileName, takeExtension, replaceExtension, splitDirectories, normalise, takeDirectory ) +import qualified System.FilePath.Posix as FilePath.Posix import System.IO import System.IO.Unsafe (unsafeInterleaveIO) import System.IO.Error (isDoesNotExistError) +import Distribution.Compat.Directory (listDirectory) + +import qualified Codec.Compression.GZip as GZip import qualified Hackage.Security.Client as Sec import qualified Hackage.Security.Util.Some as Sec @@ -130,9 +137,10 @@ indexBaseName repo = repoLocalDir repo </> fn where fn = case repo of - RepoSecure {} -> "01-index" - RepoRemote {} -> "00-index" - RepoLocal {} -> "00-index" + RepoSecure {} -> "01-index" + RepoRemote {} -> "00-index" + RepoLocal {} -> "00-index" + RepoLocalNoIndex {}
-> "noindex" ------------------------------------------------------------------------ -- Reading the source package index @@ -218,7 +226,12 @@ describeState (IndexStateTime time) = "historical state as of " ++ display time pkgss <- forM (repoContextRepos repoCtxt) $ \r -> do - let rname = maybe "" remoteRepoName $ maybeRepoRemote r + let rname = case r of + RepoRemote remote _ -> remoteRepoName remote + RepoSecure remote _ -> remoteRepoName remote + RepoLocalNoIndex local _ -> localRepoName local + RepoLocal _ -> "" + info verbosity ("Reading available packages of " ++ rname ++ "...") idxState <- case mb_idxState of @@ -240,6 +253,7 @@ unless (idxState == IndexStateHead) $ case r of RepoLocal path -> warn verbosity ("index-state ignored for old-format repositories (local repository '" ++ path ++ "')") + RepoLocalNoIndex {} -> warn verbosity "index-state ignored for file+noindex repositories" RepoRemote {} -> warn verbosity ("index-state ignored for old-format (remote repository '" ++ rname ++ "')") RepoSecure {} -> pure () @@ -301,7 +315,7 @@ -> IO (PackageIndex UnresolvedSourcePackage, [Dependency], IndexStateInfo) readRepoIndex verbosity repoCtxt repo idxState = handleNotFound $ do - warnIfIndexIsOld =<< getIndexFileAge repo + when (isRepoRemote repo) $ warnIfIndexIsOld =<< getIndexFileAge repo updateRepoIndexCache verbosity (RepoIndex repoCtxt repo) readPackageIndexCacheFile verbosity mkAvailablePackage (RepoIndex repoCtxt repo) @@ -330,6 +344,10 @@ RepoLocal{..} -> warn verbosity $ "The package list for the local repo '" ++ repoLocalDir ++ "' is missing. The repo is invalid." + RepoLocalNoIndex local _ -> warn verbosity $ + "Error during construction of local+noindex " + ++ localRepoName local ++ " repository index: " + ++ show e return (mempty,mempty,emptyStateInfo) else ioError e @@ -338,11 +356,12 @@ when (dt >= isOldThreshold) $ case repo of RepoRemote{..} -> warn verbosity $ errOutdatedPackageList repoRemote dt RepoSecure{..} -> warn verbosity $ errOutdatedPackageList repoRemote dt - RepoLocal{..} -> return () + RepoLocal{} -> return () + RepoLocalNoIndex {} -> return () errMissingPackageList repoRemote = "The package list for '" ++ remoteRepoName repoRemote - ++ "' does not exist. Run 'cabal update' to download it." + ++ "' does not exist. Run 'cabal update' to download it." 
++ show repoRemote errOutdatedPackageList repoRemote dt = "The package list for '" ++ remoteRepoName repoRemote ++ "' is " ++ shows (floor dt :: Int) " days old.\nRun " @@ -366,18 +385,23 @@ -- updateRepoIndexCache :: Verbosity -> Index -> IO () updateRepoIndexCache verbosity index = - whenCacheOutOfDate index $ do - updatePackageIndexCacheFile verbosity index + whenCacheOutOfDate index $ updatePackageIndexCacheFile verbosity index whenCacheOutOfDate :: Index -> IO () -> IO () whenCacheOutOfDate index action = do exists <- doesFileExist $ cacheFile index if not exists - then action - else do - indexTime <- getModTime $ indexFile index - cacheTime <- getModTime $ cacheFile index - when (indexTime > cacheTime) action + then action + else if localNoIndex index + then return () -- TODO: don't update cache for local+noindex repositories + else do + indexTime <- getModTime $ indexFile index + cacheTime <- getModTime $ cacheFile index + when (indexTime > cacheTime) action + +localNoIndex :: Index -> Bool +localNoIndex (RepoIndex _ (RepoLocalNoIndex {})) = True +localNoIndex _ = False ------------------------------------------------------------------------ -- Reading the index file @@ -391,9 +415,10 @@ -- | A build tree reference is either a link or a snapshot. data BuildTreeRefType = SnapshotRef | LinkRef - deriving (Eq,Generic) + deriving (Eq,Show,Generic) instance Binary BuildTreeRefType +instance Structured BuildTreeRefType refTypeFromTypeCode :: Tar.TypeCode -> BuildTreeRefType refTypeFromTypeCode t @@ -492,7 +517,7 @@ extractPrefs :: Tar.Entry -> Maybe [Dependency] extractPrefs entry = case Tar.entryContent entry of Tar.NormalFile content _ - | takeFileName entrypath == "preferred-versions" + | FilePath.Posix.takeFileName entrypath == "preferred-versions" -> Just prefs where entrypath = Tar.entryPath entry @@ -562,20 +587,27 @@ RepoSecure {} -> True RepoRemote {} -> False RepoLocal {} -> False + RepoLocalNoIndex {} -> True is01Index (SandboxIndex _) = False updatePackageIndexCacheFile :: Verbosity -> Index -> IO () updatePackageIndexCacheFile verbosity index = do info verbosity ("Updating index cache file " ++ cacheFile index ++ " ...") - withIndexEntries verbosity index $ \entries -> do - let !maxTs = maximumTimestamp (map cacheEntryTimestamp entries) - cache = Cache { cacheHeadTs = maxTs - , cacheEntries = entries - } - writeIndexCache index cache - info verbosity ("Index cache updated to index-state " - ++ display (cacheHeadTs cache)) + withIndexEntries verbosity index callback callbackNoIndex + where + callback entries = do + let !maxTs = maximumTimestamp (map cacheEntryTimestamp entries) + cache = Cache { cacheHeadTs = maxTs + , cacheEntries = entries + } + writeIndexCache index cache + info verbosity ("Index cache updated to index-state " + ++ display (cacheHeadTs cache)) + + callbackNoIndex entries = do + writeNoIndexCache verbosity index $ NoIndexCache entries + info verbosity "Index cache updated" -- | Read the index (for the purpose of building a cache) -- @@ -597,8 +629,12 @@ -- TODO: It would be nicer if we actually incrementally updated @cabal@'s -- cache, rather than reconstruct it from zero on each update. However, this -- would require a change in the cache format. 
-withIndexEntries :: Verbosity -> Index -> ([IndexCacheEntry] -> IO a) -> IO a -withIndexEntries _ (RepoIndex repoCtxt repo@RepoSecure{..}) callback = +withIndexEntries + :: Verbosity -> Index + -> ([IndexCacheEntry] -> IO a) + -> ([NoIndexCacheEntry] -> IO a) + -> IO a +withIndexEntries _ (RepoIndex repoCtxt repo@RepoSecure{..}) callback _ = repoContextWithSecureRepo repoCtxt repo $ \repoSecure -> Sec.withIndex repoSecure $ \Sec.IndexCallbacks{..} -> do -- Incrementally (lazily) read all the entries in the tar file in order, @@ -625,7 +661,60 @@ timestamp = fromMaybe (error "withIndexEntries: invalid timestamp") $ epochTimeToTimestamp $ Sec.indexEntryTime sie -withIndexEntries verbosity index callback = do -- non-secure repositories +withIndexEntries verbosity (RepoIndex _repoCtxt (RepoLocalNoIndex (LocalRepo name localDir _) _cacheDir)) _ callback = do + dirContents <- listDirectory localDir + let contentSet = Set.fromList dirContents + + entries <- handle handler $ fmap catMaybes $ forM dirContents $ \file -> do + case isTarGz file of + Nothing -> do + unless (takeFileName file == "noindex.cache" || ".cabal" `isSuffixOf` file) $ + info verbosity $ "Skipping " ++ file + return Nothing + Just pkgid | cabalPath `Set.member` contentSet -> do + contents <- BSS.readFile (localDir </> cabalPath) + forM (parseGenericPackageDescriptionMaybe contents) $ \gpd -> + return (CacheGPD gpd contents) + where + cabalPath = prettyShow pkgid ++ ".cabal" + Just pkgId -> do + -- check for the right named .cabal file in the compressed tarball + tarGz <- BS.readFile (localDir </> file) + let tar = GZip.decompress tarGz + entries = Tar.read tar + + case Tar.foldEntries (readCabalEntry pkgId) Nothing (const Nothing) entries of + Just ce -> return (Just ce) + Nothing -> die' verbosity $ "Cannot read .cabal file inside " ++ file + + info verbosity $ "Entries in file+noindex repository " ++ name + for_ entries $ \(CacheGPD gpd _) -> + info verbosity $ "- " ++ prettyShow (package $ Distribution.PackageDescription.packageDescription gpd) + + callback entries + where + handler :: IOException -> IO a + handler e = die' verbosity $ "Error while updating index for " ++ name ++ " repository " ++ show e + + isTarGz :: FilePath -> Maybe PackageIdentifier + isTarGz fp = do + pfx <- stripSuffix ".tar.gz" fp + simpleParsec pfx + + stripSuffix sfx str = fmap reverse (stripPrefix (reverse sfx) (reverse str)) + + -- look for <pkgid>/<pkgname>.cabal inside the tarball + readCabalEntry :: PackageIdentifier -> Tar.Entry -> Maybe NoIndexCacheEntry -> Maybe NoIndexCacheEntry + readCabalEntry pkgId entry Nothing + | filename == Tar.entryPath entry + , Tar.NormalFile contents _ <- Tar.entryContent entry + = let bs = BS.toStrict contents + in fmap (\gpd -> CacheGPD gpd bs) $ parseGenericPackageDescriptionMaybe bs + where + filename = prettyShow pkgId FilePath.Posix.</>
prettyShow (packageName pkgId) ++ ".cabal" + readCabalEntry _ _ x = x + +withIndexEntries verbosity index callback _ = do -- non-secure repositories withFile (indexFile index) ReadMode $ \h -> do bs <- maybeDecompress `fmap` BS.hGetContents h pkgsOrPrefs <- lazySequence $ parsePackageIndex verbosity bs @@ -642,13 +731,18 @@ -> Index -> IndexState -> IO (PackageIndex pkg, [Dependency], IndexStateInfo) -readPackageIndexCacheFile verbosity mkPkg index idxState = do - cache0 <- readIndexCache verbosity index - indexHnd <- openFile (indexFile index) ReadMode - let (cache,isi) = filterCache idxState cache0 - (pkgs,deps) <- packageIndexFromCache verbosity mkPkg indexHnd cache - pure (pkgs,deps,isi) - +readPackageIndexCacheFile verbosity mkPkg index idxState + | localNoIndex index = do + cache0 <- readNoIndexCache verbosity index + pkgs <- packageNoIndexFromCache verbosity mkPkg cache0 + pure (pkgs, [], emptyStateInfo) + + | otherwise = do + cache0 <- readIndexCache verbosity index + indexHnd <- openFile (indexFile index) ReadMode + let (cache,isi) = filterCache idxState cache0 + (pkgs,deps) <- packageIndexFromCache verbosity mkPkg indexHnd cache + pure (pkgs,deps,isi) packageIndexFromCache :: Package pkg => Verbosity @@ -661,6 +755,21 @@ pkgIndex <- evaluate $ PackageIndex.fromList pkgs return (pkgIndex, prefs) +packageNoIndexFromCache + :: forall pkg. Package pkg + => Verbosity + -> (PackageEntry -> pkg) + -> NoIndexCache + -> IO (PackageIndex pkg) +packageNoIndexFromCache _verbosity mkPkg cache = + evaluate $ PackageIndex.fromList pkgs + where + pkgs = + [ mkPkg $ NormalPackage pkgId gpd (BS.fromStrict bs) 0 + | CacheGPD gpd bs <- noIndexCacheEntries cache + , let pkgId = package $ Distribution.PackageDescription.packageDescription gpd + ] + -- | Read package list -- -- The result package releases and preference entries are guaranteed @@ -749,8 +858,7 @@ ------------------------------------------------------------------------ --- Index cache data structure --- +-- Index cache data structure -- -- | Read the 'Index' cache from the filesystem -- @@ -773,20 +881,46 @@ Right res -> return (hashConsCache res) +readNoIndexCache :: Verbosity -> Index -> IO NoIndexCache +readNoIndexCache verbosity index = do + cacheOrFail <- readNoIndexCache' index + case cacheOrFail of + Left msg -> do + warn verbosity $ concat + [ "Parsing the index cache failed (", msg, "). " + , "Trying to regenerate the index cache..." + ] + + updatePackageIndexCacheFile verbosity index + + either (die' verbosity) return =<< readNoIndexCache' index + + -- we don't hash cons local repository cache, they are hopefully small + Right res -> return res + -- | Read the 'Index' cache from the filesystem without attempting to -- regenerate on parsing failures. 
readIndexCache' :: Index -> IO (Either String Cache) readIndexCache' index - | is01Index index = decodeFileOrFail' (cacheFile index) + | is01Index index = structuredDecodeFileOrFail (cacheFile index) | otherwise = liftM (Right .read00IndexCache) $ BSS.readFile (cacheFile index) +readNoIndexCache' :: Index -> IO (Either String NoIndexCache) +readNoIndexCache' index = structuredDecodeFileOrFail (cacheFile index) + -- | Write the 'Index' cache to the filesystem writeIndexCache :: Index -> Cache -> IO () writeIndexCache index cache - | is01Index index = encodeFile (cacheFile index) cache + | is01Index index = structuredEncodeFile (cacheFile index) cache | otherwise = writeFile (cacheFile index) (show00IndexCache cache) +writeNoIndexCache :: Verbosity -> Index -> NoIndexCache -> IO () +writeNoIndexCache verbosity index cache = do + let path = cacheFile index + createDirectoryIfMissingVerbose verbosity True (takeDirectory path) + structuredEncodeFile path cache + -- | Write the 'IndexState' to the filesystem writeIndexTimestamp :: Index -> IndexState -> IO () writeIndexTimestamp index st @@ -852,28 +986,44 @@ -- 'cacheEntries' , cacheEntries :: [IndexCacheEntry] } + deriving (Show, Generic) instance NFData Cache where rnf = rnf . cacheEntries +-- | Cache format for 'file+noindex' repositories +newtype NoIndexCache = NoIndexCache + { noIndexCacheEntries :: [NoIndexCacheEntry] + } + deriving (Show, Generic) + +instance NFData NoIndexCache where + rnf = rnf . noIndexCacheEntries + -- | Tar files are block structured with 512 byte blocks. Every header and file -- content starts on a block boundary. -- type BlockNo = Word32 -- Tar.TarEntryOffset - data IndexCacheEntry = CachePackageId PackageId !BlockNo !Timestamp | CachePreference Dependency !BlockNo !Timestamp | CacheBuildTreeRef !BuildTreeRefType !BlockNo -- NB: CacheBuildTreeRef is irrelevant for 01-index & v2-build - deriving (Eq,Generic) + deriving (Eq,Show,Generic) + +data NoIndexCacheEntry + = CacheGPD GenericPackageDescription !BSS.ByteString + deriving (Eq,Show,Generic) instance NFData IndexCacheEntry where rnf (CachePackageId pkgid _ _) = rnf pkgid rnf (CachePreference dep _ _) = rnf dep rnf (CacheBuildTreeRef _ _) = () +instance NFData NoIndexCacheEntry where + rnf (CacheGPD gpd bs) = rnf gpd `seq` rnf bs + cacheEntryTimestamp :: IndexCacheEntry -> Timestamp cacheEntryTimestamp (CacheBuildTreeRef _ _) = nullTimestamp cacheEntryTimestamp (CachePreference _ _ ts) = ts @@ -882,24 +1032,26 @@ ---------------------------------------------------------------------------- -- new binary 01-index.cache format -instance Binary Cache where - put (Cache headTs ents) = do - -- magic / format version - -- - -- NB: this currently encodes word-size implicitly; when we - -- switch to CBOR encoding, we will have a platform - -- independent binary encoding - put (0xcaba1002::Word) - put headTs - put ents +instance Binary Cache +instance Binary IndexCacheEntry +instance Binary NoIndexCache + +instance Structured Cache +instance Structured IndexCacheEntry +instance Structured NoIndexCache + +-- | We need to save only .cabal file contents +instance Binary NoIndexCacheEntry where + put (CacheGPD _ bs) = put bs get = do - magic <- get - when (magic /= (0xcaba1002::Word)) $ - fail ("01-index.cache: unexpected magic marker encountered: " ++ show magic) - Cache <$> get <*> get + bs <- get + case parseGenericPackageDescriptionMaybe bs of + Just gpd -> return (CacheGPD gpd bs) + Nothing -> fail "Failed to parse GPD" -instance Binary IndexCacheEntry +instance 
Structured NoIndexCacheEntry where + structure = nominalStructure ---------------------------------------------------------------------------- -- legacy 00-index.cache format @@ -972,16 +1124,19 @@ show00IndexCacheEntry :: IndexCacheEntry -> String show00IndexCacheEntry entry = unwords $ case entry of - CachePackageId pkgid b _ -> [ packageKey - , display (packageName pkgid) - , display (packageVersion pkgid) - , blocknoKey - , show b - ] - CacheBuildTreeRef tr b -> [ buildTreeRefKey - , [typeCodeFromRefType tr] - , show b - ] - CachePreference dep _ _ -> [ preferredVersionKey - , display dep - ] + CachePackageId pkgid b _ -> + [ packageKey + , display (packageName pkgid) + , display (packageVersion pkgid) + , blocknoKey + , show b + ] + CacheBuildTreeRef tr b -> + [ buildTreeRefKey + , [typeCodeFromRefType tr] + , show b + ] + CachePreference dep _ _ -> + [ preferredVersionKey + , display dep + ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Install.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Install.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Install.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Install.hs 2020-01-30 20:19:44.000000000 +0000 @@ -395,6 +395,8 @@ . setCountConflicts countConflicts + . setFineGrainedConflicts fineGrainedConflicts + . setMinimizeConflictSet minimizeConflictSet . setAvoidReinstalls avoidReinstalls @@ -463,6 +465,7 @@ fromFlag (installReinstall installFlags) reorderGoals = fromFlag (installReorderGoals installFlags) countConflicts = fromFlag (installCountConflicts installFlags) + fineGrainedConflicts = fromFlag (installFineGrainedConflicts installFlags) minimizeConflictSet = fromFlag (installMinimizeConflictSet installFlags) independentGoals = fromFlag (installIndependentGoals installFlags) avoidReinstalls = fromFlag (installAvoidReinstalls installFlags) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/PackageHash.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/PackageHash.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/PackageHash.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/PackageHash.hs 2020-01-30 20:19:44.000000000 +0000 @@ -20,13 +20,6 @@ -- ** Platform-specific variations hashedInstalledPackageIdLong, hashedInstalledPackageIdShort, - - -- * Low level hash choice - HashValue, - hashValue, - showHashValue, - readFileHashValue, - hashFromTUF, ) where import Prelude () @@ -48,23 +41,16 @@ import Distribution.Deprecated.Text ( display ) import Distribution.Types.PkgconfigVersion (PkgconfigVersion) +import Distribution.Client.HashValue import Distribution.Client.Types ( InstalledPackageId ) import qualified Distribution.Solver.Types.ComponentDeps as CD -import qualified Hackage.Security.Client as Sec - -import qualified Crypto.Hash.SHA256 as SHA256 -import qualified Data.ByteString.Base16 as Base16 -import qualified Data.ByteString.Char8 as BS import qualified Data.ByteString.Lazy.Char8 as LBS import qualified Data.Map as Map import qualified Data.Set as Set import Data.Function (on) -import Control.Exception (evaluate) -import System.IO (withBinaryFile, IOMode(..)) - 
------------------------------- -- Calculating package hashes @@ -121,15 +107,11 @@ -- max length now 64 [ truncateStr 14 (display name) , truncateStr 8 (display version) - , showHashValue (truncateHash (hashPackageHashInputs pkghashinputs)) + , showHashValue (truncateHash 20 (hashPackageHashInputs pkghashinputs)) ] where PackageIdentifier name version = pkgHashPkgId - -- Truncate a 32 byte SHA256 hash to 160bits, 20 bytes :-( - -- It'll render as 40 hex chars. - truncateHash (HashValue h) = HashValue (BS.take 20 h) - -- Truncate a string, with a visual indication that it is truncated. truncateStr n s | length s <= n = s | otherwise = take (n-1) s ++ "_" @@ -163,11 +145,10 @@ intercalate "-" [ filter (not . flip elem "aeiou") (display name) , display version - , showHashValue (truncateHash (hashPackageHashInputs pkghashinputs)) + , showHashValue (truncateHash 4 (hashPackageHashInputs pkghashinputs)) ] where PackageIdentifier name version = pkgHashPkgId - truncateHash (HashValue h) = HashValue (BS.take 4 h) -- | All the information that contribues to a package's hash, and thus its -- 'InstalledPackageId'. @@ -330,57 +311,3 @@ | otherwise = entry key format value showFlagAssignment = unwords . map showFlagValue . sortBy (compare `on` fst) . unFlagAssignment - ------------------------------------------------ --- The specific choice of hash implementation --- - --- Is a crypto hash necessary here? One thing to consider is who controls the --- inputs and what's the result of a hash collision. Obviously we should not --- install packages we don't trust because they can run all sorts of code, but --- if I've checked there's no TH, no custom Setup etc, is there still a --- problem? If someone provided us a tarball that hashed to the same value as --- some other package and we installed it, we could end up re-using that --- installed package in place of another one we wanted. So yes, in general --- there is some value in preventing intentional hash collisions in installed --- package ids. - -newtype HashValue = HashValue BS.ByteString - deriving (Eq, Generic, Show, Typeable) - --- Cannot do any sensible validation here. Although we use SHA256 --- for stuff we hash ourselves, we can also get hashes from TUF --- and that can in principle use different hash functions in future. --- --- Therefore, we simply derive this structurally. -instance Binary HashValue -instance Structured HashValue - --- | Hash some data. Currently uses SHA256. --- -hashValue :: LBS.ByteString -> HashValue -hashValue = HashValue . SHA256.hashlazy - -showHashValue :: HashValue -> String -showHashValue (HashValue digest) = BS.unpack (Base16.encode digest) - --- | Hash the content of a file. Uses SHA256. --- -readFileHashValue :: FilePath -> IO HashValue -readFileHashValue tarball = - withBinaryFile tarball ReadMode $ \hnd -> - evaluate . hashValue =<< LBS.hGetContents hnd - --- | Convert a hash from TUF metadata into a 'PackageSourceHash'. --- --- Note that TUF hashes don't neessarily have to be SHA256, since it can --- support new algorithms in future. --- -hashFromTUF :: Sec.Hash -> HashValue -hashFromTUF (Sec.Hash hashstr) = - --TODO: [code cleanup] either we should get TUF to use raw bytestrings or - -- perhaps we should also just use a base16 string as the internal rep. 
- case Base16.decode (BS.pack hashstr) of - (hash, trailing) | not (BS.null hash) && BS.null trailing - -> HashValue hash - _ -> error "hashFromTUF: cannot decode base16 hash" diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig/Legacy.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig/Legacy.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig/Legacy.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig/Legacy.hs 2020-01-30 20:19:44.000000000 +0000 @@ -27,12 +27,12 @@ import Distribution.Client.ProjectConfig.Types import Distribution.Client.Types - ( RemoteRepo(..), emptyRemoteRepo + ( RemoteRepo(..), LocalRepo (..), emptyRemoteRepo , AllowNewer(..), AllowOlder(..) ) import Distribution.Client.SourceRepo (sourceRepositoryPackageGrammar, SourceRepoList) import Distribution.Client.Config - ( SavedConfig(..), remoteRepoFields ) + ( SavedConfig(..), remoteRepoFields, postProcessRepo ) import Distribution.Client.CmdInstall.ClientInstallFlags ( ClientInstallFlags(..), defaultClientInstallFlags @@ -78,7 +78,7 @@ ( Doc, ($+$) ) import qualified Distribution.Deprecated.ParseUtils as ParseUtils (field) import Distribution.Deprecated.ParseUtils - ( ParseResult(..), PError(..), syntaxError, PWarning(..), warning + ( ParseResult(..), PError(..), syntaxError, PWarning(..) , simpleField, commaNewLineListField, newLineListField, parseTokenQ , parseHaskellString, showToken ) import Distribution.Client.ParseUtils @@ -90,6 +90,8 @@ import qualified Data.Map as Map +import Network.URI (URI (..)) + ------------------------------------------------------------------ -- Representing the project config file in terms of legacy types -- @@ -334,6 +336,7 @@ globalSandboxConfigFile = _, -- ?? 
globalRemoteRepos = projectConfigRemoteRepos, globalLocalRepos = projectConfigLocalRepos, + globalLocalNoIndexRepos = projectConfigLocalNoIndexRepos, globalProgPathExtra = projectConfigProgPathExtra, globalStoreDir = projectConfigStoreDir } = globalFlags @@ -371,6 +374,7 @@ --installUpgradeDeps = projectConfigUpgradeDeps, installReorderGoals = projectConfigReorderGoals, installCountConflicts = projectConfigCountConflicts, + installFineGrainedConflicts = projectConfigFineGrainedConflicts, installMinimizeConflictSet = projectConfigMinimizeConflictSet, installPerComponent = projectConfigPerComponent, installIndependentGoals = projectConfigIndependentGoals, @@ -568,6 +572,7 @@ globalRemoteRepos = projectConfigRemoteRepos, globalCacheDir = projectConfigCacheDir, globalLocalRepos = projectConfigLocalRepos, + globalLocalNoIndexRepos = projectConfigLocalNoIndexRepos, globalLogsDir = projectConfigLogsDir, globalWorldFile = mempty, globalRequireSandbox = mempty, @@ -607,6 +612,7 @@ installUpgradeDeps = mempty, --projectConfigUpgradeDeps, installReorderGoals = projectConfigReorderGoals, installCountConflicts = projectConfigCountConflicts, + installFineGrainedConflicts = projectConfigFineGrainedConflicts, installMinimizeConflictSet = projectConfigMinimizeConflictSet, installIndependentGoals = projectConfigIndependentGoals, installShadowPkgs = mempty, --projectConfigShadowPkgs, @@ -1000,8 +1006,9 @@ , "one-shot", "jobs", "keep-going", "offline", "per-component" -- solver flags: , "max-backjumps", "reorder-goals", "count-conflicts" - , "minimize-conflict-set", "independent-goals" - , "strong-flags" , "allow-boot-library-installs", "reject-unconstrained-dependencies", "index-state" + , "fine-grained-conflicts" , "minimize-conflict-set", "independent-goals" + , "strong-flags" , "allow-boot-library-installs" + , "reject-unconstrained-dependencies", "index-state" ] . commandOptionsToFields ) (installOptions ParseArgs) @@ -1385,36 +1392,39 @@ | otherwise = arg +-- The implementation is slight hack: we parse all as remote repository +-- but if the url schema is file+noindex, we switch to local. remoteRepoSectionDescr :: SectionDescr GlobalFlags -remoteRepoSectionDescr = - SectionDescr { - sectionName = "repository", - sectionFields = remoteRepoFields, - sectionSubsections = [], - sectionGet = map (\x->(remoteRepoName x, x)) . fromNubList - . globalRemoteRepos, - sectionSet = - \lineno reponame repo0 conf -> do - when (null reponame) $ - syntaxError lineno $ "a 'repository' section requires the " - ++ "repository name as an argument" - let repo = repo0 { remoteRepoName = reponame } - when (remoteRepoKeyThreshold repo - > length (remoteRepoRootKeys repo)) $ - warning $ "'key-threshold' for repository " - ++ show (remoteRepoName repo) - ++ " higher than number of keys" - when (not (null (remoteRepoRootKeys repo)) - && remoteRepoSecure repo /= Just True) $ - warning $ "'root-keys' for repository " - ++ show (remoteRepoName repo) - ++ " non-empty, but 'secure' not set to True." 
- return conf { - globalRemoteRepos = overNubList (++[repo]) (globalRemoteRepos conf) - }, - sectionEmpty = emptyRemoteRepo "" +remoteRepoSectionDescr = SectionDescr + { sectionName = "repository" + , sectionEmpty = emptyRemoteRepo "" + , sectionFields = remoteRepoFields + , sectionSubsections = [] + , sectionGet = getS + , sectionSet = setS } - + where + getS :: GlobalFlags -> [(String, RemoteRepo)] + getS gf = + map (\x->(remoteRepoName x, x)) (fromNubList (globalRemoteRepos gf)) + ++ + map (\x->(localRepoName x, localToRemote x)) (fromNubList (globalLocalNoIndexRepos gf)) + + setS :: Int -> String -> RemoteRepo -> GlobalFlags -> ParseResult GlobalFlags + setS lineno reponame repo0 conf = do + repo1 <- postProcessRepo lineno reponame repo0 + case repo1 of + Left repo -> return conf + { globalLocalNoIndexRepos = overNubList (++[repo]) (globalLocalNoIndexRepos conf) + } + Right repo -> return conf + { globalRemoteRepos = overNubList (++[repo]) (globalRemoteRepos conf) + } + + localToRemote :: LocalRepo -> RemoteRepo + localToRemote (LocalRepo name path sharedCache) = (emptyRemoteRepo name) + { remoteRepoURI = URI "file+noindex:" Nothing path "" (if sharedCache then "#shared-cache" else "") + } ------------------------------- -- Local field utils diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig/Types.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig/Types.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig/Types.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig/Types.hs 2020-01-30 20:19:44.000000000 +0000 @@ -24,7 +24,7 @@ import Prelude () import Distribution.Client.Types - ( RemoteRepo, AllowNewer(..), AllowOlder(..) + ( RemoteRepo, LocalRepo, AllowNewer(..), AllowOlder(..) , WriteGhcEnvironmentFilesPolicy ) import Distribution.Client.Dependency.Types ( PreSolver ) @@ -179,6 +179,7 @@ -- configuration used both by the solver and other phases projectConfigRemoteRepos :: NubList RemoteRepo, -- ^ Available Hackage servers. projectConfigLocalRepos :: NubList FilePath, + projectConfigLocalNoIndexRepos :: NubList LocalRepo, projectConfigIndexState :: Flag IndexState, projectConfigStoreDir :: Flag FilePath, @@ -194,6 +195,7 @@ projectConfigMaxBackjumps :: Flag Int, projectConfigReorderGoals :: Flag ReorderGoals, projectConfigCountConflicts :: Flag CountConflicts, + projectConfigFineGrainedConflicts :: Flag FineGrainedConflicts, projectConfigMinimizeConflictSet :: Flag MinimizeConflictSet, projectConfigStrongFlags :: Flag StrongFlags, projectConfigAllowBootLibInstalls :: Flag AllowBootLibInstalls, @@ -387,6 +389,7 @@ = SolverSettings { solverSettingRemoteRepos :: [RemoteRepo], -- ^ Available Hackage servers. 
solverSettingLocalRepos :: [FilePath], + solverSettingLocalNoIndexRepos :: [LocalRepo], solverSettingConstraints :: [(UserConstraint, ConstraintSource)], solverSettingPreferences :: [PackageVersionConstraint], solverSettingFlagAssignment :: FlagAssignment, -- ^ For all local packages @@ -398,6 +401,7 @@ solverSettingMaxBackjumps :: Maybe Int, solverSettingReorderGoals :: ReorderGoals, solverSettingCountConflicts :: CountConflicts, + solverSettingFineGrainedConflicts :: FineGrainedConflicts, solverSettingMinimizeConflictSet :: MinimizeConflictSet, solverSettingStrongFlags :: StrongFlags, solverSettingAllowBootLibInstalls :: AllowBootLibInstalls, @@ -446,6 +450,7 @@ buildSettingKeepTempFiles :: Bool, buildSettingRemoteRepos :: [RemoteRepo], buildSettingLocalRepos :: [FilePath], + buildSettingLocalNoIndexRepos :: [LocalRepo], buildSettingCacheDir :: FilePath, buildSettingHttpTransport :: Maybe String, buildSettingIgnoreExpiry :: Bool, diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/ProjectConfig.hs 2020-01-30 20:19:44.000000000 +0000 @@ -187,6 +187,7 @@ verbosity buildSettingRemoteRepos buildSettingLocalRepos + buildSettingLocalNoIndexRepos buildSettingCacheDir buildSettingHttpTransport (Just buildSettingIgnoreExpiry) @@ -209,6 +210,7 @@ verbosity (fromNubList projectConfigRemoteRepos) (fromNubList projectConfigLocalRepos) + (fromNubList projectConfigLocalNoIndexRepos) (fromFlagOrDefault (error "projectConfigWithSolverRepoContext: projectConfigCacheDir") @@ -233,6 +235,7 @@ -- the flag assignments need checking. 
solverSettingRemoteRepos = fromNubList projectConfigRemoteRepos solverSettingLocalRepos = fromNubList projectConfigLocalRepos + solverSettingLocalNoIndexRepos = fromNubList projectConfigLocalNoIndexRepos solverSettingConstraints = projectConfigConstraints solverSettingPreferences = projectConfigPreferences solverSettingFlagAssignment = packageConfigFlagAssignment projectConfigLocalPackages @@ -247,6 +250,7 @@ | otherwise -> Just n solverSettingReorderGoals = fromFlag projectConfigReorderGoals solverSettingCountConflicts = fromFlag projectConfigCountConflicts + solverSettingFineGrainedConflicts = fromFlag projectConfigFineGrainedConflicts solverSettingMinimizeConflictSet = fromFlag projectConfigMinimizeConflictSet solverSettingStrongFlags = fromFlag projectConfigStrongFlags solverSettingAllowBootLibInstalls = fromFlag projectConfigAllowBootLibInstalls @@ -268,6 +272,7 @@ projectConfigMaxBackjumps = Flag defaultMaxBackjumps, projectConfigReorderGoals = Flag (ReorderGoals False), projectConfigCountConflicts = Flag (CountConflicts True), + projectConfigFineGrainedConflicts = Flag (FineGrainedConflicts True), projectConfigMinimizeConflictSet = Flag (MinimizeConflictSet False), projectConfigStrongFlags = Flag (StrongFlags False), projectConfigAllowBootLibInstalls = Flag (AllowBootLibInstalls False), @@ -296,6 +301,7 @@ projectConfigShared = ProjectConfigShared { projectConfigRemoteRepos, projectConfigLocalRepos, + projectConfigLocalNoIndexRepos, projectConfigProgPathExtra }, projectConfigBuildOnly @@ -316,6 +322,7 @@ buildSettingKeepTempFiles = fromFlag projectConfigKeepTempFiles buildSettingRemoteRepos = fromNubList projectConfigRemoteRepos buildSettingLocalRepos = fromNubList projectConfigLocalRepos + buildSettingLocalNoIndexRepos = fromNubList projectConfigLocalNoIndexRepos buildSettingCacheDir = fromFlag projectConfigCacheDir buildSettingHttpTransport = flagToMaybe projectConfigHttpTransport buildSettingIgnoreExpiry = fromFlag projectConfigIgnoreExpiry diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/ProjectPlanning.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/ProjectPlanning.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/ProjectPlanning.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/ProjectPlanning.hs 2020-01-30 20:19:44.000000000 +0000 @@ -70,6 +70,7 @@ import Prelude () import Distribution.Client.Compat.Prelude +import Distribution.Client.HashValue import Distribution.Client.ProjectPlanning.Types as Ty import Distribution.Client.PackageHash import Distribution.Client.RebuildMonad @@ -957,6 +958,8 @@ . setCountConflicts solverSettingCountConflicts + . setFineGrainedConflicts solverSettingFineGrainedConflicts + . 
setMinimizeConflictSet solverSettingMinimizeConflictSet --TODO: [required eventually] should only be configurable for diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/ProjectPlanOutput.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/ProjectPlanOutput.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/ProjectPlanOutput.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/ProjectPlanOutput.hs 2020-01-30 20:19:44.000000000 +0000 @@ -19,7 +19,7 @@ import Distribution.Client.ProjectBuilding.Types import Distribution.Client.DistDirLayout import Distribution.Client.Types (Repo(..), RemoteRepo(..), PackageLocation(..), confInstId) -import Distribution.Client.PackageHash (showHashValue, hashValue) +import Distribution.Client.HashValue (showHashValue, hashValue) import Distribution.Client.SourceRepo (SourceRepoMaybe, SourceRepositoryPackage (..)) import qualified Distribution.Client.InstallPlan as InstallPlan @@ -203,6 +203,10 @@ J.object [ "type" J..= J.String "local-repo" , "path" J..= J.String repoLocalDir ] + RepoLocalNoIndex{..} -> + J.object [ "type" J..= J.String "local-repo-no-index" + , "path" J..= J.String repoLocalDir + ] RepoRemote{..} -> J.object [ "type" J..= J.String "remote-repo" , "uri" J..= J.String (show (remoteRepoURI repoRemote)) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Security/HTTP.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Security/HTTP.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Security/HTTP.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Security/HTTP.hs 2020-01-30 20:19:44.000000000 +0000 @@ -143,7 +143,7 @@ insert :: Eq a => a -> [b] -> [(a, [b])] -> [(a, [b])] insert x y = modifyAssocList x (++ y) - -- modify the first maching element + -- modify the first matching element modifyAssocList :: Eq a => a -> (b -> b) -> [(a, b)] -> [(a, b)] modifyAssocList a f = go where go [] = [] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Setup.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Setup.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Setup.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Setup.hs 2020-01-30 20:19:44.000000000 +0000 @@ -61,9 +61,9 @@ , liftOptions , yesNoOpt --TODO: stop exporting these: - , showRepo - , parseRepo - , readRepo + , showRemoteRepo + , parseRemoteRepo + , readRemoteRepo ) where import Prelude () @@ -73,6 +73,7 @@ import Distribution.Client.Types ( Username(..), Password(..), RemoteRepo(..) + , LocalRepo (..), emptyLocalRepo , AllowNewer(..), AllowOlder(..), RelaxDeps(..) , WriteGhcEnvironmentFilesPolicy(..) ) @@ -420,7 +421,12 @@ option [] ["remote-repo"] "The name and url for a remote repository" globalRemoteRepos (\v flags -> flags { globalRemoteRepos = v }) - (reqArg' "NAME:URL" (toNubList . maybeToList . readRepo) (map showRepo . fromNubList)) + (reqArg' "NAME:URL" (toNubList . maybeToList . readRemoteRepo) (map showRemoteRepo . 
fromNubList)) + + ,option [] ["local-no-index-repo"] + "The name and a path for a local no-index repository" + globalLocalNoIndexRepos (\v flags -> flags { globalLocalNoIndexRepos = v }) + (reqArg' "NAME:PATH" (toNubList . maybeToList . readLocalRepo) (map showLocalRepo . fromNubList)) ,option [] ["remote-repo-cache"] "The location where downloads from all remote repos are cached" @@ -997,6 +1003,7 @@ fetchMaxBackjumps :: Flag Int, fetchReorderGoals :: Flag ReorderGoals, fetchCountConflicts :: Flag CountConflicts, + fetchFineGrainedConflicts :: Flag FineGrainedConflicts, fetchMinimizeConflictSet :: Flag MinimizeConflictSet, fetchIndependentGoals :: Flag IndependentGoals, fetchShadowPkgs :: Flag ShadowPkgs, @@ -1017,6 +1024,7 @@ fetchMaxBackjumps = Flag defaultMaxBackjumps, fetchReorderGoals = Flag (ReorderGoals False), fetchCountConflicts = Flag (CountConflicts True), + fetchFineGrainedConflicts = Flag (FineGrainedConflicts True), fetchMinimizeConflictSet = Flag (MinimizeConflictSet False), fetchIndependentGoals = Flag (IndependentGoals False), fetchShadowPkgs = Flag (ShadowPkgs False), @@ -1079,6 +1087,7 @@ fetchMaxBackjumps (\v flags -> flags { fetchMaxBackjumps = v }) fetchReorderGoals (\v flags -> flags { fetchReorderGoals = v }) fetchCountConflicts (\v flags -> flags { fetchCountConflicts = v }) + fetchFineGrainedConflicts (\v flags -> flags { fetchFineGrainedConflicts = v }) fetchMinimizeConflictSet (\v flags -> flags { fetchMinimizeConflictSet = v }) fetchIndependentGoals (\v flags -> flags { fetchIndependentGoals = v }) fetchShadowPkgs (\v flags -> flags { fetchShadowPkgs = v }) @@ -1100,6 +1109,7 @@ freezeMaxBackjumps :: Flag Int, freezeReorderGoals :: Flag ReorderGoals, freezeCountConflicts :: Flag CountConflicts, + freezeFineGrainedConflicts :: Flag FineGrainedConflicts, freezeMinimizeConflictSet :: Flag MinimizeConflictSet, freezeIndependentGoals :: Flag IndependentGoals, freezeShadowPkgs :: Flag ShadowPkgs, @@ -1118,6 +1128,7 @@ freezeMaxBackjumps = Flag defaultMaxBackjumps, freezeReorderGoals = Flag (ReorderGoals False), freezeCountConflicts = Flag (CountConflicts True), + freezeFineGrainedConflicts = Flag (FineGrainedConflicts True), freezeMinimizeConflictSet = Flag (MinimizeConflictSet False), freezeIndependentGoals = Flag (IndependentGoals False), freezeShadowPkgs = Flag (ShadowPkgs False), @@ -1171,6 +1182,7 @@ freezeMaxBackjumps (\v flags -> flags { freezeMaxBackjumps = v }) freezeReorderGoals (\v flags -> flags { freezeReorderGoals = v }) freezeCountConflicts (\v flags -> flags { freezeCountConflicts = v }) + freezeFineGrainedConflicts (\v flags -> flags { freezeFineGrainedConflicts = v }) freezeMinimizeConflictSet (\v flags -> flags { freezeMinimizeConflictSet = v }) freezeIndependentGoals (\v flags -> flags { freezeIndependentGoals = v }) freezeShadowPkgs (\v flags -> flags { freezeShadowPkgs = v }) @@ -1743,6 +1755,7 @@ installMaxBackjumps :: Flag Int, installReorderGoals :: Flag ReorderGoals, installCountConflicts :: Flag CountConflicts, + installFineGrainedConflicts :: Flag FineGrainedConflicts, installMinimizeConflictSet :: Flag MinimizeConflictSet, installIndependentGoals :: Flag IndependentGoals, installShadowPkgs :: Flag ShadowPkgs, @@ -1792,6 +1805,7 @@ installMaxBackjumps = Flag defaultMaxBackjumps, installReorderGoals = Flag (ReorderGoals False), installCountConflicts = Flag (CountConflicts True), + installFineGrainedConflicts = Flag (FineGrainedConflicts True), installMinimizeConflictSet = Flag (MinimizeConflictSet False), installIndependentGoals= Flag 
(IndependentGoals False), installShadowPkgs = Flag (ShadowPkgs False), @@ -2022,6 +2036,7 @@ installMaxBackjumps (\v flags -> flags { installMaxBackjumps = v }) installReorderGoals (\v flags -> flags { installReorderGoals = v }) installCountConflicts (\v flags -> flags { installCountConflicts = v }) + installFineGrainedConflicts (\v flags -> flags { installFineGrainedConflicts = v }) installMinimizeConflictSet (\v flags -> flags { installMinimizeConflictSet = v }) installIndependentGoals (\v flags -> flags { installIndependentGoals = v }) installShadowPkgs (\v flags -> flags { installShadowPkgs = v }) @@ -2854,6 +2869,7 @@ -> (flags -> Flag Int ) -> (Flag Int -> flags -> flags) -> (flags -> Flag ReorderGoals) -> (Flag ReorderGoals -> flags -> flags) -> (flags -> Flag CountConflicts) -> (Flag CountConflicts -> flags -> flags) + -> (flags -> Flag FineGrainedConflicts) -> (Flag FineGrainedConflicts -> flags -> flags) -> (flags -> Flag MinimizeConflictSet) -> (Flag MinimizeConflictSet -> flags -> flags) -> (flags -> Flag IndependentGoals) -> (Flag IndependentGoals -> flags -> flags) -> (flags -> Flag ShadowPkgs) -> (Flag ShadowPkgs -> flags -> flags) @@ -2862,8 +2878,8 @@ -> (flags -> Flag OnlyConstrained) -> (Flag OnlyConstrained -> flags -> flags) -> [OptionField flags] optionSolverFlags showOrParseArgs getmbj setmbj getrg setrg getcc setcc - getmc setmc getig setig getsip setsip getstrfl setstrfl - getib setib getoc setoc = + getfgc setfgc getmc setmc getig setig getsip setsip + getstrfl setstrfl getib setib getoc setoc = [ option [] ["max-backjumps"] ("Maximum number of backjumps allowed while solving (default: " ++ show defaultMaxBackjumps ++ "). Use a negative number to enable unlimited backtracking. Use 0 to disable backtracking completely.") getmbj setmbj @@ -2879,6 +2895,11 @@ (fmap asBool . getcc) (setcc . fmap CountConflicts) (yesNoOpt showOrParseArgs) + , option [] ["fine-grained-conflicts"] + "Skip a version of a package if it does not resolve the conflicts encountered in the last version, as a solver optimization (default)." + (fmap asBool . getfgc) + (setfgc . fmap FineGrainedConflicts) + (yesNoOpt showOrParseArgs) , option [] ["minimize-conflict-set"] ("When there is no solution, try to improve the error message by finding " ++ "a minimal conflict set (default: false). 
May increase run time " @@ -2951,15 +2972,15 @@ v | v == nullVersion -> Dependency (packageName p) anyVersion (Set.singleton LMainLibName) | otherwise -> Dependency (packageName p) (thisVersion v) (Set.singleton LMainLibName) -showRepo :: RemoteRepo -> String -showRepo repo = remoteRepoName repo ++ ":" +showRemoteRepo :: RemoteRepo -> String +showRemoteRepo repo = remoteRepoName repo ++ ":" ++ uriToString id (remoteRepoURI repo) [] -readRepo :: String -> Maybe RemoteRepo -readRepo = readPToMaybe parseRepo +readRemoteRepo :: String -> Maybe RemoteRepo +readRemoteRepo = readPToMaybe parseRemoteRepo -parseRepo :: Parse.ReadP r RemoteRepo -parseRepo = do +parseRemoteRepo :: Parse.ReadP r RemoteRepo +parseRemoteRepo = do name <- Parse.munch1 (\c -> isAlphaNum c || c `elem` "_-.") _ <- Parse.char ':' uriStr <- Parse.munch1 (\c -> isAlphaNum c || c `elem` "+-=._/*()@'$:;&!?~") @@ -2973,6 +2994,21 @@ remoteRepoShouldTryHttps = False } +showLocalRepo :: LocalRepo -> String +showLocalRepo repo = localRepoName repo ++ ":" ++ localRepoPath repo + +readLocalRepo :: String -> Maybe LocalRepo +readLocalRepo = readPToMaybe parseLocalRepo + +parseLocalRepo :: Parse.ReadP r LocalRepo +parseLocalRepo = do + name <- Parse.munch1 (\c -> isAlphaNum c || c `elem` "_-.") + _ <- Parse.char ':' + path <- Parse.munch1 (const True) + return $ (emptyLocalRepo name) + { localRepoPath = path + } + -- ------------------------------------------------------------ -- * Helpers for Documentation -- ------------------------------------------------------------ diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Types.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Types.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Types.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Types.hs 2020-01-30 20:19:44.000000000 +0000 @@ -49,6 +49,7 @@ ( LibraryName(..) ) import Distribution.Client.SourceRepo ( SourceRepoMaybe ) +import Distribution.Client.HashValue (showHashValue, hashValue, truncateHash) import Distribution.Solver.Types.PackageIndex ( PackageIndex ) @@ -64,12 +65,14 @@ import Distribution.Compat.Graph (IsNode(..)) import qualified Distribution.Deprecated.ReadP as Parse import Distribution.Deprecated.ParseUtils (parseOptCommaList) -import Distribution.Simple.Utils (ordNub) +import Distribution.Simple.Utils (ordNub, toUTF8BS) import Distribution.Deprecated.Text (Text(..)) import Network.URI (URI(..), nullURI) import Control.Exception (Exception, SomeException) + import qualified Text.PrettyPrint as Disp +import qualified Data.ByteString.Lazy.Char8 as LBS newtype Username = Username { unUsername :: String } @@ -330,6 +333,34 @@ emptyRemoteRepo :: String -> RemoteRepo emptyRemoteRepo name = RemoteRepo name nullURI Nothing [] 0 False +-- | /no-index/ style local repositories. +-- +-- https://github.com/haskell/cabal/issues/6359 +data LocalRepo = LocalRepo + { localRepoName :: String + , localRepoPath :: FilePath + , localRepoSharedCache :: Bool + } + deriving (Show, Eq, Ord, Generic) + +instance Binary LocalRepo +instance Structured LocalRepo + +-- | Construct a partial 'LocalRepo' value to fold the field parser list over. +emptyLocalRepo :: String -> LocalRepo +emptyLocalRepo name = LocalRepo name "" False + +-- | Calculate a cache key for local-repo. 
+-- +-- For remote repositories we just use name, but local repositories may +-- all be named "local", so we add a bit of `localRepoPath` into the +-- mix. +localRepoCacheKey :: LocalRepo -> String +localRepoCacheKey local = localRepoName local ++ "-" ++ hashPart where + hashPart + = showHashValue $ truncateHash 8 $ hashValue + $ LBS.fromStrict $ toUTF8BS $ localRepoPath local + -- | Different kinds of repositories -- -- NOTE: It is important that this type remains serializable. @@ -338,6 +369,14 @@ RepoLocal { repoLocalDir :: FilePath } + + -- | Local repository, without index. + -- + -- https://github.com/haskell/cabal/issues/6359 + | RepoLocalNoIndex + { repoLocal :: LocalRepo + , repoLocalDir :: FilePath + } -- | Standard (unsecured) remote repositores | RepoRemote { @@ -364,14 +403,16 @@ -- | Check if this is a remote repo isRepoRemote :: Repo -> Bool -isRepoRemote RepoLocal{} = False -isRepoRemote _ = True +isRepoRemote RepoLocal{} = False +isRepoRemote RepoLocalNoIndex{} = False +isRepoRemote _ = True -- | Extract @RemoteRepo@ from @Repo@ if remote. maybeRepoRemote :: Repo -> Maybe RemoteRepo -maybeRepoRemote (RepoLocal _localDir) = Nothing -maybeRepoRemote (RepoRemote r _localDir) = Just r -maybeRepoRemote (RepoSecure r _localDir) = Just r +maybeRepoRemote (RepoLocal _localDir) = Nothing +maybeRepoRemote (RepoLocalNoIndex _ _localDir) = Nothing +maybeRepoRemote (RepoRemote r _localDir) = Just r +maybeRepoRemote (RepoSecure r _localDir) = Just r -- ------------------------------------------------------------ -- * Build results diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Update.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Update.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Client/Update.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Client/Update.hs 2020-01-30 20:19:44.000000000 +0000 @@ -74,6 +74,7 @@ transport <- repoContextGetTransport repoCtxt case repo of RepoLocal{..} -> return () + RepoLocalNoIndex{..} -> return () RepoRemote{..} -> do downloadResult <- downloadIndex transport verbosity repoRemote repoLocalDir case downloadResult of diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Builder.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Builder.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Builder.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Builder.hs 2020-01-30 20:19:45.000000000 +0000 @@ -145,7 +145,9 @@ -- and then handle each instance in turn. 
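As a standalone illustration of the NAME:PATH syntax accepted by the new --local-no-index-repo option and of the cache-key scheme used by localRepoCacheKey in Distribution.Client.Types above, here is a minimal sketch. It is not the real implementation: base's Text.ParserCombinators.ReadP stands in for Distribution.Deprecated.ReadP, a toy FNV-1a hash stands in for the truncated SHA-256 from Distribution.Client.HashValue, and the example repository name and path are hypothetical.

import Data.Bits (xor)
import Data.Char (isAlphaNum, ord)
import Numeric (showHex)
import Text.ParserCombinators.ReadP

-- Mirrors the LocalRepo record added to Distribution.Client.Types.
data LocalRepo = LocalRepo
  { localRepoName        :: String
  , localRepoPath        :: FilePath
  , localRepoSharedCache :: Bool
  } deriving (Show, Eq)

-- "NAME:PATH": NAME is alphanumeric plus "_-.", PATH is everything after the
-- first colon (the same character classes as parseLocalRepo above).
parseLocalRepo :: ReadP LocalRepo
parseLocalRepo = do
  name <- munch1 (\c -> isAlphaNum c || c `elem` "_-.")
  _    <- char ':'
  path <- munch1 (const True)
  return (LocalRepo name path False)

readLocalRepo :: String -> Maybe LocalRepo
readLocalRepo s = case [ r | (r, "") <- readP_to_S parseLocalRepo s ] of
  (r:_) -> Just r
  _     -> Nothing

-- Cache key: repository name plus a short hash of the path, so that several
-- repositories all named "local" still get distinct cache directories.
-- A toy FNV-1a hash replaces the real truncated SHA-256.
localRepoCacheKey :: LocalRepo -> String
localRepoCacheKey repo = localRepoName repo ++ "-" ++ hashPart
  where
    hashPart = take 8 (showHex (foldl step 2166136261 (localRepoPath repo)) "")
    step :: Word -> Char -> Word
    step h c = (h `xor` fromIntegral (ord c)) * 16777619

main :: IO ()
main = print (localRepoCacheKey <$> readLocalRepo "local:/home/user/packages")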
addChildren bs@(BS { rdeps = rdm, index = idx, next = OneGoal (PkgGoal qpn@(Q _ pn) gr) }) = case M.lookup pn idx of - Nothing -> FailF (varToConflictSet (P qpn) `CS.union` goalReasonToCS gr) UnknownPackage + Nothing -> FailF + (varToConflictSet (P qpn) `CS.union` goalReasonToConflictSetWithConflict qpn gr) + UnknownPackage Just pis -> PChoiceF qpn rdm gr (W.fromList (L.map (\ (i, info) -> ([], POption i Nothing, bs { next = Instance qpn info })) (M.toList pis))) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/ConflictSet.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/ConflictSet.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/ConflictSet.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/ConflictSet.hs 2020-01-30 20:19:45.000000000 +0000 @@ -10,7 +10,9 @@ -- > import qualified Distribution.Solver.Modular.ConflictSet as CS module Distribution.Solver.Modular.ConflictSet ( ConflictSet -- opaque + , Conflict(..) , ConflictMap + , OrderedVersionRange(..) #ifdef DEBUG_CONFLICT_SETS , conflictSetOrigin #endif @@ -26,19 +28,21 @@ , delete , empty , singleton + , singletonWithConflict , size , member + , lookup , filter , fromList ) where -import Prelude hiding (filter) +import Prelude hiding (lookup) import Data.List (intercalate, sortBy) import Data.Map (Map) import Data.Set (Set) import Data.Function (on) +import qualified Data.Map.Strict as M import qualified Data.Set as S -import qualified Data.Map as M #ifdef DEBUG_CONFLICT_SETS import Data.Tree @@ -46,15 +50,14 @@ #endif import Distribution.Solver.Modular.Var +import Distribution.Solver.Modular.Version import Distribution.Solver.Types.PackagePath --- | The set of variables involved in a solver conflict --- --- Since these variables should be preprocessed in some way, this type is --- kept abstract. +-- | The set of variables involved in a solver conflict, each paired with +-- details about the conflict. data ConflictSet = CS { - -- | The set of variables involved on the conflict - conflictSetToSet :: !(Set (Var QPN)) + -- | The set of variables involved in the conflict + conflictSetToMap :: !(Map (Var QPN) (Set Conflict)) #ifdef DEBUG_CONFLICT_SETS -- | The origin of the conflict set @@ -72,11 +75,48 @@ } deriving (Show) +-- | More detailed information about how a conflict set variable caused a +-- conflict. This information can be used to determine whether a second value +-- for that variable would lead to the same conflict. +-- +-- TODO: Handle dependencies under flags or stanzas. +data Conflict = + + -- | The conflict set variable represents a package which depends on the + -- specified problematic package. For example, the conflict set entry + -- '(P x, GoalConflict y)' means that package x introduced package y, and y + -- led to a conflict. + GoalConflict QPN + + -- | The conflict set variable represents a package with a constraint that + -- excluded the specified package and version. For example, the conflict set + -- entry '(P x, VersionConstraintConflict y (mkVersion [2, 0]))' means that + -- package x's constraint on y excluded y-2.0. + | VersionConstraintConflict QPN Ver + + -- | The conflict set variable represents a package that was excluded by a + -- constraint from the specified package. 
For example, the conflict set + -- entry '(P x, VersionConflict y (orLaterVersion (mkVersion [2, 0])))' + -- means that package y's constraint 'x >= 2.0' excluded some version of x. + | VersionConflict QPN OrderedVersionRange + + -- | Any other conflict. + | OtherConflict + deriving (Eq, Ord, Show) + +-- | Version range with an 'Ord' instance. +newtype OrderedVersionRange = OrderedVersionRange VR + deriving (Eq, Show) + +-- TODO: Avoid converting the version ranges to strings. +instance Ord OrderedVersionRange where + compare = compare `on` show + instance Eq ConflictSet where - (==) = (==) `on` conflictSetToSet + (==) = (==) `on` conflictSetToMap instance Ord ConflictSet where - compare = compare `on` conflictSetToSet + compare = compare `on` conflictSetToMap showConflictSet :: ConflictSet -> String showConflictSet = intercalate ", " . map showVar . toList @@ -102,10 +142,10 @@ -------------------------------------------------------------------------------} toSet :: ConflictSet -> Set (Var QPN) -toSet = conflictSetToSet +toSet = M.keysSet . conflictSetToMap toList :: ConflictSet -> [Var QPN] -toList = S.toList . conflictSetToSet +toList = M.keys . conflictSetToMap union :: #ifdef DEBUG_CONFLICT_SETS @@ -113,7 +153,7 @@ #endif ConflictSet -> ConflictSet -> ConflictSet union cs cs' = CS { - conflictSetToSet = S.union (conflictSetToSet cs) (conflictSetToSet cs') + conflictSetToMap = M.unionWith S.union (conflictSetToMap cs) (conflictSetToMap cs') #ifdef DEBUG_CONFLICT_SETS , conflictSetOrigin = Node ?loc (map conflictSetOrigin [cs, cs']) #endif @@ -125,7 +165,7 @@ #endif [ConflictSet] -> ConflictSet unions css = CS { - conflictSetToSet = S.unions (map conflictSetToSet css) + conflictSetToMap = M.unionsWith S.union (map conflictSetToMap css) #ifdef DEBUG_CONFLICT_SETS , conflictSetOrigin = Node ?loc (map conflictSetOrigin css) #endif @@ -137,7 +177,7 @@ #endif Var QPN -> ConflictSet -> ConflictSet insert var cs = CS { - conflictSetToSet = S.insert var (conflictSetToSet cs) + conflictSetToMap = M.insert var (S.singleton OtherConflict) (conflictSetToMap cs) #ifdef DEBUG_CONFLICT_SETS , conflictSetOrigin = Node ?loc [conflictSetOrigin cs] #endif @@ -145,7 +185,7 @@ delete :: Var QPN -> ConflictSet -> ConflictSet delete var cs = CS { - conflictSetToSet = S.delete var (conflictSetToSet cs) + conflictSetToMap = M.delete var (conflictSetToMap cs) } empty :: @@ -154,7 +194,7 @@ #endif ConflictSet empty = CS { - conflictSetToSet = S.empty + conflictSetToMap = M.empty #ifdef DEBUG_CONFLICT_SETS , conflictSetOrigin = Node ?loc [] #endif @@ -165,30 +205,28 @@ (?loc :: CallStack) => #endif Var QPN -> ConflictSet -singleton var = CS { - conflictSetToSet = S.singleton var +singleton var = singletonWithConflict var OtherConflict + +singletonWithConflict :: +#ifdef DEBUG_CONFLICT_SETS + (?loc :: CallStack) => +#endif + Var QPN -> Conflict -> ConflictSet +singletonWithConflict var conflict = CS { + conflictSetToMap = M.singleton var (S.singleton conflict) #ifdef DEBUG_CONFLICT_SETS , conflictSetOrigin = Node ?loc [] #endif } size :: ConflictSet -> Int -size = S.size . conflictSetToSet +size = M.size . conflictSetToMap member :: Var QPN -> ConflictSet -> Bool -member var = S.member var . conflictSetToSet +member var = M.member var . 
conflictSetToMap -filter :: -#ifdef DEBUG_CONFLICT_SETS - (?loc :: CallStack) => -#endif - (Var QPN -> Bool) -> ConflictSet -> ConflictSet -filter p cs = CS { - conflictSetToSet = S.filter p (conflictSetToSet cs) -#ifdef DEBUG_CONFLICT_SETS - , conflictSetOrigin = Node ?loc [conflictSetOrigin cs] -#endif - } +lookup :: Var QPN -> ConflictSet -> Maybe (Set Conflict) +lookup var = M.lookup var . conflictSetToMap fromList :: #ifdef DEBUG_CONFLICT_SETS @@ -196,7 +234,7 @@ #endif [Var QPN] -> ConflictSet fromList vars = CS { - conflictSetToSet = S.fromList vars + conflictSetToMap = M.fromList [(var, S.singleton OtherConflict) | var <- vars] #ifdef DEBUG_CONFLICT_SETS , conflictSetOrigin = Node ?loc [] #endif diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Dependency.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Dependency.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Dependency.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Dependency.hs 2020-01-30 20:19:45.000000000 +0000 @@ -32,8 +32,11 @@ , QGoalReason , goalToVar , varToConflictSet - , goalReasonToCS - , dependencyReasonToCS + , goalReasonToConflictSet + , goalReasonToConflictSetWithConflict + , dependencyReasonToConflictSet + , dependencyReasonToConflictSetWithVersionConstraintConflict + , dependencyReasonToConflictSetWithVersionConflict ) where import Prelude () @@ -279,14 +282,30 @@ varToConflictSet :: Var QPN -> ConflictSet varToConflictSet = CS.singleton -goalReasonToCS :: GoalReason QPN -> ConflictSet -goalReasonToCS UserGoal = CS.empty -goalReasonToCS (DependencyGoal dr) = dependencyReasonToCS dr +-- | Convert a 'GoalReason' to a 'ConflictSet' that can be used when the goal +-- leads to a conflict. +goalReasonToConflictSet :: GoalReason QPN -> ConflictSet +goalReasonToConflictSet UserGoal = CS.empty +goalReasonToConflictSet (DependencyGoal dr) = dependencyReasonToConflictSet dr + +-- | Convert a 'GoalReason' to a 'ConflictSet' containing the reason that the +-- conflict occurred, namely the conflict set variables caused a conflict by +-- introducing the given package goal. See the documentation for 'GoalConflict'. +-- +-- This function currently only specifies the reason for the conflict in the +-- simple case where the 'GoalReason' does not involve any flags or stanzas. +-- Otherwise, it falls back to calling 'goalReasonToConflictSet'. +goalReasonToConflictSetWithConflict :: QPN -> GoalReason QPN -> ConflictSet +goalReasonToConflictSetWithConflict goal (DependencyGoal (DependencyReason qpn flags stanzas)) + | M.null flags && S.null stanzas = + CS.singletonWithConflict (P qpn) $ CS.GoalConflict goal +goalReasonToConflictSetWithConflict _ gr = goalReasonToConflictSet gr -- | This function returns the solver variables responsible for the dependency. --- It drops the flag and stanza values, which are only needed for log messages. -dependencyReasonToCS :: DependencyReason QPN -> ConflictSet -dependencyReasonToCS (DependencyReason qpn flags stanzas) = +-- It drops the values chosen for flag and stanza variables, which are only +-- needed for log messages. 
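The switch from Set (Var QPN) to Map (Var QPN) (Set Conflict) is what enables the fine-grained skipping introduced elsewhere in this patch: union merges the per-variable conflict details, and lookup recovers them when the solver revisits a variable. The following is a minimal standalone sketch of that representation, with plain Strings standing in for the solver's Var QPN, QPN and Ver types; it only mirrors the shape of the real module.

import qualified Data.Map.Strict as M
import qualified Data.Set as S

-- String stand-ins for Var QPN, QPN and Ver.
type Var = String
type Pkg = String
type Ver = String

data Conflict
  = GoalConflict Pkg                    -- the variable introduced this package goal
  | VersionConstraintConflict Pkg Ver   -- its constraint excluded this version
  | OtherConflict
  deriving (Eq, Ord, Show)

newtype ConflictSet = CS (M.Map Var (S.Set Conflict))
  deriving (Eq, Show)

singletonWithConflict :: Var -> Conflict -> ConflictSet
singletonWithConflict var c = CS (M.singleton var (S.singleton c))

-- Union keeps all conflict details recorded for a variable.
union :: ConflictSet -> ConflictSet -> ConflictSet
union (CS a) (CS b) = CS (M.unionWith S.union a b)

-- What the fine-grained explorer asks: which conflicts involved this variable?
lookupConflicts :: Var -> ConflictSet -> Maybe (S.Set Conflict)
lookupConflicts var (CS m) = M.lookup var m

main :: IO ()
main = do
  let cs = singletonWithConflict "P foo" (GoalConflict "bar")
             `union` singletonWithConflict "P foo" (VersionConstraintConflict "baz" "2.0")
  print (lookupConflicts "P foo" cs)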
+dependencyReasonToConflictSet :: DependencyReason QPN -> ConflictSet +dependencyReasonToConflictSet (DependencyReason qpn flags stanzas) = CS.fromList $ P qpn : flagVars ++ map stanzaToVar (S.toList stanzas) where -- Filter out any flags that introduced the dependency with both values. @@ -297,3 +316,40 @@ stanzaToVar :: Stanza -> Var QPN stanzaToVar = S . SN qpn + +-- | Convert a 'DependencyReason' to a 'ConflictSet' specifying that the +-- conflict occurred because the conflict set variables introduced a problematic +-- version constraint. See the documentation for 'VersionConstraintConflict'. +-- +-- This function currently only specifies the reason for the conflict in the +-- simple case where the 'DependencyReason' does not involve any flags or +-- stanzas. Otherwise, it falls back to calling 'dependencyReasonToConflictSet'. +dependencyReasonToConflictSetWithVersionConstraintConflict :: QPN + -> Ver + -> DependencyReason QPN + -> ConflictSet +dependencyReasonToConflictSetWithVersionConstraintConflict + dependency excludedVersion dr@(DependencyReason qpn flags stanzas) + | M.null flags && S.null stanzas = + CS.singletonWithConflict (P qpn) $ + CS.VersionConstraintConflict dependency excludedVersion + | otherwise = dependencyReasonToConflictSet dr + +-- | Convert a 'DependencyReason' to a 'ConflictSet' specifying that the +-- conflict occurred because the conflict set variables introduced a version of +-- a package that was excluded by a version constraint. See the documentation +-- for 'VersionConflict'. +-- +-- This function currently only specifies the reason for the conflict in the +-- simple case where the 'DependencyReason' does not involve any flags or +-- stanzas. Otherwise, it falls back to calling 'dependencyReasonToConflictSet'. +dependencyReasonToConflictSetWithVersionConflict :: QPN + -> CS.OrderedVersionRange + -> DependencyReason QPN + -> ConflictSet +dependencyReasonToConflictSetWithVersionConflict + pkgWithVersionConstraint constraint dr@(DependencyReason qpn flags stanzas) + | M.null flags && S.null stanzas = + CS.singletonWithConflict (P qpn) $ + CS.VersionConflict pkgWithVersionConstraint constraint + | otherwise = dependencyReasonToConflictSet dr diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Explore.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Explore.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Explore.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Explore.hs 2020-01-30 20:19:45.000000000 +0000 @@ -7,19 +7,28 @@ import Data.Foldable as F import Data.List as L (foldl') +import Data.Maybe (fromMaybe) import Data.Map.Strict as M +import Data.Set as S + +import Distribution.Simple.Setup (asBool) import Distribution.Solver.Modular.Assignment import Distribution.Solver.Modular.Dependency +import Distribution.Solver.Modular.Index import Distribution.Solver.Modular.Log import Distribution.Solver.Modular.Message +import Distribution.Solver.Modular.Package import qualified Distribution.Solver.Modular.PSQ as P import qualified Distribution.Solver.Modular.ConflictSet as CS import Distribution.Solver.Modular.RetryLog import Distribution.Solver.Modular.Tree +import Distribution.Solver.Modular.Version import qualified Distribution.Solver.Modular.WeightedPSQ as W import Distribution.Solver.Types.PackagePath 
-import Distribution.Solver.Types.Settings (EnableBackjumping(..), CountConflicts(..)) +import Distribution.Solver.Types.Settings + (CountConflicts(..), EnableBackjumping(..), FineGrainedConflicts(..)) +import Distribution.Types.VersionRange (anyVersion) -- | This function takes the variable we're currently considering, a -- last conflict set and a list of children's logs. Each log yields @@ -43,25 +52,70 @@ -- with the (virtual) option not to choose anything for the current -- variable. See also the comments for 'avoidSet'. -- -backjump :: Maybe Int -> EnableBackjumping -> Var QPN - -> ConflictSet -> W.WeightedPSQ w k (ExploreState -> ConflictSetLog a) +-- We can also skip a child if it does not resolve any of the conflicts paired +-- with the current variable in the previous child's conflict set. 'backjump' +-- takes a function to determine whether a child can be skipped. If the child +-- can be skipped, the function returns a new conflict set to be merged with the +-- previous conflict set. +-- +backjump :: forall w k a . Maybe Int + -> EnableBackjumping + -> FineGrainedConflicts + + -> (k -> S.Set CS.Conflict -> Maybe ConflictSet) + -- ^ Function that determines whether the given choice could resolve + -- the given conflict. It indicates false by returning 'Just', + -- with the new conflicts to be added to the conflict set. + + -> (k -> ConflictSet -> ExploreState -> ConflictSetLog a) + -- ^ Function that logs the given choice that was skipped. + + -> Var QPN -- ^ The current variable. + + -> ConflictSet -- ^ Conflict set representing the reason that the goal + -- was introduced. + + -> W.WeightedPSQ w k (ExploreState -> ConflictSetLog a) + -- ^ List of children's logs. + -> ExploreState -> ConflictSetLog a -backjump mbj (EnableBackjumping enableBj) var lastCS xs = - F.foldr combine avoidGoal xs CS.empty +backjump mbj enableBj fineGrainedConflicts couldResolveConflicts + logSkippedChoice var lastCS xs = + F.foldr combine avoidGoal [(k, v) | (_, k, v) <- W.toList xs] CS.empty Nothing where - combine :: forall a . (ExploreState -> ConflictSetLog a) - -> (ConflictSet -> ExploreState -> ConflictSetLog a) - -> ConflictSet -> ExploreState -> ConflictSetLog a - combine x f csAcc es = retryNoSolution (x es) next + combine :: (k, ExploreState -> ConflictSetLog a) + -> (ConflictSet -> Maybe ConflictSet -> ExploreState -> ConflictSetLog a) + -> ConflictSet -> Maybe ConflictSet -> ExploreState -> ConflictSetLog a + combine (k, x) f csAcc mPreviousCS es = + case (asBool fineGrainedConflicts, mPreviousCS) of + (True, Just previousCS) -> + case CS.lookup var previousCS of + Just conflicts -> + case couldResolveConflicts k conflicts of + Nothing -> retryNoSolution (x es) next + Just newConflicts -> skipChoice (previousCS `CS.union` newConflicts) + _ -> skipChoice previousCS + _ -> retryNoSolution (x es) next where next :: ConflictSet -> ExploreState -> ConflictSetLog a - next !cs es' = if enableBj && not (var `CS.member` cs) + next !cs es' = if asBool enableBj && not (var `CS.member` cs) then skipLoggingBackjump cs es' - else f (csAcc `CS.union` cs) es' + else f (csAcc `CS.union` cs) (Just cs) es' + + -- This function is for skipping the choice when it cannot resolve any + -- of the previous conflicts. + skipChoice :: ConflictSet -> ConflictSetLog a + skipChoice newCS = + retryNoSolution (logSkippedChoice k newCS es) $ \cs' es' -> + f (csAcc `CS.union` cs') (Just cs') $ + + -- Update the conflict map with the conflict set, to make up for + -- skipping the whole subtree. 
+ es' { esConflictMap = updateCM cs' (esConflictMap es') } -- This function represents the option to not choose a value for this goal. - avoidGoal :: ConflictSet -> ExploreState -> ConflictSetLog a - avoidGoal cs !es = + avoidGoal :: ConflictSet -> Maybe ConflictSet -> ExploreState -> ConflictSetLog a + avoidGoal cs _mPreviousCS !es = logBackjump mbj (cs `CS.union` lastCS) $ -- Use 'lastCS' below instead of 'cs' since we do not want to @@ -86,7 +140,7 @@ where reachedBjLimit = case mbj of Nothing -> const False - Just limit -> (== limit) + Just limit -> (>= limit) -- | Like 'retry', except that it only applies the input function when the -- backjump limit has not been reached. @@ -144,15 +198,20 @@ -- | A tree traversal that simultaneously propagates conflict sets up -- the tree from the leaves and creates a log. -exploreLog :: Maybe Int -> EnableBackjumping -> CountConflicts +exploreLog :: Maybe Int + -> EnableBackjumping + -> FineGrainedConflicts + -> CountConflicts + -> Index -> Tree Assignment QGoalReason -> ConflictSetLog (Assignment, RevDepMap) -exploreLog mbj enableBj (CountConflicts countConflicts) t = para go t initES +exploreLog mbj enableBj fineGrainedConflicts (CountConflicts countConflicts) idx t = + para go t initES where getBestGoal' :: P.PSQ (Goal QPN) a -> ConflictMap -> (Goal QPN, a) getBestGoal' - | countConflicts = \ ts cm -> getBestGoal cm ts - | otherwise = \ ts _ -> getFirstGoal ts + | asBool countConflicts = \ ts cm -> getBestGoal cm ts + | otherwise = \ ts _ -> getFirstGoal ts go :: TreeF Assignment QGoalReason (ExploreState -> ConflictSetLog (Assignment, RevDepMap), Tree Assignment QGoalReason) @@ -162,20 +221,29 @@ in failWith (Failure c fr) (NoSolution c es') go (DoneF rdm a) = \ _ -> succeedWith Success (a, rdm) go (PChoiceF qpn _ gr ts) = - backjump mbj enableBj (P qpn) (avoidSet (P qpn) gr) $ -- try children in order, - W.mapWithKey -- when descending ... - (\ k r es -> tryWith (TryP qpn k) (r es)) - (fmap fst ts) + backjump mbj enableBj fineGrainedConflicts + (couldResolveConflicts qpn) + (logSkippedPackage qpn) + (P qpn) (avoidSet (P qpn) gr) $ -- try children in order, + W.mapWithKey -- when descending ... + (\ k r es -> tryWith (TryP qpn k) (r es)) + (fmap fst ts) go (FChoiceF qfn _ gr _ _ _ ts) = - backjump mbj enableBj (F qfn) (avoidSet (F qfn) gr) $ -- try children in order, - W.mapWithKey -- when descending ... - (\ k r es -> tryWith (TryF qfn k) (r es)) - (fmap fst ts) + backjump mbj enableBj fineGrainedConflicts + (\_ _ -> Nothing) + (const logSkippedChoiceSimple) + (F qfn) (avoidSet (F qfn) gr) $ -- try children in order, + W.mapWithKey -- when descending ... + (\ k r es -> tryWith (TryF qfn k) (r es)) + (fmap fst ts) go (SChoiceF qsn _ gr _ ts) = - backjump mbj enableBj (S qsn) (avoidSet (S qsn) gr) $ -- try children in order, - W.mapWithKey -- when descending ... - (\ k r es -> tryWith (TryS qsn k) (r es)) - (fmap fst ts) + backjump mbj enableBj fineGrainedConflicts + (\_ _ -> Nothing) + (const logSkippedChoiceSimple) + (S qsn) (avoidSet (S qsn) gr) $ -- try children in order, + W.mapWithKey -- when descending ... + (\ k r es -> tryWith (TryS qsn k) (r es)) + (fmap fst ts) go (GoalChoiceF _ ts) = \ es -> let (k, (v, tree)) = getBestGoal' ts (esConflictMap es) in continueWith (Next k) $ @@ -194,6 +262,59 @@ , esBackjumps = 0 } + -- Is it possible for this package instance (QPN and POption) to resolve any + -- of the conflicts that were caused by the previous instance? 
The default + -- is true, because it is always safe to explore a package instance. + -- Skipping it is an optimization. If false, it returns a new conflict set + -- to be merged with the previous one. + couldResolveConflicts :: QPN -> POption -> S.Set CS.Conflict -> Maybe ConflictSet + couldResolveConflicts currentQPN@(Q _ pn) (POption i@(I v _) _) conflicts = + let (PInfo deps _ _ _) = idx ! pn ! i + qdeps = qualifyDeps (defaultQualifyOptions idx) currentQPN deps + + couldBeResolved :: CS.Conflict -> Maybe ConflictSet + couldBeResolved CS.OtherConflict = Nothing + couldBeResolved (CS.GoalConflict conflictingDep) = + -- Check whether this package instance also has 'conflictingDep' + -- as a dependency (ignoring flag and stanza choices). + if F.null [() | Simple (LDep _ (Dep (PkgComponent qpn _) _)) _ <- qdeps, qpn == conflictingDep] + then Nothing + else Just CS.empty + couldBeResolved (CS.VersionConstraintConflict dep excludedVersion) = + -- Check whether this package instance also excludes version + -- 'excludedVersion' of 'dep' (ignoring flag and stanza choices). + let vrs = [vr | Simple (LDep _ (Dep (PkgComponent qpn _) (Constrained vr))) _ <- qdeps, qpn == dep ] + vrIntersection = L.foldl' (.&&.) anyVersion vrs + in if checkVR vrIntersection excludedVersion + then Nothing + else -- If we skip this package instance, we need to update the + -- conflict set to say that 'dep' was also excluded by + -- this package instance's constraint. + Just $ CS.singletonWithConflict (P dep) $ + CS.VersionConflict currentQPN (CS.OrderedVersionRange vrIntersection) + couldBeResolved (CS.VersionConflict reverseDep (CS.OrderedVersionRange excludingVR)) = + -- Check whether this package instance's version is also excluded + -- by 'excludingVR'. + if checkVR excludingVR v + then Nothing + else -- If we skip this version, we need to update the conflict + -- set to say that the reverse dependency also excluded this + -- version. + Just $ CS.singletonWithConflict (P reverseDep) (CS.VersionConstraintConflict currentQPN v) + in fmap CS.unions $ traverse couldBeResolved (S.toList conflicts) + + logSkippedPackage :: QPN -> POption -> ConflictSet -> ExploreState -> ConflictSetLog a + logSkippedPackage qpn pOption cs es = + tryWith (TryP qpn pOption) $ + failWith (Skip (fromMaybe S.empty $ CS.lookup (P qpn) cs)) $ + NoSolution cs es + + -- This function is used for flag and stanza choices, but it should not be + -- called, because there is currently no way to skip a value for a flag or + -- stanza. + logSkippedChoiceSimple :: ConflictSet -> ExploreState -> ConflictSetLog a + logSkippedChoiceSimple cs es = fromProgress $ P.Fail $ NoSolution cs es + -- | Build a conflict set corresponding to the (virtual) option not to -- choose a solution for a goal at all. -- @@ -219,8 +340,10 @@ -- conflict set. -- avoidSet :: Var QPN -> QGoalReason -> ConflictSet -avoidSet var gr = - CS.union (CS.singleton var) (goalReasonToCS gr) +avoidSet var@(P qpn) gr = + CS.union (CS.singleton var) (goalReasonToConflictSetWithConflict qpn gr) +avoidSet var gr = + CS.union (CS.singleton var) (goalReasonToConflictSet gr) -- | Interface. -- @@ -229,11 +352,15 @@ -- backtracking is completely disabled. backjumpAndExplore :: Maybe Int -> EnableBackjumping + -> FineGrainedConflicts -> CountConflicts + -> Index -> Tree d QGoalReason -> RetryLog Message SolverFailure (Assignment, RevDepMap) -backjumpAndExplore mbj enableBj countConflicts = - mapFailure convertFailure . exploreLog mbj enableBj countConflicts . 
assign +backjumpAndExplore mbj enableBj fineGrainedConflicts countConflicts idx = + mapFailure convertFailure + . exploreLog mbj enableBj fineGrainedConflicts countConflicts idx + . assign where convertFailure (NoSolution cs es) = ExhaustiveSearch cs (esConflictMap es) convertFailure BackjumpLimit = BackjumpLimitReached diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Linking.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Linking.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Linking.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Linking.hs 2020-01-30 20:19:45.000000000 +0000 @@ -251,7 +251,7 @@ vs <- get let lg = M.findWithDefault (lgSingleton qpn Nothing) qpn $ vsLinks vs lg' = M.findWithDefault (lgSingleton qpn' Nothing) qpn' $ vsLinks vs - lg'' <- lift' $ lgMerge ((CS.union `on` dependencyReasonToCS) dr1 dr2) lg lg' + lg'' <- lift' $ lgMerge ((CS.union `on` dependencyReasonToConflictSet) dr1 dr2) lg lg' updateLinkGroup lg'' (Flagged fn _ t f, ~(Flagged _ _ t' f')) -> do vs <- get diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Message.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Message.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Message.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Message.hs 2020-01-30 20:19:45.000000000 +0000 @@ -6,10 +6,16 @@ ) where import qualified Data.List as L +import Data.Map (Map) +import qualified Data.Map as M +import Data.Set (Set) +import qualified Data.Set as S +import Data.Maybe (catMaybes, mapMaybe) import Prelude hiding (pi) import Distribution.Pretty (prettyShow) -- from Cabal +import qualified Distribution.Solver.Modular.ConflictSet as CS import Distribution.Solver.Modular.Dependency import Distribution.Solver.Modular.Flag import Distribution.Solver.Modular.Package @@ -28,6 +34,7 @@ | TryF QFN Bool | TryS QSN Bool | Next (Goal QPN) + | Skip (Set CS.Conflict) | Success | Failure ConflictSet FailReason @@ -47,6 +54,8 @@ -- complex patterns go !l (Step (TryP qpn i) (Step Enter (Step (Failure c fr) (Step Leave ms)))) = goPReject l qpn [i] c fr ms + go !l (Step (TryP qpn i) (Step Enter (Step (Skip conflicts) (Step Leave ms)))) = + goPSkip l qpn [i] conflicts ms go !l (Step (TryF qfn b) (Step Enter (Step (Failure c fr) (Step Leave ms)))) = (atLevel l $ "rejecting: " ++ showQFNBool qfn b ++ showFR c fr) (go l ms) go !l (Step (TryS qsn b) (Step Enter (Step (Failure c fr) (Step Leave ms)))) = @@ -63,6 +72,9 @@ go !l (Step (TryS qsn b) ms) = (atLevel l $ "trying: " ++ showQSNBool qsn b) (go l ms) go !l (Step (Next (Goal (P qpn) gr)) ms) = (atLevel l $ showPackageGoal qpn gr) (go l ms) go !l (Step (Next _) ms) = go l ms -- ignore flag goals in the log + go !l (Step (Skip conflicts) ms) = + -- 'Skip' should always be handled by 'goPSkip' in the case above. 
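To make the couldResolveConflicts check above concrete: a candidate version is skipped only when it provably cannot fix any conflict recorded against the previously tried candidate, and the skip contributes its own conflict information back. Below is a simplified, self-contained sketch of that decision, assuming toy candidate data in place of the solver Index and using Cabal's Distribution.Version only for the range checks; the names and the example packages are illustrative, not the real solver API.

import qualified Data.Set as S
import Data.Foldable (foldl')
import Distribution.Version
         (Version, VersionRange, mkVersion, withinRange,
          anyVersion, intersectVersionRanges, earlierVersion)

type Pkg = String

-- Toy summary of one candidate version: the packages it depends on and the
-- version constraints it places on them.
data Candidate = Candidate
  { candVersion     :: Version
  , candDepends     :: S.Set Pkg
  , candConstraints :: [(Pkg, VersionRange)]
  }

-- Conflict details recorded against the previously tried candidate
-- (cf. the Conflict type in Distribution.Solver.Modular.ConflictSet).
data Conflict
  = GoalConflict Pkg                       -- previous candidate introduced Pkg
  | VersionConstraintConflict Pkg Version  -- previous candidate excluded Pkg at Version
  | OtherConflict
  deriving (Eq, Ord, Show)

-- Nothing  -> the candidate might resolve this conflict, so it must be tried.
-- Just ... -> the candidate provably repeats the conflict and may be skipped.
couldBeResolved :: Candidate -> Conflict -> Maybe String
couldBeResolved c (GoalConflict dep)
  | dep `S.member` candDepends c = Just ("still depends on " ++ dep)
  | otherwise                    = Nothing
couldBeResolved c (VersionConstraintConflict dep v)
  | not (withinRange v constraint) = Just ("still excludes " ++ dep)
  | otherwise                      = Nothing
  where
    constraint = foldl' intersectVersionRanges anyVersion
                        [ vr | (p, vr) <- candConstraints c, p == dep ]
couldBeResolved _ OtherConflict = Nothing

-- Skip only if every recorded conflict would recur; traverse collects the
-- per-conflict reasons, mirroring the CS.unions in couldResolveConflicts.
shouldSkip :: Candidate -> S.Set Conflict -> Maybe [String]
shouldSkip c conflicts = traverse (couldBeResolved c) (S.toList conflicts)

main :: IO ()
main = do
  let previousConflicts = S.fromList
        [ VersionConstraintConflict "bytestring" (mkVersion [0,11]) ]
      nextCandidate = Candidate
        { candVersion     = mkVersion [1,2]
        , candDepends     = S.fromList ["bytestring"]
        , candConstraints = [("bytestring", earlierVersion (mkVersion [0,11]))]
        }
  print (shouldSkip nextCandidate previousConflicts)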
+ (atLevel l $ "skipping: " ++ showConflicts conflicts) (go l ms) go !l (Step (Success) ms) = (atLevel l $ "done") (go l ms) go !l (Step (Failure c fr) ms) = (atLevel l $ showFailure c fr) (go l ms) @@ -85,12 +97,112 @@ goPReject l qpn is c fr ms = (atLevel l $ "rejecting: " ++ L.intercalate ", " (map (showQPNPOpt qpn) (reverse is)) ++ showFR c fr) (go l ms) + -- Handle many subsequent skipped package instances. + goPSkip :: Int + -> QPN + -> [POption] + -> Set CS.Conflict + -> Progress Message a b + -> Progress String a b + goPSkip l qpn is conflicts (Step (TryP qpn' i) (Step Enter (Step (Skip conflicts') (Step Leave ms)))) + | qpn == qpn' && conflicts == conflicts' = goPSkip l qpn (i : is) conflicts ms + goPSkip l qpn is conflicts ms = + let msg = "skipping: " + ++ L.intercalate ", " (map (showQPNPOpt qpn) (reverse is)) + ++ showConflicts conflicts + in atLevel l msg (go l ms) + -- write a message with the current level number atLevel :: Int -> String -> Progress String a b -> Progress String a b atLevel l x xs = let s = show l in Step ("[" ++ replicate (3 - length s) '_' ++ s ++ "] " ++ x) xs +-- | Display the set of 'Conflicts' for a skipped package version. +showConflicts :: Set CS.Conflict -> String +showConflicts conflicts = + " (has the same characteristics that caused the previous version to fail: " + ++ conflictMsg ++ ")" + where + conflictMsg :: String + conflictMsg = + if S.member CS.OtherConflict conflicts + then + -- This case shouldn't happen, because an unknown conflict should not + -- cause a version to be skipped. + "unknown conflict" + else let mergedConflicts = + [ showConflict qpn conflict + | (qpn, conflict) <- M.toList (mergeConflicts conflicts) ] + in if L.null mergedConflicts + then + -- This case shouldn't happen unless backjumping is turned off. + "none" + else L.intercalate "; " mergedConflicts + + -- Merge conflicts to simplify the log message. + mergeConflicts :: Set CS.Conflict -> Map QPN MergedPackageConflict + mergeConflicts = M.fromListWith mergeConflict . mapMaybe toMergedConflict . S.toList + where + mergeConflict :: MergedPackageConflict + -> MergedPackageConflict + -> MergedPackageConflict + mergeConflict mergedConflict1 mergedConflict2 = MergedPackageConflict { + isGoalConflict = + isGoalConflict mergedConflict1 || isGoalConflict mergedConflict2 + , versionConstraintConflict = + L.nub $ versionConstraintConflict mergedConflict1 + ++ versionConstraintConflict mergedConflict2 + , versionConflict = + mergeVersionConflicts (versionConflict mergedConflict1) + (versionConflict mergedConflict2) + } + where + mergeVersionConflicts (Just vr1) (Just vr2) = Just (vr1 .||. 
vr2) + mergeVersionConflicts (Just vr1) Nothing = Just vr1 + mergeVersionConflicts Nothing (Just vr2) = Just vr2 + mergeVersionConflicts Nothing Nothing = Nothing + + toMergedConflict :: CS.Conflict -> Maybe (QPN, MergedPackageConflict) + toMergedConflict (CS.GoalConflict qpn) = + Just (qpn, MergedPackageConflict True [] Nothing) + toMergedConflict (CS.VersionConstraintConflict qpn v) = + Just (qpn, MergedPackageConflict False [v] Nothing) + toMergedConflict (CS.VersionConflict qpn (CS.OrderedVersionRange vr)) = + Just (qpn, MergedPackageConflict False [] (Just vr)) + toMergedConflict CS.OtherConflict = Nothing + + showConflict :: QPN -> MergedPackageConflict -> String + showConflict qpn mergedConflict = L.intercalate "; " conflictStrings + where + conflictStrings = catMaybes [ + case () of + () | isGoalConflict mergedConflict -> Just $ + "depends on '" ++ showQPN qpn ++ "'" ++ + (if null (versionConstraintConflict mergedConflict) + then "" + else " but excludes " + ++ showVersions (versionConstraintConflict mergedConflict)) + | not $ L.null (versionConstraintConflict mergedConflict) -> Just $ + "excludes '" ++ showQPN qpn + ++ "' " ++ showVersions (versionConstraintConflict mergedConflict) + | otherwise -> Nothing + , (\vr -> "excluded by constraint '" ++ showVR vr ++ "' from '" ++ showQPN qpn ++ "'") + <$> versionConflict mergedConflict + ] + + showVersions [] = "no versions" + showVersions [v] = "version " ++ showVer v + showVersions vs = "versions " ++ L.intercalate ", " (map showVer vs) + +-- | All conflicts related to one package, used for simplifying the display of +-- a 'Set CS.Conflict'. +data MergedPackageConflict = MergedPackageConflict { + isGoalConflict :: Bool + , versionConstraintConflict :: [Ver] + , versionConflict :: Maybe VR + } + showQPNPOpt :: QPN -> POption -> String showQPNPOpt qpn@(Q _pp pn) (POption i linkedTo) = case linkedTo of diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Package.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Package.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Package.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Package.hs 2020-01-30 20:19:45.000000000 +0000 @@ -18,7 +18,8 @@ , unPN ) where -import Data.List as L +import Prelude () +import Distribution.Solver.Compat.Prelude import Distribution.Package -- from Cabal import Distribution.Deprecated.Text (display) @@ -57,14 +58,12 @@ -- | String representation of an instance. showI :: I -> String showI (I v InRepo) = showVer v -showI (I v (Inst uid)) = showVer v ++ "/installed" ++ shortId uid +showI (I v (Inst uid)) = showVer v ++ "/installed" ++ extractPackageAbiHash uid where - -- A hack to extract the beginning of the package ABI hash - shortId = snip (splitAt 4) (++ "...") - . snip ((\ (x, y) -> (reverse x, y)) . break (=='-') . reverse) ('-':) - . display - snip p f xs = case p xs of - (ys, zs) -> (if L.null zs then id else f) ys + extractPackageAbiHash xs = + case first reverse $ break (=='-') $ reverse (display xs) of + (ys, []) -> ys + (ys, _) -> '-' : ys -- | Package instance. A package name and an instance. 
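The rewritten showI above replaces the old snip helper with a direct suffix extraction: it keeps everything after the last '-' of the displayed unit id, which for an installed package is the ABI hash. A standalone sketch over plain strings follows; the sample ids are hypothetical and not taken from a real package database.

import Data.Bifunctor (first)

-- Keep the part of a displayed unit id after the last '-', i.e. the ABI hash
-- for an installed package; if there is no '-', keep the whole string.
extractPackageAbiHash :: String -> String
extractPackageAbiHash xs =
  case first reverse (break (== '-') (reverse xs)) of
    (ys, []) -> ys          -- no dash at all: return the id unchanged
    (ys, _)  -> '-' : ys    -- dash found: keep a leading '-' plus the hash

main :: IO ()
main = mapM_ (putStrLn . extractPackageAbiHash)
  [ "base-4.12.0.0-1ab2c3d4"   -- hypothetical unit id with an ABI hash suffix
  , "rts"                      -- no dash: returned as-is
  ]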
data PI qpn = PI qpn I diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Preference.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Preference.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Preference.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Preference.hs 2020-01-30 20:19:45.000000000 +0000 @@ -343,7 +343,9 @@ onlyConstrained p = trav go where go (PChoiceF v@(Q _ pn) _ gr _) | not (p pn) - = FailF (varToConflictSet (P v) `CS.union` goalReasonToCS gr) NotExplicit + = FailF + (varToConflictSet (P v) `CS.union` goalReasonToConflictSetWithConflict v gr) + NotExplicit go x = x diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Solver.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Solver.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Solver.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Solver.hs 2020-01-30 20:19:45.000000000 +0000 @@ -57,6 +57,7 @@ data SolverConfig = SolverConfig { reorderGoals :: ReorderGoals, countConflicts :: CountConflicts, + fineGrainedConflicts :: FineGrainedConflicts, minimizeConflictSet :: MinimizeConflictSet, independentGoals :: IndependentGoals, avoidReinstalls :: AvoidReinstalls, @@ -104,7 +105,9 @@ where explorePhase = backjumpAndExplore (maxBackjumps sc) (enableBackjumping sc) + (fineGrainedConflicts sc) (countConflicts sc) + idx detectCycles = traceTree "cycles.json" id . 
detectCyclesPhase heuristicsPhase = let heuristicsTree = traceTree "heuristics.json" id diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Validate.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Validate.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Validate.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Modular/Validate.hs 2020-01-30 20:19:45.000000000 +0000 @@ -320,7 +320,7 @@ -> (ExposedComponent -> DependencyReason QPN -> FailReason) -> Conflict mkConflict comp dr mkFailure = - (CS.insert (P qpn) (dependencyReasonToCS dr), mkFailure comp dr) + (CS.insert (P qpn) (dependencyReasonToConflictSet dr), mkFailure comp dr) buildableProvidedComps :: [ExposedComponent] buildableProvidedComps = [comp | (comp, IsBuildable True) <- M.toList providedComps] @@ -393,13 +393,13 @@ extendSingle :: PPreAssignment -> LDep QPN -> Either Conflict PPreAssignment extendSingle a (LDep dr (Ext ext )) = if extSupported ext then Right a - else Left (dependencyReasonToCS dr, UnsupportedExtension ext) + else Left (dependencyReasonToConflictSet dr, UnsupportedExtension ext) extendSingle a (LDep dr (Lang lang)) = if langSupported lang then Right a - else Left (dependencyReasonToCS dr, UnsupportedLanguage lang) + else Left (dependencyReasonToConflictSet dr, UnsupportedLanguage lang) extendSingle a (LDep dr (Pkg pn vr)) = if pkgPresent pn vr then Right a - else Left (dependencyReasonToCS dr, MissingPkgconfigPackage pn vr) + else Left (dependencyReasonToConflictSet dr, MissingPkgconfigPackage pn vr) extendSingle a (LDep dr (Dep dep@(PkgComponent qpn _) ci)) = let mergedDep = M.findWithDefault (MergedDepConstrained []) qpn a in case (\ x -> M.insert qpn x a) <$> merge mergedDep (PkgDep dr dep ci) of @@ -448,14 +448,14 @@ merge (MergedDepFixed comp1 vs1 i1) (PkgDep vs2 (PkgComponent p comp2) ci@(Fixed i2)) | i1 == i2 = Right $ MergedDepFixed comp1 vs1 i1 | otherwise = - Left ( (CS.union `on` dependencyReasonToCS) vs1 vs2 + Left ( (CS.union `on` dependencyReasonToConflictSet) vs1 vs2 , ( ConflictingDep vs1 (PkgComponent p comp1) (Fixed i1) , ConflictingDep vs2 (PkgComponent p comp2) ci ) ) merge (MergedDepFixed comp1 vs1 i@(I v _)) (PkgDep vs2 (PkgComponent p comp2) ci@(Constrained vr)) | checkVR vr v = Right $ MergedDepFixed comp1 vs1 i | otherwise = - Left ( (CS.union `on` dependencyReasonToCS) vs1 vs2 + Left ( createConflictSetForVersionConflict p v vs1 vr vs2 , ( ConflictingDep vs1 (PkgComponent p comp1) (Fixed i) , ConflictingDep vs2 (PkgComponent p comp2) ci ) ) @@ -467,7 +467,7 @@ go ((vr, comp1, vs1) : vros) | checkVR vr v = go vros | otherwise = - Left ( (CS.union `on` dependencyReasonToCS) vs1 vs2 + Left ( createConflictSetForVersionConflict p v vs2 vr vs1 , ( ConflictingDep vs1 (PkgComponent p comp1) (Constrained vr) , ConflictingDep vs2 (PkgComponent p comp2) ci ) ) @@ -479,6 +479,45 @@ -- no negative performance impact. vrOrigins ++ [(vr, comp2, vs2)]) +-- | Creates a conflict set representing a conflict between a version constraint +-- and the fixed version chosen for a package. 
+createConflictSetForVersionConflict :: QPN + -> Ver + -> DependencyReason QPN + -> VR + -> DependencyReason QPN + -> ConflictSet +createConflictSetForVersionConflict pkg + conflictingVersion + versionDR@(DependencyReason p1 _ _) + conflictingVersionRange + versionRangeDR@(DependencyReason p2 _ _) = + let hasFlagsOrStanzas (DependencyReason _ fs ss) = not (M.null fs) || not (S.null ss) + in + -- The solver currently only optimizes the case where there is a conflict + -- between the version chosen for a package and a version constraint that + -- is not under any flags or stanzas. Here is how we check for this case: + -- + -- (1) Choosing a specific version for a package foo is implemented as + -- adding a dependency from foo to that version of foo (See + -- extendWithPackageChoice), so we check that the DependencyReason + -- contains the current package and no flag or stanza choices. + -- + -- (2) We check that the DependencyReason for the version constraint also + -- contains no flag or stanza choices. + -- + -- When these criteria are not met, we fall back to calling + -- dependencyReasonToConflictSet. + if p1 == pkg && not (hasFlagsOrStanzas versionDR) && not (hasFlagsOrStanzas versionRangeDR) + then let cs1 = dependencyReasonToConflictSetWithVersionConflict + p2 + (CS.OrderedVersionRange conflictingVersionRange) + versionDR + cs2 = dependencyReasonToConflictSetWithVersionConstraintConflict + pkg conflictingVersion versionRangeDR + in cs1 `CS.union` cs2 + else dependencyReasonToConflictSet versionRangeDR `CS.union` dependencyReasonToConflictSet versionDR + -- | Takes a list of new dependencies and uses it to try to update the map of -- known component dependencies. It returns a failure when a new dependency -- requires a component that is missing or unbuildable in a previously chosen @@ -512,7 +551,7 @@ -> (QPN -> ExposedComponent -> FailReason) -> Conflict mkConflict qpn comp dr mkFailure = - (CS.insert (P qpn) (dependencyReasonToCS dr), mkFailure qpn comp) + (CS.insert (P qpn) (dependencyReasonToConflictSet dr), mkFailure qpn comp) buildableComps :: Map comp IsBuildable -> [comp] buildableComps comps = [comp | (comp, IsBuildable True) <- M.toList comps] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Types/InstSolverPackage.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Types/InstSolverPackage.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Types/InstSolverPackage.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Types/InstSolverPackage.hs 2020-01-30 20:19:45.000000000 +0000 @@ -14,7 +14,7 @@ import Distribution.InstalledPackageInfo (InstalledPackageInfo) import GHC.Generics (Generic) --- | An 'InstSolverPackage' is a pre-existing installed pacakge +-- | An 'InstSolverPackage' is a pre-existing installed package -- specified by the dependency solver. 
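createConflictSetForVersionConflict above records the same disagreement from both sides: the package carrying the constraint is blamed for excluding the chosen version, and the package whose version was chosen is blamed for being excluded by that range. A toy illustration of the resulting conflict-set contents, using string stand-ins for QPN, Ver and the rendered version range rather than the solver's real types:

import qualified Data.Map.Strict as M
import qualified Data.Set as S

type Pkg = String

data Conflict
  = VersionConstraintConflict Pkg String  -- "my constraint excluded dep at this version"
  | VersionConflict Pkg String            -- "dep's constraint with this range excluded me"
  deriving (Eq, Ord, Show)

-- Suppose the solver picked bar-2.0 while foo's build-depends says 'bar < 2.0'
-- (a made-up pair of packages).  The combined conflict set blames both
-- variables, each with the detail needed later to decide whether another
-- version can be skipped.
versionConflictExample :: M.Map Pkg (S.Set Conflict)
versionConflictExample = M.unionWith S.union
  (M.singleton "foo" (S.singleton (VersionConstraintConflict "bar" "2.0")))
  (M.singleton "bar" (S.singleton (VersionConflict "foo" "<2.0")))

main :: IO ()
main = mapM_ print (M.toList versionConflictExample)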
data InstSolverPackage = InstSolverPackage { instSolverPkgIPI :: InstalledPackageInfo, diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Types/Settings.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Types/Settings.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/cabal-install-3.2.0.0/Distribution/Solver/Types/Settings.hs 2019-12-17 14:07:19.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/cabal-install-3.2.0.0/Distribution/Solver/Types/Settings.hs 2020-01-30 20:19:45.000000000 +0000 @@ -11,6 +11,7 @@ , OnlyConstrained(..) , EnableBackjumping(..) , CountConflicts(..) + , FineGrainedConflicts(..) , SolveExecutables(..) ) where @@ -30,6 +31,9 @@ newtype CountConflicts = CountConflicts Bool deriving (BooleanFlag, Eq, Generic, Show) +newtype FineGrainedConflicts = FineGrainedConflicts Bool + deriving (BooleanFlag, Eq, Generic, Show) + newtype MinimizeConflictSet = MinimizeConflictSet Bool deriving (BooleanFlag, Eq, Generic, Show) @@ -63,6 +67,7 @@ instance Binary ReorderGoals instance Binary CountConflicts +instance Binary FineGrainedConflicts instance Binary IndependentGoals instance Binary MinimizeConflictSet instance Binary AvoidReinstalls @@ -74,6 +79,7 @@ instance Structured ReorderGoals instance Structured CountConflicts +instance Structured FineGrainedConflicts instance Structured IndependentGoals instance Structured MinimizeConflictSet instance Structured AvoidReinstalls diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/CHANGELOG.markdown cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/CHANGELOG.markdown --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/CHANGELOG.markdown 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/CHANGELOG.markdown 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -2.2.2 ------ -* `Control.Monad.Identity` now re-exports `Control.Monad.Trans.Identity` -* Fix a bug in which `Control.Monad.State.Class.modify'` was not as strict in - the new state as its counterparts in `transformers` -* Add a `MonadError () Maybe` instance -* Add `liftEither :: MonadError e m => Either e a -> m a` to - `Control.Monad.Except{.Class}` -* Add a `MonadWriter w ((,) w)` instance (when built against `base-4.9` or later) - -2.2.1 -------- -* Provide MINIMAL pragmas for `MonadState`, `MonadWriter`, `MonadReader` -* Added a cyclic definition of `ask` in terms of `reader` for consistency with `get`/`put` vs. `state` and `tell` vs. `writer` -* Fix deprecation warnings caused by `transformers` 0.4 deprecating `ErrorT`. -* Added `Control.Monad.Except` in the style of the other `mtl` re-export modules - -2.2.0.1 -------- -* Fixed a bug caused by the change in how `transformers` 0.4 exports its data types. We will now export `runFooT` for each transformer again! - -2.2 ---- -* `transformers` 0.4 support -* Added instances for `ExceptT` -* Added `modify'` to `Control.Monad.State.*` - -2.1.3.1 -------- -* Avoid importing `Control.Monad.Instances` on GHC 7.8 to build without deprecation warnings. - -2.1.3 ------ -* Removed the now-irrelevant `Error` constraint from the `MonadError` instance for `Either e`. 
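FineGrainedConflicts follows the same pattern as the other solver settings above: a newtype around Bool that derives Cabal's BooleanFlag class so call sites can unwrap it with asBool, plus Binary and Structured instances so the setting can take part in cached build plans. A self-contained sketch of that pattern, with a stand-in class in place of the real Distribution.Simple.Setup.BooleanFlag and without the serialisation instances:

{-# LANGUAGE GeneralizedNewtypeDeriving #-}

-- Stand-in for Distribution.Simple.Setup.BooleanFlag.
class BooleanFlag a where
  asBool :: a -> Bool

instance BooleanFlag Bool where
  asBool = id

-- One newtype per solver setting keeps the many Bool-valued options from
-- being mixed up at call sites, while asBool unwraps them where needed
-- (as in 'asBool fineGrainedConflicts' in Explore.hs above).
newtype FineGrainedConflicts = FineGrainedConflicts Bool
  deriving (BooleanFlag, Eq, Show)

newtype CountConflicts = CountConflicts Bool
  deriving (BooleanFlag, Eq, Show)

-- Both optimizations default to on, matching the Flag defaults in Setup.hs.
defaultSettings :: (FineGrainedConflicts, CountConflicts)
defaultSettings = (FineGrainedConflicts True, CountConflicts True)

main :: IO ()
main = print (asBool (fst defaultSettings), asBool (snd defaultSettings))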
diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Cont/Class.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Cont/Class.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Cont/Class.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Cont/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,139 +0,0 @@ -{-# LANGUAGE CPP #-} -{- | -Module : Control.Monad.Cont.Class -Copyright : (c) The University of Glasgow 2001, - (c) Jeff Newbern 2003-2007, - (c) Andriy Palamarchuk 2007 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : portable - -[Computation type:] Computations which can be interrupted and resumed. - -[Binding strategy:] Binding a function to a monadic value creates -a new continuation which uses the function as the continuation of the monadic -computation. - -[Useful for:] Complex control structures, error handling, -and creating co-routines. - -[Zero and plus:] None. - -[Example type:] @'Cont' r a@ - -The Continuation monad represents computations in continuation-passing style -(CPS). -In continuation-passing style function result is not returned, -but instead is passed to another function, -received as a parameter (continuation). -Computations are built up from sequences -of nested continuations, terminated by a final continuation (often @id@) -which produces the final result. -Since continuations are functions which represent the future of a computation, -manipulation of the continuation functions can achieve complex manipulations -of the future of the computation, -such as interrupting a computation in the middle, aborting a portion -of a computation, restarting a computation, and interleaving execution of -computations. -The Continuation monad adapts CPS to the structure of a monad. - -Before using the Continuation monad, be sure that you have -a firm understanding of continuation-passing style -and that continuations represent the best solution to your particular -design problem. -Many algorithms which require continuations in other languages do not require -them in Haskell, due to Haskell's lazy semantics. -Abuse of the Continuation monad can produce code that is impossible -to understand and maintain. --} - -module Control.Monad.Cont.Class ( - MonadCont(..), - ) where - -import Control.Monad.Trans.Cont (ContT) -import qualified Control.Monad.Trans.Cont as ContT -import Control.Monad.Trans.Error as Error -import Control.Monad.Trans.Except as Except -import Control.Monad.Trans.Identity as Identity -import Control.Monad.Trans.List as List -import Control.Monad.Trans.Maybe as Maybe -import Control.Monad.Trans.Reader as Reader -import Control.Monad.Trans.RWS.Lazy as LazyRWS -import Control.Monad.Trans.RWS.Strict as StrictRWS -import Control.Monad.Trans.State.Lazy as LazyState -import Control.Monad.Trans.State.Strict as StrictState -import Control.Monad.Trans.Writer.Lazy as LazyWriter -import Control.Monad.Trans.Writer.Strict as StrictWriter - -import Control.Monad -import Data.Monoid - -class Monad m => MonadCont m where - {- | @callCC@ (call-with-current-continuation) - calls a function with the current continuation as its argument. - Provides an escape continuation mechanism for use with Continuation monads. - Escape continuations allow to abort the current computation and return - a value immediately. 
- They achieve a similar effect to 'Control.Monad.Error.throwError' - and 'Control.Monad.Error.catchError' - within an 'Control.Monad.Error.Error' monad. - Advantage of this function over calling @return@ is that it makes - the continuation explicit, - allowing more flexibility and better control - (see examples in "Control.Monad.Cont"). - - The standard idiom used with @callCC@ is to provide a lambda-expression - to name the continuation. Then calling the named continuation anywhere - within its scope will escape from the computation, - even if it is many layers deep within nested computations. - -} - callCC :: ((a -> m b) -> m a) -> m a -#if __GLASGOW_HASKELL__ >= 707 - {-# MINIMAL callCC #-} -#endif - -instance MonadCont (ContT r m) where - callCC = ContT.callCC - --- --------------------------------------------------------------------------- --- Instances for other mtl transformers - -instance (Error e, MonadCont m) => MonadCont (ErrorT e m) where - callCC = Error.liftCallCC callCC - -{- | @since 2.2 -} -instance MonadCont m => MonadCont (ExceptT e m) where - callCC = Except.liftCallCC callCC - -instance MonadCont m => MonadCont (IdentityT m) where - callCC = Identity.liftCallCC callCC - -instance MonadCont m => MonadCont (ListT m) where - callCC = List.liftCallCC callCC - -instance MonadCont m => MonadCont (MaybeT m) where - callCC = Maybe.liftCallCC callCC - -instance MonadCont m => MonadCont (ReaderT r m) where - callCC = Reader.liftCallCC callCC - -instance (Monoid w, MonadCont m) => MonadCont (LazyRWS.RWST r w s m) where - callCC = LazyRWS.liftCallCC' callCC - -instance (Monoid w, MonadCont m) => MonadCont (StrictRWS.RWST r w s m) where - callCC = StrictRWS.liftCallCC' callCC - -instance MonadCont m => MonadCont (LazyState.StateT s m) where - callCC = LazyState.liftCallCC' callCC - -instance MonadCont m => MonadCont (StrictState.StateT s m) where - callCC = StrictState.liftCallCC' callCC - -instance (Monoid w, MonadCont m) => MonadCont (LazyWriter.WriterT w m) where - callCC = LazyWriter.liftCallCC callCC - -instance (Monoid w, MonadCont m) => MonadCont (StrictWriter.WriterT w m) where - callCC = StrictWriter.liftCallCC callCC diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Cont.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Cont.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Cont.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Cont.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,169 +0,0 @@ -{- | -Module : Control.Monad.Cont -Copyright : (c) The University of Glasgow 2001, - (c) Jeff Newbern 2003-2007, - (c) Andriy Palamarchuk 2007 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : portable - -[Computation type:] Computations which can be interrupted and resumed. - -[Binding strategy:] Binding a function to a monadic value creates -a new continuation which uses the function as the continuation of the monadic -computation. - -[Useful for:] Complex control structures, error handling, -and creating co-routines. - -[Zero and plus:] None. - -[Example type:] @'Cont' r a@ - -The Continuation monad represents computations in continuation-passing style -(CPS). -In continuation-passing style function result is not returned, -but instead is passed to another function, -received as a parameter (continuation). 
-Computations are built up from sequences -of nested continuations, terminated by a final continuation (often @id@) -which produces the final result. -Since continuations are functions which represent the future of a computation, -manipulation of the continuation functions can achieve complex manipulations -of the future of the computation, -such as interrupting a computation in the middle, aborting a portion -of a computation, restarting a computation, and interleaving execution of -computations. -The Continuation monad adapts CPS to the structure of a monad. - -Before using the Continuation monad, be sure that you have -a firm understanding of continuation-passing style -and that continuations represent the best solution to your particular -design problem. -Many algorithms which require continuations in other languages do not require -them in Haskell, due to Haskell's lazy semantics. -Abuse of the Continuation monad can produce code that is impossible -to understand and maintain. --} - -module Control.Monad.Cont ( - -- * MonadCont class - MonadCont(..), - -- * The Cont monad - Cont, - cont, - runCont, - mapCont, - withCont, - -- * The ContT monad transformer - ContT(ContT), - runContT, - mapContT, - withContT, - module Control.Monad, - module Control.Monad.Trans, - -- * Example 1: Simple Continuation Usage - -- $simpleContExample - - -- * Example 2: Using @callCC@ - -- $callCCExample - - -- * Example 3: Using @ContT@ Monad Transformer - -- $ContTExample - ) where - -import Control.Monad.Cont.Class - -import Control.Monad.Trans -import Control.Monad.Trans.Cont - -import Control.Monad - -{- $simpleContExample -Calculating length of a list continuation-style: - ->calculateLength :: [a] -> Cont r Int ->calculateLength l = return (length l) - -Here we use @calculateLength@ by making it to pass its result to @print@: - ->main = do -> runCont (calculateLength "123") print -> -- result: 3 - -It is possible to chain 'Cont' blocks with @>>=@. - ->double :: Int -> Cont r Int ->double n = return (n * 2) -> ->main = do -> runCont (calculateLength "123" >>= double) print -> -- result: 6 --} - -{- $callCCExample -This example gives a taste of how escape continuations work, shows a typical -pattern for their usage. - ->-- Returns a string depending on the length of the name parameter. ->-- If the provided string is empty, returns an error. ->-- Otherwise, returns a welcome message. ->whatsYourName :: String -> String ->whatsYourName name = -> (`runCont` id) $ do -- 1 -> response <- callCC $ \exit -> do -- 2 -> validateName name exit -- 3 -> return $ "Welcome, " ++ name ++ "!" -- 4 -> return response -- 5 -> ->validateName name exit = do -> when (null name) (exit "You forgot to tell me your name!") - -Here is what this example does: - -(1) Runs an anonymous 'Cont' block and extracts value from it with -@(\`runCont\` id)@. Here @id@ is the continuation, passed to the @Cont@ block. - -(1) Binds @response@ to the result of the following 'Control.Monad.Cont.Class.callCC' block, -binds @exit@ to the continuation. - -(1) Validates @name@. -This approach illustrates advantage of using 'Control.Monad.Cont.Class.callCC' over @return@. -We pass the continuation to @validateName@, -and interrupt execution of the @Cont@ block from /inside/ of @validateName@. - -(1) Returns the welcome message from the 'Control.Monad.Cont.Class.callCC' block. -This line is not executed if @validateName@ fails. - -(1) Returns from the @Cont@ block. 
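A more compact sketch of the same escape pattern (the function name is invented for illustration): @callCC@ can abandon a computation as soon as the answer is known.

>-- Multiplies the elements of a list, escaping immediately when a zero is seen.
>product' :: [Int] -> Int
>product' xs = (`runCont` id) $ callCC $ \exit -> do
>    when (0 `elem` xs) (exit 0)
>    return (product xs)

Because @exit 0@ discards the rest of the block, @product xs@ is never demanded when the list contains a zero.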
--} - -{-$ContTExample -'ContT' can be used to add continuation handling to other monads. -Here is an example how to combine it with @IO@ monad: - ->import Control.Monad.Cont ->import System.IO -> ->main = do -> hSetBuffering stdout NoBuffering -> runContT (callCC askString) reportResult -> ->askString :: (String -> ContT () IO String) -> ContT () IO String ->askString next = do -> liftIO $ putStrLn "Please enter a string" -> s <- liftIO $ getLine -> next s -> ->reportResult :: String -> IO () ->reportResult s = do -> putStrLn ("You entered: " ++ s) - -Action @askString@ requests user to enter a string, -and passes it to the continuation. -@askString@ takes as a parameter a continuation taking a string parameter, -and returning @IO ()@. -Compare its signature to 'runContT' definition. --} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Error/Class.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Error/Class.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Error/Class.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Error/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,192 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE FunctionalDependencies #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE UndecidableInstances #-} - -{- | -Module : Control.Monad.Error.Class -Copyright : (c) Michael Weber 2001, - (c) Jeff Newbern 2003-2006, - (c) Andriy Palamarchuk 2006 - (c) Edward Kmett 2012 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : non-portable (multi-parameter type classes) - -[Computation type:] Computations which may fail or throw exceptions. - -[Binding strategy:] Failure records information about the cause\/location -of the failure. Failure values bypass the bound function, -other values are used as inputs to the bound function. - -[Useful for:] Building computations from sequences of functions that may fail -or using exception handling to structure error handling. - -[Zero and plus:] Zero is represented by an empty error and the plus operation -executes its second argument if the first fails. - -[Example type:] @'Either' 'String' a@ - -The Error monad (also called the Exception monad). 
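As a small sketch of the class in action (the helper is invented for illustration), the @'Either' 'String'@ instance defined below can be used directly:

>safeDiv :: Int -> Int -> Either String Int
>safeDiv _ 0 = throwError "division by zero"
>safeDiv x y = return (x `div` y)
>
>recovered :: Either String Int
>recovered = safeDiv 1 0 `catchError` \_ -> return 0   -- Right 0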
--} - -{- - Rendered by Michael Weber , - inspired by the Haskell Monad Template Library from - Andy Gill () --} -module Control.Monad.Error.Class ( - Error(..), - MonadError(..), - liftEither, - ) where - -import Control.Monad.Trans.Except (Except, ExceptT) -import Control.Monad.Trans.Error (Error(..), ErrorT) -import qualified Control.Monad.Trans.Except as ExceptT (throwE, catchE) -import qualified Control.Monad.Trans.Error as ErrorT (throwError, catchError) -import Control.Monad.Trans.Identity as Identity -import Control.Monad.Trans.List as List -import Control.Monad.Trans.Maybe as Maybe -import Control.Monad.Trans.Reader as Reader -import Control.Monad.Trans.RWS.Lazy as LazyRWS -import Control.Monad.Trans.RWS.Strict as StrictRWS -import Control.Monad.Trans.State.Lazy as LazyState -import Control.Monad.Trans.State.Strict as StrictState -import Control.Monad.Trans.Writer.Lazy as LazyWriter -import Control.Monad.Trans.Writer.Strict as StrictWriter - -import Control.Monad.Trans.Class (lift) -import Control.Exception (IOException, catch, ioError) -import Control.Monad - -#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ < 707 -import Control.Monad.Instances () -#endif - -import Data.Monoid -import Prelude (Either(..), Maybe(..), either, (.), IO) - -{- | -The strategy of combining computations that can throw exceptions -by bypassing bound functions -from the point an exception is thrown to the point that it is handled. - -Is parameterized over the type of error information and -the monad type constructor. -It is common to use @'Either' String@ as the monad type constructor -for an error monad in which error descriptions take the form of strings. -In that case and many other common cases the resulting monad is already defined -as an instance of the 'MonadError' class. -You can also define your own error type and\/or use a monad type constructor -other than @'Either' 'String'@ or @'Either' 'IOError'@. -In these cases you will have to explicitly define instances of the 'MonadError' -class. -(If you are using the deprecated "Control.Monad.Error" or -"Control.Monad.Trans.Error", you may also have to define an 'Error' instance.) --} -class (Monad m) => MonadError e m | m -> e where - -- | Is used within a monadic computation to begin exception processing. - throwError :: e -> m a - - {- | - A handler function to handle previous errors and return to normal execution. - A common idiom is: - - > do { action1; action2; action3 } `catchError` handler - - where the @action@ functions can call 'throwError'. - Note that @handler@ and the do-block must have the same return type. - -} - catchError :: m a -> (e -> m a) -> m a -#if __GLASGOW_HASKELL__ >= 707 - {-# MINIMAL throwError, catchError #-} -#endif - -{- | -Lifts an @'Either' e@ into any @'MonadError' e@. - -> do { val <- liftEither =<< action1; action2 } - -where @action1@ returns an 'Either' to represent errors. 
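For instance (a sketch; the parser below is invented for illustration), a pure @'Either'@-returning validation can be lifted into any surrounding error monad:

>parsePort :: MonadError String m => String -> m Int
>parsePort s = liftEither $ case reads s of
>    [(n, "")] -> Right n
>    _         -> Left ("not a port number: " ++ s)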
- -@since 2.2.2 --} -liftEither :: MonadError e m => Either e a -> m a -liftEither = either throwError return - -instance MonadError IOException IO where - throwError = ioError - catchError = catch - -{- | @since 2.2.2 -} -instance MonadError () Maybe where - throwError () = Nothing - catchError Nothing f = f () - catchError x _ = x - --- --------------------------------------------------------------------------- --- Our parameterizable error monad - -instance MonadError e (Either e) where - throwError = Left - Left l `catchError` h = h l - Right r `catchError` _ = Right r - -instance (Monad m, Error e) => MonadError e (ErrorT e m) where - throwError = ErrorT.throwError - catchError = ErrorT.catchError - -{- | @since 2.2 -} -instance Monad m => MonadError e (ExceptT e m) where - throwError = ExceptT.throwE - catchError = ExceptT.catchE - --- --------------------------------------------------------------------------- --- Instances for other mtl transformers --- --- All of these instances need UndecidableInstances, --- because they do not satisfy the coverage condition. - -instance MonadError e m => MonadError e (IdentityT m) where - throwError = lift . throwError - catchError = Identity.liftCatch catchError - -instance MonadError e m => MonadError e (ListT m) where - throwError = lift . throwError - catchError = List.liftCatch catchError - -instance MonadError e m => MonadError e (MaybeT m) where - throwError = lift . throwError - catchError = Maybe.liftCatch catchError - -instance MonadError e m => MonadError e (ReaderT r m) where - throwError = lift . throwError - catchError = Reader.liftCatch catchError - -instance (Monoid w, MonadError e m) => MonadError e (LazyRWS.RWST r w s m) where - throwError = lift . throwError - catchError = LazyRWS.liftCatch catchError - -instance (Monoid w, MonadError e m) => MonadError e (StrictRWS.RWST r w s m) where - throwError = lift . throwError - catchError = StrictRWS.liftCatch catchError - -instance MonadError e m => MonadError e (LazyState.StateT s m) where - throwError = lift . throwError - catchError = LazyState.liftCatch catchError - -instance MonadError e m => MonadError e (StrictState.StateT s m) where - throwError = lift . throwError - catchError = StrictState.liftCatch catchError - -instance (Monoid w, MonadError e m) => MonadError e (LazyWriter.WriterT w m) where - throwError = lift . throwError - catchError = LazyWriter.liftCatch catchError - -instance (Monoid w, MonadError e m) => MonadError e (StrictWriter.WriterT w m) where - throwError = lift . throwError - catchError = StrictWriter.liftCatch catchError diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Error.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Error.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Error.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Error.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,151 +0,0 @@ -{-# LANGUAGE CPP #-} -{- | -Module : Control.Monad.Error -Copyright : (c) Michael Weber 2001, - (c) Jeff Newbern 2003-2006, - (c) Andriy Palamarchuk 2006 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : non-portable (multi-parameter type classes) - -[Computation type:] Computations which may fail or throw exceptions. - -[Binding strategy:] Failure records information about the cause\/location -of the failure. 
Failure values bypass the bound function, -other values are used as inputs to the bound function. - -[Useful for:] Building computations from sequences of functions that may fail -or using exception handling to structure error handling. - -[Zero and plus:] Zero is represented by an empty error and the plus operation -executes its second argument if the first fails. - -[Example type:] @'Either' String a@ - -The Error monad (also called the Exception monad). --} - -{- - Rendered by Michael Weber , - inspired by the Haskell Monad Template Library from - Andy Gill () --} -module Control.Monad.Error - {-# DEPRECATED "Use \"Control.Monad.Except\" instead" #-} ( - -- * Monads with error handling - MonadError(..), - Error(..), - -- * The ErrorT monad transformer - ErrorT(ErrorT), - runErrorT, - mapErrorT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - -- * Example 1: Custom Error Data Type - -- $customErrorExample - - -- * Example 2: Using ErrorT Monad Transformer - -- $ErrorTExample - ) where - -import Control.Monad.Error.Class -import Control.Monad.Trans -import Control.Monad.Trans.Error (ErrorT(ErrorT), runErrorT, mapErrorT) - -import Control.Monad -import Control.Monad.Fix - -#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ < 707 -import Control.Monad.Instances () -#endif - -{- $customErrorExample -Here is an example that demonstrates the use of a custom 'Error' data type with -the 'throwError' and 'catchError' exception mechanism from 'MonadError'. -The example throws an exception if the user enters an empty string -or a string longer than 5 characters. Otherwise it prints length of the string. - ->-- This is the type to represent length calculation error. ->data LengthError = EmptyString -- Entered string was empty. -> | StringTooLong Int -- A string is longer than 5 characters. -> -- Records a length of the string. -> | OtherError String -- Other error, stores the problem description. -> ->-- We make LengthError an instance of the Error class ->-- to be able to throw it as an exception. ->instance Error LengthError where -> noMsg = OtherError "A String Error!" -> strMsg s = OtherError s -> ->-- Converts LengthError to a readable message. ->instance Show LengthError where -> show EmptyString = "The string was empty!" -> show (StringTooLong len) = -> "The length of the string (" ++ (show len) ++ ") is bigger than 5!" -> show (OtherError msg) = msg -> ->-- For our monad type constructor, we use Either LengthError ->-- which represents failure using Left LengthError ->-- or a successful result of type a using Right a. ->type LengthMonad = Either LengthError -> ->main = do -> putStrLn "Please enter a string:" -> s <- getLine -> reportResult (calculateLength s) -> ->-- Wraps length calculation to catch the errors. ->-- Returns either length of the string or an error. ->calculateLength :: String -> LengthMonad Int ->calculateLength s = (calculateLengthOrFail s) `catchError` Left -> ->-- Attempts to calculate length and throws an error if the provided string is ->-- empty or longer than 5 characters. ->-- The processing is done in Either monad. ->calculateLengthOrFail :: String -> LengthMonad Int ->calculateLengthOrFail [] = throwError EmptyString ->calculateLengthOrFail s | len > 5 = throwError (StringTooLong len) -> | otherwise = return len -> where len = length s -> ->-- Prints result of the string length calculation. 
->reportResult :: LengthMonad Int -> IO () ->reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) ->reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) --} - -{- $ErrorTExample -@'ErrorT'@ monad transformer can be used to add error handling to another monad. -Here is an example how to combine it with an @IO@ monad: - ->import Control.Monad.Error -> ->-- An IO monad which can return String failure. ->-- It is convenient to define the monad type of the combined monad, ->-- especially if we combine more monad transformers. ->type LengthMonad = ErrorT String IO -> ->main = do -> -- runErrorT removes the ErrorT wrapper -> r <- runErrorT calculateLength -> reportResult r -> ->-- Asks user for a non-empty string and returns its length. ->-- Throws an error if user enters an empty string. ->calculateLength :: LengthMonad Int ->calculateLength = do -> -- all the IO operations have to be lifted to the IO monad in the monad stack -> liftIO $ putStrLn "Please enter a non-empty string: " -> s <- liftIO getLine -> if null s -> then throwError "The string was empty!" -> else return $ length s -> ->-- Prints result of the string length calculation. ->reportResult :: Either String Int -> IO () ->reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) ->reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) --} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Except.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Except.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Except.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Except.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -{-# LANGUAGE CPP #-} -{- | -Module : Control.Monad.Except -Copyright : (c) Michael Weber 2001, - (c) Jeff Newbern 2003-2006, - (c) Andriy Palamarchuk 2006 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : non-portable (multi-parameter type classes) - -[Computation type:] Computations which may fail or throw exceptions. - -[Binding strategy:] Failure records information about the cause\/location -of the failure. Failure values bypass the bound function, -other values are used as inputs to the bound function. - -[Useful for:] Building computations from sequences of functions that may fail -or using exception handling to structure error handling. - -[Example type:] @'Either' String a@ - -The Error monad (also called the Exception monad). 
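A minimal sketch of the plain @'Except'@ monad exported below (the function name is invented for illustration):

>parseAge :: String -> Except String Int
>parseAge s = case reads s of
>    [(n, "")] | n >= 0 -> return n
>    _                  -> throwError ("invalid age: " ++ s)

Running it with 'runExcept' yields either a @Right@ age or a @Left@ error message.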
- -@since 2.2.1 --} - -{- - Rendered by Michael Weber , - inspired by the Haskell Monad Template Library from - Andy Gill () --} -module Control.Monad.Except - ( - -- * Warning - -- $warning - -- * Monads with error handling - MonadError(..), - liftEither, - -- * The ExceptT monad transformer - ExceptT(ExceptT), - Except, - - runExceptT, - mapExceptT, - withExceptT, - runExcept, - mapExcept, - withExcept, - - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - -- * Example 1: Custom Error Data Type - -- $customErrorExample - - -- * Example 2: Using ExceptT Monad Transformer - -- $ExceptTExample - ) where - -import Control.Monad.Error.Class -import Control.Monad.Trans -import Control.Monad.Trans.Except - ( ExceptT(ExceptT), Except, except - , runExcept, runExceptT - , mapExcept, mapExceptT - , withExcept, withExceptT - ) - -import Control.Monad -import Control.Monad.Fix - -#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ < 707 -import Control.Monad.Instances () -#endif - -{- $warning -Please do not confuse 'ExceptT' and 'throwError' with 'Control.Exception.Exception' / -'Control.Exception.SomeException' and 'Control.Exception.catch', respectively. The latter -are for exceptions built into GHC, by default, and are mostly used from within the IO monad. -They do not interact with the \"exceptions\" in this package at all. This package allows you -to define a new kind of exception control mechanism which does not necessarily need your code to -be placed in the IO monad. - -In short, all \"catching\" mechanisms in this library will be unable to catch exceptions thrown -by functions in the "Control.Exception" module, and vice-versa. --} - -{- $customErrorExample -Here is an example that demonstrates the use of a custom error data type with -the 'throwError' and 'catchError' exception mechanism from 'MonadError'. -The example throws an exception if the user enters an empty string -or a string longer than 5 characters. Otherwise it prints length of the string. - ->-- This is the type to represent length calculation error. ->data LengthError = EmptyString -- Entered string was empty. -> | StringTooLong Int -- A string is longer than 5 characters. -> -- Records a length of the string. -> | OtherError String -- Other error, stores the problem description. -> ->-- Converts LengthError to a readable message. ->instance Show LengthError where -> show EmptyString = "The string was empty!" -> show (StringTooLong len) = -> "The length of the string (" ++ (show len) ++ ") is bigger than 5!" -> show (OtherError msg) = msg -> ->-- For our monad type constructor, we use Either LengthError ->-- which represents failure using Left LengthError ->-- or a successful result of type a using Right a. ->type LengthMonad = Either LengthError -> ->main = do -> putStrLn "Please enter a string:" -> s <- getLine -> reportResult (calculateLength s) -> ->-- Attempts to calculate length and throws an error if the provided string is ->-- empty or longer than 5 characters. ->-- (Throwing an error in this monad means returning a 'Left'.) ->calculateLength :: String -> LengthMonad Int ->calculateLength [] = throwError EmptyString ->calculateLength s | len > 5 = throwError (StringTooLong len) -> | otherwise = return len -> where len = length s -> ->-- Prints result of the string length calculation. 
->reportResult :: LengthMonad Int -> IO () ->reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) ->reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) --} - -{- $ExceptTExample -@'ExceptT'@ monad transformer can be used to add error handling to another monad. -Here is an example how to combine it with an @IO@ monad: - ->import Control.Monad.Except -> ->-- An IO monad which can return String failure. ->-- It is convenient to define the monad type of the combined monad, ->-- especially if we combine more monad transformers. ->type LengthMonad = ExceptT String IO -> ->main = do -> -- runExceptT removes the ExceptT wrapper -> r <- runExceptT calculateLength -> reportResult r -> ->-- Asks user for a non-empty string and returns its length. ->-- Throws an error if user enters an empty string. ->calculateLength :: LengthMonad Int ->calculateLength = do -> -- all the IO operations have to be lifted to the IO monad in the monad stack -> liftIO $ putStrLn "Please enter a non-empty string: " -> s <- liftIO getLine -> if null s -> then throwError "The string was empty!" -> else return $ length s -> ->-- Prints result of the string length calculation. ->reportResult :: Either String Int -> IO () ->reportResult (Right len) = putStrLn ("The length of the string is " ++ (show len)) ->reportResult (Left e) = putStrLn ("Length calculation failed with error: " ++ (show e)) --} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Identity.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Identity.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Identity.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Identity.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -{- | -Module : Control.Monad.Identity -Copyright : (c) Andy Gill 2001, - (c) Oregon Graduate Institute of Science and Technology 2001, - (c) Jeff Newbern 2003-2006, - (c) Andriy Palamarchuk 2006 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : portable - -[Computation type:] Simple function application. - -[Binding strategy:] The bound function is applied to the input value. -@'Identity' x >>= f == f x@ - -[Useful for:] Monads can be derived from monad transformers applied to the -'Identity' monad. - -[Zero and plus:] None. - -[Example type:] @'Identity' a@ - -The @Identity@ monad is a monad that does not embody any computational strategy. -It simply applies the bound function to its input without any modification. -Computationally, there is no reason to use the @Identity@ monad -instead of the much simpler act of simply applying functions to their arguments. -The purpose of the @Identity@ monad is its fundamental role in the theory -of monad transformers. -Any monad transformer applied to the @Identity@ monad yields a non-transformer -version of that monad. 
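For example (a sketch; the helper name is invented and the imports are assumed to be available), @State s@ is just @StateT s 'Identity'@, so unwrapping it only requires 'runIdentity':

>import Control.Monad.Identity
>import Control.Monad.Trans.State (StateT, runStateT)
>
>runStateViaIdentity :: StateT s Identity a -> s -> (a, s)
>runStateViaIdentity m s = runIdentity (runStateT m s)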
--} - -module Control.Monad.Identity ( - module Data.Functor.Identity, - module Control.Monad.Trans.Identity, - - module Control.Monad, - module Control.Monad.Fix, - ) where - -import Data.Functor.Identity -import Control.Monad.Trans.Identity - -import Control.Monad -import Control.Monad.Fix diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/List.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/List.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/List.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/List.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.List --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : portable --- --- The List monad. --- ------------------------------------------------------------------------------ - -module Control.Monad.List ( - ListT(..), - mapListT, - module Control.Monad, - module Control.Monad.Trans, - ) where - -import Control.Monad -import Control.Monad.Trans -import Control.Monad.Trans.List diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Reader/Class.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Reader/Class.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Reader/Class.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Reader/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,179 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE FunctionalDependencies #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE UndecidableInstances #-} --- Search for UndecidableInstances to see why this is needed -{- | -Module : Control.Monad.Reader.Class -Copyright : (c) Andy Gill 2001, - (c) Oregon Graduate Institute of Science and Technology 2001, - (c) Jeff Newbern 2003-2007, - (c) Andriy Palamarchuk 2007 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : non-portable (multi-param classes, functional dependencies) - -[Computation type:] Computations which read values from a shared environment. - -[Binding strategy:] Monad values are functions from the environment to a value. -The bound function is applied to the bound value, and both have access -to the shared environment. - -[Useful for:] Maintaining variable bindings, or other shared environment. - -[Zero and plus:] None. - -[Example type:] @'Reader' [(String,Value)] a@ - -The 'Reader' monad (also called the Environment monad). -Represents a computation, which can read values from -a shared environment, pass values from function to function, -and execute sub-computations in a modified environment. -Using 'Reader' monad for such computations is often clearer and easier -than using the 'Control.Monad.State.State' monad. - - Inspired by the paper - /Functional Programming with Overloading and Higher-Order Polymorphism/, - Mark P Jones () - Advanced School of Functional Programming, 1995. 
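A tiny sketch using the simplest reader of all, the function type @(->) r@ (its instance is defined below; the helper is invented for illustration and assumes this module is imported):

>greet :: String -> String
>greet = do
>    name <- ask
>    return ("Hello, " ++ name)
>
>-- greet "world" == "Hello, world"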
--} - -module Control.Monad.Reader.Class ( - MonadReader(..), - asks, - ) where - -import Control.Monad.Trans.Cont as Cont -import Control.Monad.Trans.Except -import Control.Monad.Trans.Error -import Control.Monad.Trans.Identity -import Control.Monad.Trans.List -import Control.Monad.Trans.Maybe -import Control.Monad.Trans.Reader (ReaderT) -import qualified Control.Monad.Trans.Reader as ReaderT (ask, local, reader) -import qualified Control.Monad.Trans.RWS.Lazy as LazyRWS (RWST, ask, local, reader) -import qualified Control.Monad.Trans.RWS.Strict as StrictRWS (RWST, ask, local, reader) -import Control.Monad.Trans.State.Lazy as Lazy -import Control.Monad.Trans.State.Strict as Strict -import Control.Monad.Trans.Writer.Lazy as Lazy -import Control.Monad.Trans.Writer.Strict as Strict - -import Control.Monad.Trans.Class (lift) -import Control.Monad -import Data.Monoid - --- ---------------------------------------------------------------------------- --- class MonadReader --- asks for the internal (non-mutable) state. - --- | See examples in "Control.Monad.Reader". --- Note, the partially applied function type @(->) r@ is a simple reader monad. --- See the @instance@ declaration below. -class Monad m => MonadReader r m | m -> r where -#if __GLASGOW_HASKELL__ >= 707 - {-# MINIMAL (ask | reader), local #-} -#endif - -- | Retrieves the monad environment. - ask :: m r - ask = reader id - - -- | Executes a computation in a modified environment. - local :: (r -> r) -- ^ The function to modify the environment. - -> m a -- ^ @Reader@ to run in the modified environment. - -> m a - - -- | Retrieves a function of the current environment. - reader :: (r -> a) -- ^ The selector function to apply to the environment. - -> m a - reader f = do - r <- ask - return (f r) - --- | Retrieves a function of the current environment. -asks :: MonadReader r m - => (r -> a) -- ^ The selector function to apply to the environment. - -> m a -asks = reader - --- ---------------------------------------------------------------------------- --- The partially applied function type is a simple reader monad - -instance MonadReader r ((->) r) where - ask = id - local f m = m . f - reader = id - -instance Monad m => MonadReader r (ReaderT r m) where - ask = ReaderT.ask - local = ReaderT.local - reader = ReaderT.reader - -instance (Monad m, Monoid w) => MonadReader r (LazyRWS.RWST r w s m) where - ask = LazyRWS.ask - local = LazyRWS.local - reader = LazyRWS.reader - -instance (Monad m, Monoid w) => MonadReader r (StrictRWS.RWST r w s m) where - ask = StrictRWS.ask - local = StrictRWS.local - reader = StrictRWS.reader - --- --------------------------------------------------------------------------- --- Instances for other mtl transformers --- --- All of these instances need UndecidableInstances, --- because they do not satisfy the coverage condition. - -instance MonadReader r' m => MonadReader r' (ContT r m) where - ask = lift ask - local = Cont.liftLocal ask local - reader = lift . reader - -instance (Error e, MonadReader r m) => MonadReader r (ErrorT e m) where - ask = lift ask - local = mapErrorT . local - reader = lift . reader - -{- | @since 2.2 -} -instance MonadReader r m => MonadReader r (ExceptT e m) where - ask = lift ask - local = mapExceptT . local - reader = lift . reader - -instance MonadReader r m => MonadReader r (IdentityT m) where - ask = lift ask - local = mapIdentityT . local - reader = lift . reader - -instance MonadReader r m => MonadReader r (ListT m) where - ask = lift ask - local = mapListT . 
local - reader = lift . reader - -instance MonadReader r m => MonadReader r (MaybeT m) where - ask = lift ask - local = mapMaybeT . local - reader = lift . reader - -instance MonadReader r m => MonadReader r (Lazy.StateT s m) where - ask = lift ask - local = Lazy.mapStateT . local - reader = lift . reader - -instance MonadReader r m => MonadReader r (Strict.StateT s m) where - ask = lift ask - local = Strict.mapStateT . local - reader = lift . reader - -instance (Monoid w, MonadReader r m) => MonadReader r (Lazy.WriterT w m) where - ask = lift ask - local = Lazy.mapWriterT . local - reader = lift . reader - -instance (Monoid w, MonadReader r m) => MonadReader r (Strict.WriterT w m) where - ask = lift ask - local = Strict.mapWriterT . local - reader = lift . reader diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Reader.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Reader.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Reader.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Reader.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,144 +0,0 @@ -{- | -Module : Control.Monad.Reader -Copyright : (c) Andy Gill 2001, - (c) Oregon Graduate Institute of Science and Technology 2001, - (c) Jeff Newbern 2003-2007, - (c) Andriy Palamarchuk 2007 -License : BSD-style (see the file LICENSE) - -Maintainer : libraries@haskell.org -Stability : experimental -Portability : non-portable (multi-param classes, functional dependencies) - -[Computation type:] Computations which read values from a shared environment. - -[Binding strategy:] Monad values are functions from the environment to a value. -The bound function is applied to the bound value, and both have access -to the shared environment. - -[Useful for:] Maintaining variable bindings, or other shared environment. - -[Zero and plus:] None. - -[Example type:] @'Reader' [(String,Value)] a@ - -The 'Reader' monad (also called the Environment monad). -Represents a computation, which can read values from -a shared environment, pass values from function to function, -and execute sub-computations in a modified environment. -Using 'Reader' monad for such computations is often clearer and easier -than using the 'Control.Monad.State.State' monad. - - Inspired by the paper - /Functional Programming with Overloading and Higher-Order Polymorphism/, - Mark P Jones () - Advanced School of Functional Programming, 1995. --} - -module Control.Monad.Reader ( - -- * MonadReader class - MonadReader(..), - asks, - -- * The Reader monad - Reader, - runReader, - mapReader, - withReader, - -- * The ReaderT monad transformer - ReaderT(ReaderT), - runReaderT, - mapReaderT, - withReaderT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - -- * Example 1: Simple Reader Usage - -- $simpleReaderExample - - -- * Example 2: Modifying Reader Content With @local@ - -- $localExample - - -- * Example 3: @ReaderT@ Monad Transformer - -- $ReaderTExample - ) where - -import Control.Monad.Reader.Class - -import Control.Monad.Trans.Reader ( - Reader, runReader, mapReader, withReader, - ReaderT(ReaderT), runReaderT, mapReaderT, withReaderT) -import Control.Monad.Trans - -import Control.Monad -import Control.Monad.Fix - -{- $simpleReaderExample - -In this example the @Reader@ monad provides access to variable bindings. -Bindings are a @Map@ of integer variables. 
-The variable @count@ contains number of variables in the bindings. -You can see how to run a Reader monad and retrieve data from it -with 'runReader', how to access the Reader data with 'ask' and 'asks'. - -> type Bindings = Map String Int; -> ->-- Returns True if the "count" variable contains correct bindings size. ->isCountCorrect :: Bindings -> Bool ->isCountCorrect bindings = runReader calc_isCountCorrect bindings -> ->-- The Reader monad, which implements this complicated check. ->calc_isCountCorrect :: Reader Bindings Bool ->calc_isCountCorrect = do -> count <- asks (lookupVar "count") -> bindings <- ask -> return (count == (Map.size bindings)) -> ->-- The selector function to use with 'asks'. ->-- Returns value of the variable with specified name. ->lookupVar :: String -> Bindings -> Int ->lookupVar name bindings = maybe 0 id (Map.lookup name bindings) -> ->sampleBindings = Map.fromList [("count",3), ("1",1), ("b",2)] -> ->main = do -> putStr $ "Count is correct for bindings " ++ (show sampleBindings) ++ ": "; -> putStrLn $ show (isCountCorrect sampleBindings); --} - -{- $localExample - -Shows how to modify Reader content with 'local'. - ->calculateContentLen :: Reader String Int ->calculateContentLen = do -> content <- ask -> return (length content); -> ->-- Calls calculateContentLen after adding a prefix to the Reader content. ->calculateModifiedContentLen :: Reader String Int ->calculateModifiedContentLen = local ("Prefix " ++) calculateContentLen -> ->main = do -> let s = "12345"; -> let modifiedLen = runReader calculateModifiedContentLen s -> let len = runReader calculateContentLen s -> putStrLn $ "Modified 's' length: " ++ (show modifiedLen) -> putStrLn $ "Original 's' length: " ++ (show len) --} - -{- $ReaderTExample - -Now you are thinking: 'Wow, what a great monad! I wish I could use -Reader functionality in MyFavoriteComplexMonad!'. Don't worry. -This can be easily done with the 'ReaderT' monad transformer. -This example shows how to combine @ReaderT@ with the IO monad. - ->-- The Reader/IO combined monad, where Reader stores a string. ->printReaderContent :: ReaderT String IO () ->printReaderContent = do -> content <- ask -> liftIO $ putStrLn ("The Reader Content: " ++ content) -> ->main = do -> runReaderT printReaderContent "Some Content" --} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/RWS/Class.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/RWS/Class.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/RWS/Class.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/RWS/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,64 +0,0 @@ -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE FunctionalDependencies #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE UndecidableInstances #-} --- Search for UndecidableInstances to see why this is needed - ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.RWS.Class --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Declaration of the MonadRWS class. 
--- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. ------------------------------------------------------------------------------ - -module Control.Monad.RWS.Class ( - MonadRWS, - module Control.Monad.Reader.Class, - module Control.Monad.State.Class, - module Control.Monad.Writer.Class, - ) where - -import Control.Monad.Reader.Class -import Control.Monad.State.Class -import Control.Monad.Writer.Class - -import Control.Monad.Trans.Class -import Control.Monad.Trans.Error(Error, ErrorT) -import Control.Monad.Trans.Except(ExceptT) -import Control.Monad.Trans.Maybe(MaybeT) -import Control.Monad.Trans.Identity(IdentityT) -import Control.Monad.Trans.RWS.Lazy as Lazy (RWST) -import qualified Control.Monad.Trans.RWS.Strict as Strict (RWST) - -import Data.Monoid - -class (Monoid w, MonadReader r m, MonadWriter w m, MonadState s m) - => MonadRWS r w s m | m -> r, m -> w, m -> s - -instance (Monoid w, Monad m) => MonadRWS r w s (Lazy.RWST r w s m) - -instance (Monoid w, Monad m) => MonadRWS r w s (Strict.RWST r w s m) - ---------------------------------------------------------------------------- --- Instances for other mtl transformers --- --- All of these instances need UndecidableInstances, --- because they do not satisfy the coverage condition. - --- | @since 2.2 -instance MonadRWS r w s m => MonadRWS r w s (ExceptT e m) -instance (Error e, MonadRWS r w s m) => MonadRWS r w s (ErrorT e m) -instance MonadRWS r w s m => MonadRWS r w s (IdentityT m) -instance MonadRWS r w s m => MonadRWS r w s (MaybeT m) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/RWS/Lazy.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/RWS/Lazy.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/RWS/Lazy.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/RWS/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.RWS.Lazy --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Lazy RWS monad. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
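-- A small sketch combining all three effects (the names are invented for
-- illustration): the reader supplies a prefix, the writer collects log lines,
-- and the state counts how many messages were logged.
--
-- > logMsg :: String -> RWS String [String] Int ()
-- > logMsg msg = do
-- >     prefix <- ask
-- >     modify (+1)
-- >     tell [prefix ++ msg]
-- >
-- > -- runRWS (logMsg "hi" >> logMsg "bye") "> " 0 == ((), 2, ["> hi","> bye"])
--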
------------------------------------------------------------------------------ - -module Control.Monad.RWS.Lazy ( - -- * The RWS monad - RWS, - rws, - runRWS, - evalRWS, - execRWS, - mapRWS, - withRWS, - -- * The RWST monad transformer - RWST(RWST), - runRWST, - evalRWST, - execRWST, - mapRWST, - withRWST, - -- * Lazy Reader-writer-state monads - module Control.Monad.RWS.Class, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - module Data.Monoid, - ) where - -import Control.Monad.RWS.Class - -import Control.Monad.Trans -import Control.Monad.Trans.RWS.Lazy ( - RWS, rws, runRWS, evalRWS, execRWS, mapRWS, withRWS, - RWST(RWST), runRWST, evalRWST, execRWST, mapRWST, withRWST) - -import Control.Monad -import Control.Monad.Fix -import Data.Monoid diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/RWS/Strict.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/RWS/Strict.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/RWS/Strict.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/RWS/Strict.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.RWS.Strict --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Strict RWS monad. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. ------------------------------------------------------------------------------ - -module Control.Monad.RWS.Strict ( - -- * The RWS monad - RWS, - rws, - runRWS, - evalRWS, - execRWS, - mapRWS, - withRWS, - -- * The RWST monad transformer - RWST(RWST), - runRWST, - evalRWST, - execRWST, - mapRWST, - withRWST, - -- * Strict Reader-writer-state monads - module Control.Monad.RWS.Class, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - module Data.Monoid, - ) where - -import Control.Monad.RWS.Class - -import Control.Monad.Trans -import Control.Monad.Trans.RWS.Strict ( - RWS, rws, runRWS, evalRWS, execRWS, mapRWS, withRWS, - RWST(RWST), runRWST, evalRWST, execRWST, mapRWST, withRWST) - -import Control.Monad -import Control.Monad.Fix -import Data.Monoid diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/RWS.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/RWS.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/RWS.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/RWS.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.RWS --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Declaration of the MonadRWS class. 
--- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. ------------------------------------------------------------------------------ - -module Control.Monad.RWS ( - module Control.Monad.RWS.Lazy - ) where - -import Control.Monad.RWS.Lazy diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/State/Class.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/State/Class.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/State/Class.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/State/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,176 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE FunctionalDependencies #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE UndecidableInstances #-} --- Search for UndecidableInstances to see why this is needed - ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.State.Class --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- MonadState class. --- --- This module is inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. - ------------------------------------------------------------------------------ - -module Control.Monad.State.Class ( - MonadState(..), - modify, - modify', - gets - ) where - -import Control.Monad.Trans.Cont -import Control.Monad.Trans.Error -import Control.Monad.Trans.Except -import Control.Monad.Trans.Identity -import Control.Monad.Trans.List -import Control.Monad.Trans.Maybe -import Control.Monad.Trans.Reader -import qualified Control.Monad.Trans.RWS.Lazy as LazyRWS (RWST, get, put, state) -import qualified Control.Monad.Trans.RWS.Strict as StrictRWS (RWST, get, put, state) -import qualified Control.Monad.Trans.State.Lazy as Lazy (StateT, get, put, state) -import qualified Control.Monad.Trans.State.Strict as Strict (StateT, get, put, state) -import Control.Monad.Trans.Writer.Lazy as Lazy -import Control.Monad.Trans.Writer.Strict as Strict - -import Control.Monad.Trans.Class (lift) -import Control.Monad -import Data.Monoid - --- --------------------------------------------------------------------------- - --- | Minimal definition is either both of @get@ and @put@ or just @state@ -class Monad m => MonadState s m | m -> s where - -- | Return the state from the internals of the monad. - get :: m s - get = state (\s -> (s, s)) - - -- | Replace the state inside the monad. - put :: s -> m () - put s = state (\_ -> ((), s)) - - -- | Embed a simple state action into the monad. - state :: (s -> (a, s)) -> m a - state f = do - s <- get - let ~(a, s') = f s - put s' - return a -#if __GLASGOW_HASKELL__ >= 707 - {-# MINIMAL state | get, put #-} -#endif - --- | Monadic state transformer. --- --- Maps an old state to a new state inside a state monad. --- The old state is thrown away. --- --- > Main> :t modify ((+1) :: Int -> Int) --- > modify (...) 
:: (MonadState Int a) => a () --- --- This says that @modify (+1)@ acts over any --- Monad that is a member of the @MonadState@ class, --- with an @Int@ state. -modify :: MonadState s m => (s -> s) -> m () -modify f = state (\s -> ((), f s)) - --- | A variant of 'modify' in which the computation is strict in the --- new state. --- --- @since 2.2 -modify' :: MonadState s m => (s -> s) -> m () -modify' f = do - s' <- get - put $! f s' - --- | Gets specific component of the state, using a projection function --- supplied. -gets :: MonadState s m => (s -> a) -> m a -gets f = do - s <- get - return (f s) - -instance Monad m => MonadState s (Lazy.StateT s m) where - get = Lazy.get - put = Lazy.put - state = Lazy.state - -instance Monad m => MonadState s (Strict.StateT s m) where - get = Strict.get - put = Strict.put - state = Strict.state - -instance (Monad m, Monoid w) => MonadState s (LazyRWS.RWST r w s m) where - get = LazyRWS.get - put = LazyRWS.put - state = LazyRWS.state - -instance (Monad m, Monoid w) => MonadState s (StrictRWS.RWST r w s m) where - get = StrictRWS.get - put = StrictRWS.put - state = StrictRWS.state - --- --------------------------------------------------------------------------- --- Instances for other mtl transformers --- --- All of these instances need UndecidableInstances, --- because they do not satisfy the coverage condition. - -instance MonadState s m => MonadState s (ContT r m) where - get = lift get - put = lift . put - state = lift . state - -instance (Error e, MonadState s m) => MonadState s (ErrorT e m) where - get = lift get - put = lift . put - state = lift . state - --- | @since 2.2 -instance MonadState s m => MonadState s (ExceptT e m) where - get = lift get - put = lift . put - state = lift . state - -instance MonadState s m => MonadState s (IdentityT m) where - get = lift get - put = lift . put - state = lift . state - -instance MonadState s m => MonadState s (ListT m) where - get = lift get - put = lift . put - state = lift . state - -instance MonadState s m => MonadState s (MaybeT m) where - get = lift get - put = lift . put - state = lift . state - -instance MonadState s m => MonadState s (ReaderT r m) where - get = lift get - put = lift . put - state = lift . state - -instance (Monoid w, MonadState s m) => MonadState s (Lazy.WriterT w m) where - get = lift get - put = lift . put - state = lift . state - -instance (Monoid w, MonadState s m) => MonadState s (Strict.WriterT w m) where - get = lift get - put = lift . put - state = lift . state diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/State/Lazy.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/State/Lazy.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/State/Lazy.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/State/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.State.Lazy --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Lazy state monads. 
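-- A cautionary sketch (the names are invented for illustration): with the lazy
-- monad, repeated 'modify' calls can accumulate unevaluated thunks in the
-- state, whereas 'modify'' keeps the running state forced at every step.
--
-- > countLazy, countStrict :: Int -> Int
-- > countLazy   n = execState (replicateM_ n (modify  (+1))) 0
-- > countStrict n = execState (replicateM_ n (modify' (+1))) 0
--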
--- --- This module is inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. - ------------------------------------------------------------------------------ - -module Control.Monad.State.Lazy ( - -- * MonadState class - MonadState(..), - modify, - modify', - gets, - -- * The State monad - State, - runState, - evalState, - execState, - mapState, - withState, - -- * The StateT monad transformer - StateT(StateT), - runStateT, - evalStateT, - execStateT, - mapStateT, - withStateT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - -- * Examples - -- $examples - ) where - -import Control.Monad.State.Class - -import Control.Monad.Trans -import Control.Monad.Trans.State.Lazy - (State, runState, evalState, execState, mapState, withState, - StateT(StateT), runStateT, evalStateT, execStateT, mapStateT, withStateT) - -import Control.Monad -import Control.Monad.Fix - --- --------------------------------------------------------------------------- --- $examples --- A function to increment a counter. Taken from the paper --- /Generalising Monads to Arrows/, John --- Hughes (), November 1998: --- --- > tick :: State Int Int --- > tick = do n <- get --- > put (n+1) --- > return n --- --- Add one to the given number using the state monad: --- --- > plusOne :: Int -> Int --- > plusOne n = execState tick n --- --- A contrived addition example. Works only with positive numbers: --- --- > plus :: Int -> Int -> Int --- > plus n x = execState (sequence $ replicate n tick) x --- --- An example from /The Craft of Functional Programming/, Simon --- Thompson (), --- Addison-Wesley 1999: \"Given an arbitrary tree, transform it to a --- tree of integers in which the original elements are replaced by --- natural numbers, starting from 0. 
The same element has to be --- replaced by the same number at every occurrence, and when we meet --- an as-yet-unvisited element we have to find a \'new\' number to match --- it with:\" --- --- > data Tree a = Nil | Node a (Tree a) (Tree a) deriving (Show, Eq) --- > type Table a = [a] --- --- > numberTree :: Eq a => Tree a -> State (Table a) (Tree Int) --- > numberTree Nil = return Nil --- > numberTree (Node x t1 t2) --- > = do num <- numberNode x --- > nt1 <- numberTree t1 --- > nt2 <- numberTree t2 --- > return (Node num nt1 nt2) --- > where --- > numberNode :: Eq a => a -> State (Table a) Int --- > numberNode x --- > = do table <- get --- > (newTable, newPos) <- return (nNode x table) --- > put newTable --- > return newPos --- > nNode:: (Eq a) => a -> Table a -> (Table a, Int) --- > nNode x table --- > = case (findIndexInList (== x) table) of --- > Nothing -> (table ++ [x], length table) --- > Just i -> (table, i) --- > findIndexInList :: (a -> Bool) -> [a] -> Maybe Int --- > findIndexInList = findIndexInListHelp 0 --- > findIndexInListHelp _ _ [] = Nothing --- > findIndexInListHelp count f (h:t) --- > = if (f h) --- > then Just count --- > else findIndexInListHelp (count+1) f t --- --- numTree applies numberTree with an initial state: --- --- > numTree :: (Eq a) => Tree a -> Tree Int --- > numTree t = evalState (numberTree t) [] --- --- > testTree = Node "Zero" (Node "One" (Node "Two" Nil Nil) (Node "One" (Node "Zero" Nil Nil) Nil)) Nil --- > numTree testTree => Node 0 (Node 1 (Node 2 Nil Nil) (Node 1 (Node 0 Nil Nil) Nil)) Nil --- --- sumTree is a little helper function that does not use the State monad: --- --- > sumTree :: (Num a) => Tree a -> a --- > sumTree Nil = 0 --- > sumTree (Node e t1 t2) = e + (sumTree t1) + (sumTree t2) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/State/Strict.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/State/Strict.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/State/Strict.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/State/Strict.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.State.Strict --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Strict state monads. --- --- This module is inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
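-- A small usage sketch (the name is invented for illustration): counting the
-- even elements of a list with the strict monad and 'modify'', so no thunks
-- build up in the counter.
--
-- > countEvens :: [Int] -> Int
-- > countEvens xs = execState (mapM_ (\x -> when (even x) (modify' (+1))) xs) 0
--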
- ------------------------------------------------------------------------------ - -module Control.Monad.State.Strict ( - -- * MonadState class - MonadState(..), - modify, - modify', - gets, - -- * The State monad - State, - runState, - evalState, - execState, - mapState, - withState, - -- * The StateT monad transformer - StateT(StateT), - runStateT, - evalStateT, - execStateT, - mapStateT, - withStateT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - -- * Examples - -- $examples - ) where - -import Control.Monad.State.Class - -import Control.Monad.Trans -import Control.Monad.Trans.State.Strict - (State, runState, evalState, execState, mapState, withState, - StateT(StateT), runStateT, evalStateT, execStateT, mapStateT, withStateT) - -import Control.Monad -import Control.Monad.Fix - --- --------------------------------------------------------------------------- --- $examples --- A function to increment a counter. Taken from the paper --- /Generalising Monads to Arrows/, John --- Hughes (), November 1998: --- --- > tick :: State Int Int --- > tick = do n <- get --- > put (n+1) --- > return n --- --- Add one to the given number using the state monad: --- --- > plusOne :: Int -> Int --- > plusOne n = execState tick n --- --- A contrived addition example. Works only with positive numbers: --- --- > plus :: Int -> Int -> Int --- > plus n x = execState (sequence $ replicate n tick) x --- --- An example from /The Craft of Functional Programming/, Simon --- Thompson (), --- Addison-Wesley 1999: \"Given an arbitrary tree, transform it to a --- tree of integers in which the original elements are replaced by --- natural numbers, starting from 0. The same element has to be --- replaced by the same number at every occurrence, and when we meet --- an as-yet-unvisited element we have to find a \'new\' number to match --- it with:\" --- --- > data Tree a = Nil | Node a (Tree a) (Tree a) deriving (Show, Eq) --- > type Table a = [a] --- --- > numberTree :: Eq a => Tree a -> State (Table a) (Tree Int) --- > numberTree Nil = return Nil --- > numberTree (Node x t1 t2) --- > = do num <- numberNode x --- > nt1 <- numberTree t1 --- > nt2 <- numberTree t2 --- > return (Node num nt1 nt2) --- > where --- > numberNode :: Eq a => a -> State (Table a) Int --- > numberNode x --- > = do table <- get --- > (newTable, newPos) <- return (nNode x table) --- > put newTable --- > return newPos --- > nNode:: (Eq a) => a -> Table a -> (Table a, Int) --- > nNode x table --- > = case (findIndexInList (== x) table) of --- > Nothing -> (table ++ [x], length table) --- > Just i -> (table, i) --- > findIndexInList :: (a -> Bool) -> [a] -> Maybe Int --- > findIndexInList = findIndexInListHelp 0 --- > findIndexInListHelp _ _ [] = Nothing --- > findIndexInListHelp count f (h:t) --- > = if (f h) --- > then Just count --- > else findIndexInListHelp (count+1) f t --- --- numTree applies numberTree with an initial state: --- --- > numTree :: (Eq a) => Tree a -> Tree Int --- > numTree t = evalState (numberTree t) [] --- --- > testTree = Node "Zero" (Node "One" (Node "Two" Nil Nil) (Node "One" (Node "Zero" Nil Nil) Nil)) Nil --- > numTree testTree => Node 0 (Node 1 (Node 2 Nil Nil) (Node 1 (Node 0 Nil Nil) Nil)) Nil --- --- sumTree is a little helper function that does not use the State monad: --- --- > sumTree :: (Num a) => Tree a -> a --- > sumTree Nil = 0 --- > sumTree (Node e t1 t2) = e + (sumTree t1) + (sumTree t2) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/State.hs 
cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/State.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/State.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/State.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.State --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- State monads. --- --- This module is inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. - ------------------------------------------------------------------------------ - -module Control.Monad.State ( - module Control.Monad.State.Lazy - ) where - -import Control.Monad.State.Lazy diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Trans.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Trans.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Trans.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Trans.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.Trans --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : portable --- --- Classes for monad transformers. --- --- A monad transformer makes new monad out of an existing monad, such --- that computations of the old monad may be embedded in the new one. --- To construct a monad with a desired set of features, one typically --- starts with a base monad, such as @Identity@, @[]@ or 'IO', and --- applies a sequence of monad transformers. --- --- Most monad transformer modules include the special case of applying the --- transformer to @Identity@. For example, @State s@ is an abbreviation --- for @StateT s Identity@. --- --- Each monad transformer also comes with an operation @run@/XXX/ to --- unwrap the transformer, exposing a computation of the inner monad. 
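-- An illustrative sketch, not taken from the patched sources: it assumes the
-- mtl API described above and shows a transformer (StateT) stacked on a base
-- monad (IO), with the corresponding run function unwrapping the transformer
-- again. The names (countdown, main) are invented for the example.
import Control.Monad.State (StateT, evalStateT, get, put, lift)

countdown :: StateT Int IO ()
countdown = do
  n <- get
  lift (putStrLn ("n = " ++ show n))            -- embed an IO action via lift
  if n <= 0 then return () else put (n - 1) >> countdown

main :: IO ()
main = evalStateT countdown 3                   -- evalStateT unwraps the StateT layer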
------------------------------------------------------------------------------ - -module Control.Monad.Trans ( - module Control.Monad.Trans.Class, - module Control.Monad.IO.Class - ) where - -import Control.Monad.IO.Class -import Control.Monad.Trans.Class diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Writer/Class.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Writer/Class.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Writer/Class.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Writer/Class.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,188 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE FunctionalDependencies #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE UndecidableInstances #-} --- Search for UndecidableInstances to see why this is needed - ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.Writer.Class --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- The MonadWriter class. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. ------------------------------------------------------------------------------ - -module Control.Monad.Writer.Class ( - MonadWriter(..), - listens, - censor, - ) where - -import Control.Monad.Trans.Error as Error -import Control.Monad.Trans.Except as Except -import Control.Monad.Trans.Identity as Identity -import Control.Monad.Trans.Maybe as Maybe -import Control.Monad.Trans.Reader -import qualified Control.Monad.Trans.RWS.Lazy as LazyRWS ( - RWST, writer, tell, listen, pass) -import qualified Control.Monad.Trans.RWS.Strict as StrictRWS ( - RWST, writer, tell, listen, pass) -import Control.Monad.Trans.State.Lazy as Lazy -import Control.Monad.Trans.State.Strict as Strict -import qualified Control.Monad.Trans.Writer.Lazy as Lazy ( - WriterT, writer, tell, listen, pass) -import qualified Control.Monad.Trans.Writer.Strict as Strict ( - WriterT, writer, tell, listen, pass) - -import Control.Monad.Trans.Class (lift) -import Control.Monad -import Data.Monoid - --- --------------------------------------------------------------------------- --- MonadWriter class --- --- tell is like tell on the MUD's it shouts to monad --- what you want to be heard. The monad carries this 'packet' --- upwards, merging it if needed (hence the Monoid requirement). --- --- listen listens to a monad acting, and returns what the monad "said". --- --- pass lets you provide a writer transformer which changes internals of --- the written object. - -class (Monoid w, Monad m) => MonadWriter w m | m -> w where -#if __GLASGOW_HASKELL__ >= 707 - {-# MINIMAL (writer | tell), listen, pass #-} -#endif - -- | @'writer' (a,w)@ embeds a simple writer action. - writer :: (a,w) -> m a - writer ~(a, w) = do - tell w - return a - - -- | @'tell' w@ is an action that produces the output @w@. - tell :: w -> m () - tell w = writer ((),w) - - -- | @'listen' m@ is an action that executes the action @m@ and adds - -- its output to the value of the computation. 
- listen :: m a -> m (a, w) - -- | @'pass' m@ is an action that executes the action @m@, which - -- returns a value and a function, and returns the value, applying - -- the function to the output. - pass :: m (a, w -> w) -> m a - --- | @'listens' f m@ is an action that executes the action @m@ and adds --- the result of applying @f@ to the output to the value of the computation. --- --- * @'listens' f m = 'liftM' (id *** f) ('listen' m)@ -listens :: MonadWriter w m => (w -> b) -> m a -> m (a, b) -listens f m = do - ~(a, w) <- listen m - return (a, f w) - --- | @'censor' f m@ is an action that executes the action @m@ and --- applies the function @f@ to its output, leaving the return value --- unchanged. --- --- * @'censor' f m = 'pass' ('liftM' (\\x -> (x,f)) m)@ -censor :: MonadWriter w m => (w -> w) -> m a -> m a -censor f m = pass $ do - a <- m - return (a, f) - -#if MIN_VERSION_base(4,9,0) --- | __NOTE__: This instance is only defined for @base >= 4.9.0@. --- --- @since 2.2.2 -instance (Monoid w) => MonadWriter w ((,) w) where - writer ~(a, w) = (w, a) - tell w = (w, ()) - listen ~(w, a) = (w, (a, w)) - pass ~(w, (a, f)) = (f w, a) -#endif - -instance (Monoid w, Monad m) => MonadWriter w (Lazy.WriterT w m) where - writer = Lazy.writer - tell = Lazy.tell - listen = Lazy.listen - pass = Lazy.pass - -instance (Monoid w, Monad m) => MonadWriter w (Strict.WriterT w m) where - writer = Strict.writer - tell = Strict.tell - listen = Strict.listen - pass = Strict.pass - -instance (Monoid w, Monad m) => MonadWriter w (LazyRWS.RWST r w s m) where - writer = LazyRWS.writer - tell = LazyRWS.tell - listen = LazyRWS.listen - pass = LazyRWS.pass - -instance (Monoid w, Monad m) => MonadWriter w (StrictRWS.RWST r w s m) where - writer = StrictRWS.writer - tell = StrictRWS.tell - listen = StrictRWS.listen - pass = StrictRWS.pass - --- --------------------------------------------------------------------------- --- Instances for other mtl transformers --- --- All of these instances need UndecidableInstances, --- because they do not satisfy the coverage condition. - -instance (Error e, MonadWriter w m) => MonadWriter w (ErrorT e m) where - writer = lift . writer - tell = lift . tell - listen = Error.liftListen listen - pass = Error.liftPass pass - --- | @since 2.2 -instance MonadWriter w m => MonadWriter w (ExceptT e m) where - writer = lift . writer - tell = lift . tell - listen = Except.liftListen listen - pass = Except.liftPass pass - -instance MonadWriter w m => MonadWriter w (IdentityT m) where - writer = lift . writer - tell = lift . tell - listen = Identity.mapIdentityT listen - pass = Identity.mapIdentityT pass - -instance MonadWriter w m => MonadWriter w (MaybeT m) where - writer = lift . writer - tell = lift . tell - listen = Maybe.liftListen listen - pass = Maybe.liftPass pass - -instance MonadWriter w m => MonadWriter w (ReaderT r m) where - writer = lift . writer - tell = lift . tell - listen = mapReaderT listen - pass = mapReaderT pass - -instance MonadWriter w m => MonadWriter w (Lazy.StateT s m) where - writer = lift . writer - tell = lift . tell - listen = Lazy.liftListen listen - pass = Lazy.liftPass pass - -instance MonadWriter w m => MonadWriter w (Strict.StateT s m) where - writer = lift . writer - tell = lift . 
tell - listen = Strict.liftListen listen - pass = Strict.liftPass pass diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Writer/Lazy.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Writer/Lazy.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Writer/Lazy.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Writer/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.Writer.Lazy --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Lazy writer monads. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. ------------------------------------------------------------------------------ - -module Control.Monad.Writer.Lazy ( - -- * MonadWriter class - MonadWriter(..), - listens, - censor, - -- * The Writer monad - Writer, - runWriter, - execWriter, - mapWriter, - -- * The WriterT monad transformer - WriterT(WriterT), - runWriterT, - execWriterT, - mapWriterT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - module Data.Monoid, - ) where - -import Control.Monad.Writer.Class - -import Control.Monad.Trans -import Control.Monad.Trans.Writer.Lazy ( - Writer, runWriter, execWriter, mapWriter, - WriterT(WriterT), runWriterT, execWriterT, mapWriterT) - -import Control.Monad -import Control.Monad.Fix -import Data.Monoid diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Writer/Strict.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Writer/Strict.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Writer/Strict.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Writer/Strict.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.Writer.Strict --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- Strict writer monads. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. 
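-- An illustrative sketch (assuming the MonadWriter operations documented above:
-- tell, listens and censor) using the strict Writer monad; the names below are
-- invented for the example.
import Control.Monad.Writer.Strict (Writer, runWriter, tell, listens, censor)

logged :: Writer [String] Int
logged = do
  tell ["start"]                                         -- emit output
  (x, n) <- listens length (tell ["step"] >> return 41)  -- n = size of the inner output
  censor (map ("log: " ++)) (tell ["done"])              -- rewrite the inner output
  return (x + n)

-- runWriter logged  ==  (42, ["start", "step", "log: done"])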
------------------------------------------------------------------------------ - -module Control.Monad.Writer.Strict ( - -- * MonadWriter class - MonadWriter(..), - listens, - censor, - -- * The Writer monad - Writer, - runWriter, - execWriter, - mapWriter, - -- * The WriterT monad transformer - WriterT(..), - execWriterT, - mapWriterT, - module Control.Monad, - module Control.Monad.Fix, - module Control.Monad.Trans, - module Data.Monoid, - ) where - -import Control.Monad.Writer.Class - -import Control.Monad.Trans -import Control.Monad.Trans.Writer.Strict ( - Writer, runWriter, execWriter, mapWriter, - WriterT(..), execWriterT, mapWriterT) - -import Control.Monad -import Control.Monad.Fix -import Data.Monoid diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Writer.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Writer.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Control/Monad/Writer.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Control/Monad/Writer.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.Writer --- Copyright : (c) Andy Gill 2001, --- (c) Oregon Graduate Institute of Science and Technology, 2001 --- License : BSD-style (see the file LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (multi-param classes, functional dependencies) --- --- The MonadWriter class. --- --- Inspired by the paper --- /Functional Programming with Overloading and Higher-Order Polymorphism/, --- Mark P Jones () --- Advanced School of Functional Programming, 1995. ------------------------------------------------------------------------------ - -module Control.Monad.Writer ( - module Control.Monad.Writer.Lazy - ) where - -import Control.Monad.Writer.Lazy diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/LICENSE cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/LICENSE --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/LICENSE 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -The Glasgow Haskell Compiler License - -Copyright 2004, The University Court of the University of Glasgow. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -- Neither name of the University nor the names of its contributors may be -used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF -GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/mtl.cabal cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/mtl.cabal --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/mtl.cabal 2019-12-17 14:07:26.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/mtl.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,77 +0,0 @@ -name: mtl -version: 2.2.2 -cabal-version: >= 1.10 -license: BSD3 -license-file: LICENSE -author: Andy Gill -maintainer: Edward Kmett -category: Control -synopsis: Monad classes, using functional dependencies -homepage: http://github.com/haskell/mtl -bug-reports: http://github.com/haskell/mtl/issues -description: - Monad classes using functional dependencies, with instances - for various monad transformers, inspired by the paper - /Functional Programming with Overloading and Higher-Order Polymorphism/, - by Mark P Jones, in /Advanced School of Functional Programming/, 1995 - (). -build-type: Simple -extra-source-files: CHANGELOG.markdown, README.markdown -tested-with: - GHC==7.0.4, - GHC==7.2.2, - GHC==7.4.2, - GHC==7.6.3, - GHC==7.8.4, - GHC==7.10.3, - GHC==8.0.2, - GHC==8.2.2, - GHC==8.4.1 - -source-repository head - type: git - location: https://github.com/haskell/mtl.git - -Library - exposed-modules: - Control.Monad.Cont - Control.Monad.Cont.Class - Control.Monad.Error - Control.Monad.Error.Class - Control.Monad.Except - Control.Monad.Identity - Control.Monad.List - Control.Monad.RWS - Control.Monad.RWS.Class - Control.Monad.RWS.Lazy - Control.Monad.RWS.Strict - Control.Monad.Reader - Control.Monad.Reader.Class - Control.Monad.State - Control.Monad.State.Class - Control.Monad.State.Lazy - Control.Monad.State.Strict - Control.Monad.Trans - Control.Monad.Writer - Control.Monad.Writer.Class - Control.Monad.Writer.Lazy - Control.Monad.Writer.Strict - build-depends: base < 5, transformers >= 0.4 && <0.6 - - default-language: Haskell2010 - other-extensions: - CPP - MultiParamTypeClasses - FunctionalDependencies - FlexibleInstances - UndecidableInstances - - -- This is a SafeHaskell safeguard (pun intended) to explicitly declare the API contract of `mtl` - -- GHC versions before 7.4 were hopelessly broken or incapable of SafeHaskell - if impl(ghc >= 7.4) - default-extensions: Safe - - ghc-options: -Wall -fno-warn-unused-imports -fno-warn-warnings-deprecations - - if impl(ghc >= 8.0) - ghc-options: -Wcompat -Wnoncanonical-monad-instances -Wnoncanonical-monadfail-instances diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/README.markdown cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/README.markdown --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/README.markdown 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/README.markdown 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -The `mtl` Package 
[![Hackage](https://img.shields.io/hackage/v/mtl.svg)](https://hackage.haskell.org/package/mtl) [![Build Status](https://travis-ci.org/haskell/mtl.svg)](https://travis-ci.org/haskell/mtl) -===================== - -See [`mtl` on Hackage](http://hackage.haskell.org/package/mtl) for more information. diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Setup.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Setup.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/mtl-2.2.2/Setup.hs 2018-02-24 22:02:27.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/mtl-2.2.2/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -import Distribution.Simple -main = defaultMain diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/ChangeLog.md cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/ChangeLog.md --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/ChangeLog.md 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/ChangeLog.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -### 3.1.14.0 - -- Add `parseFromFile` to `Text.Parsec.Text.Lazy` and `Text.Parsec.Text` (#103, #104). - -- Clarify Haddock documentation in various places (#105,#101,#102). - -- Add support for `base-4.13`. - -### 3.1.13.0 - -- Add official support for [`SafeHaskell`](http://downloads.haskell.org/~ghc/latest/docs/html/users_guide/safe_haskell.html) - - **NOTE**: This is the first version whose `SafeHaskell` properties - have become an intentional part of the API contract; previous - versions were merely accidentally safe-inferred (or not depending - on various factors; in other words, this was a fragile - property). If you rely on `SafeHaskell` to consider module imports - from `parsec` *safe*, this is the first version of `parsec` which - actually guarantees a well-defined state; you can declare this - requirement by either specifying - - build-depends: parsec >= 3.1.13.0 && < 3.2 - - or, starting with `cabal-version:2.0`, via - - build-depends: parsec ^>= 3.1.13.0 - -- Drop support for GHC 7.0, GHC 7.2, and GHC 7.4.1; support window - starts with GHC 7.4.2. - -### 3.1.12.0 - -- Support limited to GHC 7.0 & GHC 7.2 only - -- Add `MonadFail` instance for `ParsecT` -- Add `Semigroup`/`Monoid` instances for `ParsecT` (#80,#82) -- Fix space leak in Applicative/Monad interface (#37) -- Add `parserTrace` and `parserTraced` combinators for debugging. - -### 3.1.11 - -- Include `README.md` in package. - -### 3.1.10 - -- Most types now have a `Typeable` instance. Some instances are dropped from - older versions of GHC (sorry about that!). -- The token-parser now rejects Unicode numeric escape sequences for characters - outside the Unicode range. -- The token-parser now loses less precision when parsing literal doubles. -- Documentation fixes and corrections. -- We no longer test parsec builds on GHC 7.4. - -### 3.1.9 - -- Many and various updates to documentation and package description (inlcuding - the homepage links). -- Add an `Eq` instance for `ParseError` -- Fixed a regression from 3.1.6: `runP` is again exported from module - Text.Parsec. - -### 3.1.8 - -- Fix a regression from 3.1.6 related to exports from the main module. - -### 3.1.7 - -- Fix a regression from 3.1.6 related to the reported position of error messages. - See bug #9 for details. -- Reset the current error position on success of `lookAhead`. 
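-- A small sketch of 'lookAhead', as referenced in the changelog entries above
-- (assumes parsec >= 3.1.7; the parser below is invented for illustration): on
-- success it consumes no input, so the following parser still sees everything.
import Text.Parsec

keywordThenRest :: Parsec String () (String, String)
keywordThenRest = do
  kw   <- lookAhead (many1 letter)   -- peek at the leading word without consuming it
  rest <- many anyChar               -- the full input is still available here
  return (kw, rest)

-- parse keywordThenRest "" "let x = 1"  ==  Right ("let", "let x = 1")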
- -### 3.1.6 - -- Export `Text` instances from Text.Parsec -- Make Text.Parsec exports more visible -- Re-arrange Text.Parsec exports -- Add functions `crlf` and `endOfLine` to Text.Parsec.Char for handling - input streams that do not have normalized line terminators. -- Fix off-by-one error in Token.charControl - -### 3.1.4 - -- Relax dependency on `text` - -### 3.1.5 - -- Relax dependency on `text` - -### 3.1.3 - -- Fix a regression introduced in 3.1.2 related to positions reported by error messages. diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/LICENSE cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/LICENSE --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/LICENSE 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -Copyright 1999-2000, Daan Leijen; 2007, Paolo Martini. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -This software is provided by the copyright holders "as is" and any express or -implied warranties, including, but not limited to, the implied warranties of -merchantability and fitness for a particular purpose are disclaimed. In no -event shall the copyright holders be liable for any direct, indirect, -incidental, special, exemplary, or consequential damages (including, but not -limited to, procurement of substitute goods or services; loss of use, data, -or profits; or business interruption) however caused and on any theory of -liability, whether in contract, strict liability, or tort (including -negligence or otherwise) arising in any way out of the use of this software, -even if advised of the possibility of such damage. diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/parsec.cabal cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/parsec.cabal --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/parsec.cabal 2019-12-17 14:07:33.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/parsec.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,130 +0,0 @@ -cabal-version: 1.12 -name: parsec -version: 3.1.14.0 - -synopsis: Monadic parser combinators -description: Parsec is designed from scratch as an industrial-strength parser - library. It is simple, safe, well documented (on the package - homepage), has extensive libraries, good error messages, - and is fast. It is defined as a monad transformer that can be - stacked on arbitrary monads, and it is also parametric in the - input stream type. - . - The main entry point is the "Text.Parsec" module which provides - defaults for parsing 'Char'acter data. - . - The "Text.ParserCombinators.Parsec" module hierarchy contains - the legacy @parsec-2@ API and may be removed at some point in - the future. 
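-- An illustrative sketch of the two claims in the package description above:
-- ParsecT is a monad transformer over an arbitrary inner monad (IO here) and is
-- parametric in the input stream type (String here). The names are invented.
import Control.Monad.Trans.Class (lift)
import Text.Parsec

digitsEchoed :: ParsecT String () IO Int
digitsEchoed = do
  ds <- many1 digit
  lift (putStrLn ("digits: " ++ ds))   -- run an action of the inner monad mid-parse
  return (read ds)

main :: IO ()
main = runParserT digitsEchoed () "" "123abc" >>= print   -- prints "digits: 123" then Right 123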
- -license: BSD3 -license-file: LICENSE -author: Daan Leijen , Paolo Martini , Antoine Latter -maintainer: Herbert Valerio Riedel -homepage: https://github.com/haskell/parsec -bug-reports: https://github.com/haskell/parsec/issues -category: Parsing - -build-type: Simple -tested-with: GHC ==8.6.1 || ==8.4.3 || ==8.2.2 || ==8.0.2 || ==7.10.3 || ==7.8.4 || ==7.6.3 || ==7.4.2 || ==7.4.1 - -extra-source-files: ChangeLog.md, README.md - -source-repository head - type: git - location: https://github.com/haskell/parsec - -library - hs-source-dirs: src - exposed-modules: - Text.Parsec - Text.Parsec.String - Text.Parsec.ByteString - Text.Parsec.ByteString.Lazy - Text.Parsec.Text - Text.Parsec.Text.Lazy - Text.Parsec.Pos - Text.Parsec.Error - Text.Parsec.Prim - Text.Parsec.Char - Text.Parsec.Combinator - Text.Parsec.Token - Text.Parsec.Expr - Text.Parsec.Language - Text.Parsec.Perm - Text.ParserCombinators.Parsec - Text.ParserCombinators.Parsec.Char - Text.ParserCombinators.Parsec.Combinator - Text.ParserCombinators.Parsec.Error - Text.ParserCombinators.Parsec.Expr - Text.ParserCombinators.Parsec.Language - Text.ParserCombinators.Parsec.Perm - Text.ParserCombinators.Parsec.Pos - Text.ParserCombinators.Parsec.Prim - Text.ParserCombinators.Parsec.Token - - build-depends: - base >= 4.5.0 && < 4.14, - mtl >= 1.1.1 && < 2.3, - bytestring >= 0.9.2.1 && < 0.11, - text (>= 0.11.3.1 && < 0.12) - || (>= 1.0.0.0 && < 1.3) - - default-language: Haskell2010 - other-extensions: - CPP - DeriveDataTypeable - ExistentialQuantification - FlexibleContexts - FlexibleInstances - FunctionalDependencies - MultiParamTypeClasses - PolymorphicComponents - StandaloneDeriving - Safe - Trustworthy - UndecidableInstances - - ghc-options: -Wall - if impl(ghc >= 8.0) - ghc-options: -Wcompat -Wnoncanonical-monad-instances -Wno-trustworthy-safe - if impl(ghc < 8.8) - ghc-options: -Wnoncanonical-monadfail-instances - else - -- provide/emulate `Control.Monad.Fail` and `Semigroup` API for pre-GHC8 - build-depends: fail == 4.9.*, semigroups >= 0.18 && < 0.20 - - if impl(ghc >= 7.10) - ghc-options: -fno-warn-trustworthy-safe - -test-suite parsec. - type: exitcode-stdio-1.0 - - hs-source-dirs: test - main-is: Main.hs - other-modules: - Bugs - Bugs.Bug2 - Bugs.Bug6 - Bugs.Bug9 - Bugs.Bug35 - Features - Features.Feature80 - Util - - build-depends: - base, - mtl, - parsec, - -- dependencies whose version bounds are not inherited via lib:parsec - HUnit == 1.6.* || (>= 1.3.1.2 && < 1.4), - test-framework == 0.8.*, - test-framework-hunit == 0.3.* - - default-language: Haskell2010 - - ghc-options: -Wall - if impl(ghc >= 8.0) - ghc-options: -Wcompat -Wnoncanonical-monad-instances -Wnoncanonical-monadfail-instances - else - build-depends: semigroups diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/README.md cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/README.md --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/README.md 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -Parsec [![Build Status](https://travis-ci.org/haskell/parsec.svg?branch=master)](https://travis-ci.org/haskell/parsec) -====== - -**Please refer to the [package description on Hackage](https://hackage.haskell.org/package/parsec#description) for more information.** - -A monadic parser combinator library, written by Daan Leijen. Parsec is designed -from scratch as an industrial-strength parser library. 
It is simple, safe, well -documented, has extensive libraries, good error messages, and is fast. - -Some links: - -* [Parsec on Hackage](https://hackage.haskell.org/package/parsec), - contains the generated documentation. -* The 2001 paper written by Daan Leijen, some what outdated - ([PDF](https://web.archive.org/web/20140528151730/http://legacy.cs.uu.nl/daan/download/parsec/parsec.pdf), - [HTML](https://web.archive.org/web/20140528151730/http://legacy.cs.uu.nl/daan/download/parsec/parsec.html), - thanks to [archive.org](http://web.archive.org); - and [PDF](https://research.microsoft.com/en-us/um/people/daan/download/parsec/parsec.pdf), - thanks to Microsoft Research). -* [Using Parsec](http://book.realworldhaskell.org/read/using-parsec.html), - chapter 16 of [Real World Haskell](http://book.realworldhaskell.org/). -* [An introduction to the Parsec library](http://kunigami.wordpress.com/2014/01/21/an-introduction-to-the-parsec-library) - on Kunigami's blog. -* [An introduction to parsing text in Haskell with Parsec](http://unbui.lt/#!/post/haskell-parsec-basics) on Wilson's blog. -* Differences between Parsec and - [Attoparsec](http://hackage.haskell.org/package/attoparsec) - (Haskell's other prominent parser library) as explained in - [an answer on StackExchange](http://stackoverflow.com/a/19213247). -* Differences between Parsec and [Happy](http://www.haskell.org/happy) - (Haskell's parser generator) as explained in two - answers on separate StackExchange questions - ([1](http://stackoverflow.com/a/7270904), - [2](http://stackoverflow.com/a/14775331)). -* Differences between Parsec and - [Megaparsec](http://hackage.haskell.org/package/megaparsec) - (an advanced fork of Parsec) as explained in - [Megaparsec's README](https://github.com/mrkkrp/megaparsec#megaparsec-vs-parsec). - - -By analyzing [Parsec's reverse dependencies on Hackage](http://packdeps.haskellers.com/reverse/parsec) -we can find open source project that make use of Parsec. For example -[bibtex](http://hackage.haskell.org/package/bibtex), -[ConfigFile](http://hackage.haskell.org/package/ConfigFile), -[csv](http://hackage.haskell.org/package/csv) and -[hjson](http://hackage.haskell.org/package/hjson). - - -## Getting started - -This requires a working version of `cabal` and `ghci`, which are part of -any modern installation of Haskell, such as -[Haskell Platform](https://www.haskell.org/platform). - -First install Parsec. - - cabal install parsec - -Below we show how a very simple parser that tests matching parentheses -was made from GHCI (the interactive GHC environment), which we started -with the `ghci` command). - -``` -Prelude> :m +Text.Parsec -Prelude Text.Parsec> let parenSet = char '(' >> many parenSet >> char ')' :: Parsec String () Char -Loading package transformers-0.3.0.0 ... linking ... done. -Loading package array-0.5.0.0 ... linking ... done. -Loading package deepseq-1.3.0.2 ... linking ... done. -Loading package bytestring-0.10.4.0 ... linking ... done. -Loading package mtl-2.1.3.1 ... linking ... done. -Loading package text-1.1.1.3 ... linking ... done. -Loading package parsec-3.1.5 ... linking ... done. -Prelude Text.Parsec> let parens = (many parenSet >> eof) <|> eof -Prelude Text.Parsec> parse parens "" "()" -Right () -Prelude Text.Parsec> parse parens "" "()(())" -Right () -Prelude Text.Parsec> parse parens "" "(" -Left (line 1, column 2): -unexpected end of input -expecting "(" or ")" -``` - -The `Right ()` results indicate successes: the parentheses matched. 
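-- The GHCi session above, reworked as a small compiled program (a sketch; it
-- assumes only that the parsec package is installed):
module Main (main) where

import Text.Parsec

parenSet :: Parsec String () Char
parenSet = char '(' >> many parenSet >> char ')'

parens :: Parsec String () ()
parens = (many parenSet >> eof) <|> eof

main :: IO ()
main = mapM_ (print . parse parens "") ["()", "()(())", "("]
-- prints Right () twice, then a Left parse error for the unbalanced input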
-The `Left [...]` result indicates a parse failure, and is detailed -with an error message. - -For a more thorough introduction to Parsec we recommend the links at -the top of this README file. - - -## Contributing - -Issues (bugs, feature requests or otherwise feedback) may be reported in -[the Github issue tracker for this project](https://github.com/haskell/parsec/issues). - -Pull-requests are also welcome. - - -## License - -See the [LICENSE](https://github.com/haskell/parsec/blob/master/LICENSE) -file in the repository. diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/Setup.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/Setup.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/Setup.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -module Main (main) where - -import Distribution.Simple - -main :: IO () -main = defaultMain diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/ByteString/Lazy.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/ByteString/Lazy.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/ByteString/Lazy.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/ByteString/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.ByteString.Lazy --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Convinience definitions for working with lazy 'C.ByteString's. --- ------------------------------------------------------------------------------ - -module Text.Parsec.ByteString.Lazy - ( Parser, GenParser, parseFromFile - ) where - -import Text.Parsec.Error -import Text.Parsec.Prim - -import qualified Data.ByteString.Lazy.Char8 as C - -type Parser = Parsec C.ByteString () -type GenParser t st = Parsec C.ByteString st - --- | @parseFromFile p filePath@ runs a lazy bytestring parser @p@ on the --- input read from @filePath@ using 'ByteString.Lazy.Char8.readFile'. Returns either a 'ParseError' --- ('Left') or a value of type @a@ ('Right'). 
--- --- > main = do{ result <- parseFromFile numbers "digits.txt" --- > ; case result of --- > Left err -> print err --- > Right xs -> print (sum xs) --- > } - -parseFromFile :: Parser a -> FilePath -> IO (Either ParseError a) -parseFromFile p fname - = do input <- C.readFile fname - return (runP p () fname input) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/ByteString.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/ByteString.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/ByteString.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/ByteString.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.ByteString --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Convinience definitions for working with 'C.ByteString's. --- ------------------------------------------------------------------------------ - -module Text.Parsec.ByteString - ( Parser, GenParser, parseFromFile - ) where - -import Text.Parsec.Error -import Text.Parsec.Prim - -import qualified Data.ByteString.Char8 as C - -type Parser = Parsec C.ByteString () -type GenParser t st = Parsec C.ByteString st - --- | @parseFromFile p filePath@ runs a strict bytestring parser @p@ on the --- input read from @filePath@ using 'ByteString.Char8.readFile'. Returns either a 'ParseError' --- ('Left') or a value of type @a@ ('Right'). --- --- > main = do{ result <- parseFromFile numbers "digits.txt" --- > ; case result of --- > Left err -> print err --- > Right xs -> print (sum xs) --- > } - -parseFromFile :: Parser a -> FilePath -> IO (Either ParseError a) -parseFromFile p fname - = do input <- C.readFile fname - return (runP p () fname input) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Char.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Char.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Char.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Char.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,158 +0,0 @@ -{-# LANGUAGE CPP, FlexibleContexts, Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Char --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Commonly used character parsers. --- ------------------------------------------------------------------------------ - -module Text.Parsec.Char where - -import Data.Char -import Text.Parsec.Pos -import Text.Parsec.Prim -#if !(MIN_VERSION_base(4,8,0)) -import Control.Applicative ((*>)) -#endif - --- | @oneOf cs@ succeeds if the current character is in the supplied --- list of characters @cs@. Returns the parsed character. See also --- 'satisfy'. 
--- --- > vowel = oneOf "aeiou" - -oneOf :: (Stream s m Char) => [Char] -> ParsecT s u m Char -oneOf cs = satisfy (\c -> elem c cs) - --- | As the dual of 'oneOf', @noneOf cs@ succeeds if the current --- character /not/ in the supplied list of characters @cs@. Returns the --- parsed character. --- --- > consonant = noneOf "aeiou" - -noneOf :: (Stream s m Char) => [Char] -> ParsecT s u m Char -noneOf cs = satisfy (\c -> not (elem c cs)) - --- | Skips /zero/ or more white space characters. See also 'skipMany'. - -spaces :: (Stream s m Char) => ParsecT s u m () -spaces = skipMany space "white space" - --- | Parses a white space character (any character which satisfies 'isSpace') --- Returns the parsed character. - -space :: (Stream s m Char) => ParsecT s u m Char -space = satisfy isSpace "space" - --- | Parses a newline character (\'\\n\'). Returns a newline character. - -newline :: (Stream s m Char) => ParsecT s u m Char -newline = char '\n' "lf new-line" - --- | Parses a carriage return character (\'\\r\') followed by a newline character (\'\\n\'). --- Returns a newline character. - -crlf :: (Stream s m Char) => ParsecT s u m Char -crlf = char '\r' *> char '\n' "crlf new-line" - --- | Parses a CRLF (see 'crlf') or LF (see 'newline') end-of-line. --- Returns a newline character (\'\\n\'). --- --- > endOfLine = newline <|> crlf --- - -endOfLine :: (Stream s m Char) => ParsecT s u m Char -endOfLine = newline <|> crlf "new-line" - --- | Parses a tab character (\'\\t\'). Returns a tab character. - -tab :: (Stream s m Char) => ParsecT s u m Char -tab = char '\t' "tab" - --- | Parses an upper case letter (according to 'isUpper'). --- Returns the parsed character. - -upper :: (Stream s m Char) => ParsecT s u m Char -upper = satisfy isUpper "uppercase letter" - --- | Parses a lower case character (according to 'isLower'). --- Returns the parsed character. - -lower :: (Stream s m Char) => ParsecT s u m Char -lower = satisfy isLower "lowercase letter" - --- | Parses a alphabetic or numeric Unicode characters --- according to 'isAlphaNum'. Returns the parsed character. --- --- Note that numeric digits outside the ASCII range (such as arabic-indic digits like e.g. \"٤\" or @U+0664@), --- as well as numeric characters which aren't digits, are parsed by this function --- but not by 'digit'. - -alphaNum :: (Stream s m Char => ParsecT s u m Char) -alphaNum = satisfy isAlphaNum "letter or digit" - --- | Parses an alphabetic Unicode characters (lower-case, upper-case and title-case letters, --- plus letters of caseless scripts and modifiers letters according to 'isAlpha'). --- Returns the parsed character. - -letter :: (Stream s m Char) => ParsecT s u m Char -letter = satisfy isAlpha "letter" - --- | Parses an ASCII digit. Returns the parsed character. - -digit :: (Stream s m Char) => ParsecT s u m Char -digit = satisfy isDigit "digit" - --- | Parses a hexadecimal digit (a digit or a letter between \'a\' and --- \'f\' or \'A\' and \'F\'). Returns the parsed character. - -hexDigit :: (Stream s m Char) => ParsecT s u m Char -hexDigit = satisfy isHexDigit "hexadecimal digit" - --- | Parses an octal digit (a character between \'0\' and \'7\'). Returns --- the parsed character. - -octDigit :: (Stream s m Char) => ParsecT s u m Char -octDigit = satisfy isOctDigit "octal digit" - --- | @char c@ parses a single character @c@. Returns the parsed --- character (i.e. @c@). 
--- --- > semiColon = char ';' - -char :: (Stream s m Char) => Char -> ParsecT s u m Char -char c = satisfy (==c) show [c] - --- | This parser succeeds for any character. Returns the parsed character. - -anyChar :: (Stream s m Char) => ParsecT s u m Char -anyChar = satisfy (const True) - --- | The parser @satisfy f@ succeeds for any character for which the --- supplied function @f@ returns 'True'. Returns the character that is --- actually parsed. - --- > digit = satisfy isDigit --- > oneOf cs = satisfy (\c -> c `elem` cs) - -satisfy :: (Stream s m Char) => (Char -> Bool) -> ParsecT s u m Char -satisfy f = tokenPrim (\c -> show [c]) - (\pos c _cs -> updatePosChar pos c) - (\c -> if f c then Just c else Nothing) - --- | @string s@ parses a sequence of characters given by @s@. Returns --- the parsed string (i.e. @s@). --- --- > divOrMod = string "div" --- > <|> string "mod" - -string :: (Stream s m Char) => String -> ParsecT s u m String -string s = tokens show updatePosString s diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Combinator.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Combinator.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Combinator.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Combinator.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,337 +0,0 @@ --- due to Debug.Trace -{-# LANGUAGE Trustworthy #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Combinator --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Commonly used generic combinators. --- --- See also the [parser-combinators](http://hackage.haskell.org/package/parser-combinators) --- package for additional (and generalised) combinators. --- ------------------------------------------------------------------------------ - -module Text.Parsec.Combinator - ( choice - , count - , between - , option, optionMaybe, optional - , skipMany1 - , many1 - , sepBy, sepBy1 - , endBy, endBy1 - , sepEndBy, sepEndBy1 - , chainl, chainl1 - , chainr, chainr1 - , eof, notFollowedBy - -- tricky combinators - , manyTill, lookAhead, anyToken - -- * Debugging - -- - -- | As a more comprehensive alternative for debugging Parsec parsers, - -- there's also the [parsec-free](http://hackage.haskell.org/package/parsec-free) - -- package. - -- - , parserTrace, parserTraced - ) where - -import Control.Monad -import Text.Parsec.Prim -import Debug.Trace (trace) - --- | @choice ps@ tries to apply the parsers in the list @ps@ in order, --- until one of them succeeds. Returns the value of the succeeding --- parser. - -choice :: (Stream s m t) => [ParsecT s u m a] -> ParsecT s u m a -choice ps = foldr (<|>) mzero ps - --- | @option x p@ tries to apply parser @p@. If @p@ fails without --- consuming input, it returns the value @x@, otherwise the value --- returned by @p@. --- --- > priority = option 0 (do{ d <- digit --- > ; return (digitToInt d) --- > }) - -option :: (Stream s m t) => a -> ParsecT s u m a -> ParsecT s u m a -option x p = p <|> return x - --- | @optionMaybe p@ tries to apply parser @p@. If @p@ fails without --- consuming input, it return 'Nothing', otherwise it returns --- 'Just' the value returned by @p@. 
- -optionMaybe :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (Maybe a) -optionMaybe p = option Nothing (liftM Just p) - --- | @optional p@ tries to apply parser @p@. It will parse @p@ or nothing. --- It only fails if @p@ fails after consuming input. It discards the result --- of @p@. - -optional :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m () -optional p = do{ _ <- p; return ()} <|> return () - --- | @between open close p@ parses @open@, followed by @p@ and @close@. --- Returns the value returned by @p@. --- --- > braces = between (symbol "{") (symbol "}") - -between :: (Stream s m t) => ParsecT s u m open -> ParsecT s u m close - -> ParsecT s u m a -> ParsecT s u m a -between open close p - = do{ _ <- open; x <- p; _ <- close; return x } - --- | @skipMany1 p@ applies the parser @p@ /one/ or more times, skipping --- its result. - -skipMany1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m () -skipMany1 p = do{ _ <- p; skipMany p } -{- -skipMany p = scan - where - scan = do{ p; scan } <|> return () --} - --- | @many1 p@ applies the parser @p@ /one/ or more times. Returns a --- list of the returned values of @p@. --- --- > word = many1 letter - -many1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m [a] -many1 p = do{ x <- p; xs <- many p; return (x:xs) } -{- -many p = scan id - where - scan f = do{ x <- p - ; scan (\tail -> f (x:tail)) - } - <|> return (f []) --} - - --- | @sepBy p sep@ parses /zero/ or more occurrences of @p@, separated --- by @sep@. Returns a list of values returned by @p@. --- --- > commaSep p = p `sepBy` (symbol ",") - -sepBy :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -sepBy p sep = sepBy1 p sep <|> return [] - --- | @sepBy1 p sep@ parses /one/ or more occurrences of @p@, separated --- by @sep@. Returns a list of values returned by @p@. - -sepBy1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -sepBy1 p sep = do{ x <- p - ; xs <- many (sep >> p) - ; return (x:xs) - } - - --- | @sepEndBy1 p sep@ parses /one/ or more occurrences of @p@, --- separated and optionally ended by @sep@. Returns a list of values --- returned by @p@. - -sepEndBy1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -sepEndBy1 p sep = do{ x <- p - ; do{ _ <- sep - ; xs <- sepEndBy p sep - ; return (x:xs) - } - <|> return [x] - } - --- | @sepEndBy p sep@ parses /zero/ or more occurrences of @p@, --- separated and optionally ended by @sep@, ie. haskell style --- statements. Returns a list of values returned by @p@. --- --- > haskellStatements = haskellStatement `sepEndBy` semi - -sepEndBy :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -sepEndBy p sep = sepEndBy1 p sep <|> return [] - - --- | @endBy1 p sep@ parses /one/ or more occurrences of @p@, separated --- and ended by @sep@. Returns a list of values returned by @p@. - -endBy1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -endBy1 p sep = many1 (do{ x <- p; _ <- sep; return x }) - --- | @endBy p sep@ parses /zero/ or more occurrences of @p@, separated --- and ended by @sep@. Returns a list of values returned by @p@. --- --- > cStatements = cStatement `endBy` semi - -endBy :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m sep -> ParsecT s u m [a] -endBy p sep = many (do{ x <- p; _ <- sep; return x }) - --- | @count n p@ parses @n@ occurrences of @p@. If @n@ is smaller or --- equal to zero, the parser equals to @return []@. 
Returns a list of --- @n@ values returned by @p@. - -count :: (Stream s m t) => Int -> ParsecT s u m a -> ParsecT s u m [a] -count n p | n <= 0 = return [] - | otherwise = sequence (replicate n p) - --- | @chainr p op x@ parses /zero/ or more occurrences of @p@, --- separated by @op@ Returns a value obtained by a /right/ associative --- application of all functions returned by @op@ to the values returned --- by @p@. If there are no occurrences of @p@, the value @x@ is --- returned. - -chainr :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> a -> ParsecT s u m a -chainr p op x = chainr1 p op <|> return x - --- | @chainl p op x@ parses /zero/ or more occurrences of @p@, --- separated by @op@. Returns a value obtained by a /left/ associative --- application of all functions returned by @op@ to the values returned --- by @p@. If there are zero occurrences of @p@, the value @x@ is --- returned. - -chainl :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> a -> ParsecT s u m a -chainl p op x = chainl1 p op <|> return x - --- | @chainl1 p op@ parses /one/ or more occurrences of @p@, --- separated by @op@ Returns a value obtained by a /left/ associative --- application of all functions returned by @op@ to the values returned --- by @p@. This parser can for example be used to eliminate left --- recursion which typically occurs in expression grammars. --- --- > expr = term `chainl1` addop --- > term = factor `chainl1` mulop --- > factor = parens expr <|> integer --- > --- > mulop = do{ symbol "*"; return (*) } --- > <|> do{ symbol "/"; return (div) } --- > --- > addop = do{ symbol "+"; return (+) } --- > <|> do{ symbol "-"; return (-) } - -chainl1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> ParsecT s u m a -chainl1 p op = do{ x <- p; rest x } - where - rest x = do{ f <- op - ; y <- p - ; rest (f x y) - } - <|> return x - --- | @chainr1 p op x@ parses /one/ or more occurrences of |p|, --- separated by @op@ Returns a value obtained by a /right/ associative --- application of all functions returned by @op@ to the values returned --- by @p@. - -chainr1 :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m (a -> a -> a) -> ParsecT s u m a -chainr1 p op = scan - where - scan = do{ x <- p; rest x } - - rest x = do{ f <- op - ; y <- scan - ; return (f x y) - } - <|> return x - ------------------------------------------------------------ --- Tricky combinators ------------------------------------------------------------ --- | The parser @anyToken@ accepts any kind of token. It is for example --- used to implement 'eof'. Returns the accepted token. - -anyToken :: (Stream s m t, Show t) => ParsecT s u m t -anyToken = tokenPrim show (\pos _tok _toks -> pos) Just - --- | This parser only succeeds at the end of the input. This is not a --- primitive parser but it is defined using 'notFollowedBy'. --- --- > eof = notFollowedBy anyToken "end of input" - -eof :: (Stream s m t, Show t) => ParsecT s u m () -eof = notFollowedBy anyToken "end of input" - --- | @notFollowedBy p@ only succeeds when parser @p@ fails. This parser --- does not consume any input. This parser can be used to implement the --- \'longest match\' rule. For example, when recognizing keywords (for --- example @let@), we want to make sure that a keyword is not followed --- by a legal identifier character, in which case the keyword is --- actually an identifier (for example @lets@). 
We can program this --- behaviour as follows: --- --- > keywordLet = try (do{ string "let" --- > ; notFollowedBy alphaNum --- > }) --- --- __NOTE__: Currently, 'notFollowedBy' exhibits surprising behaviour --- when applied to a parser @p@ that doesn't consume any input; --- specifically --- --- - @'notFollowedBy' . 'notFollowedBy'@ is /not/ equivalent to 'lookAhead', and --- --- - @'notFollowedBy' 'eof'@ /never/ fails. --- --- See [haskell/parsec#8](https://github.com/haskell/parsec/issues/8) --- for more details. - -notFollowedBy :: (Stream s m t, Show a) => ParsecT s u m a -> ParsecT s u m () -notFollowedBy p = try (do{ c <- try p; unexpected (show c) } - <|> return () - ) - --- | @manyTill p end@ applies parser @p@ /zero/ or more times until --- parser @end@ succeeds. Returns the list of values returned by @p@. --- This parser can be used to scan comments: --- --- > simpleComment = do{ string "")) --- > } --- --- Note the overlapping parsers @anyChar@ and @string \"-->\"@, and --- therefore the use of the 'try' combinator. - -manyTill :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m end -> ParsecT s u m [a] -manyTill p end = scan - where - scan = do{ _ <- end; return [] } - <|> - do{ x <- p; xs <- scan; return (x:xs) } - --- | @parserTrace label@ is an impure function, implemented with "Debug.Trace" that --- prints to the console the remaining parser state at the time it is invoked. --- It is intended to be used for debugging parsers by inspecting their intermediate states. --- --- > *> parseTest (oneOf "aeiou" >> parserTrace "label") "atest" --- > label: "test" --- > ... --- --- @since 3.1.12.0 -parserTrace :: (Show t, Stream s m t) => String -> ParsecT s u m () -parserTrace s = pt <|> return () - where - pt = try $ do - x <- try $ many1 anyToken - trace (s++": " ++ show x) $ try $ eof - fail (show x) - --- | @parserTraced label p@ is an impure function, implemented with "Debug.Trace" that --- prints to the console the remaining parser state at the time it is invoked. --- It then continues to apply parser @p@, and if @p@ fails will indicate that --- the label has been backtracked. --- It is intended to be used for debugging parsers by inspecting their intermediate states. --- --- > *> parseTest (oneOf "aeiou" >> parserTraced "label" (oneOf "nope")) "atest" --- > label: "test" --- > label backtracked --- > parse error at (line 1, column 2): --- > ... 
--- --- @since 3.1.12.0 -parserTraced :: (Stream s m t, Show t) => String -> ParsecT s u m b -> ParsecT s u m b -parserTraced s p = do - parserTrace s - p <|> trace (s ++ " backtracked") (fail s) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Error.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Error.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Error.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Error.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,217 +0,0 @@ -{-# LANGUAGE DeriveDataTypeable #-} -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Error --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parse errors --- ------------------------------------------------------------------------------ - -module Text.Parsec.Error - ( Message ( SysUnExpect, UnExpect, Expect, Message ) - , messageString - , ParseError, errorPos, errorMessages, errorIsUnknown - , showErrorMessages - , newErrorMessage, newErrorUnknown - , addErrorMessage, setErrorPos, setErrorMessage - , mergeError - ) where - -import Data.List ( nub, sort ) -import Data.Typeable ( Typeable ) - -import Text.Parsec.Pos - --- | This abstract data type represents parse error messages. There are --- four kinds of messages: --- --- > data Message = SysUnExpect String --- > | UnExpect String --- > | Expect String --- > | Message String --- --- The fine distinction between different kinds of parse errors allows --- the system to generate quite good error messages for the user. It --- also allows error messages that are formatted in different --- languages. Each kind of message is generated by different combinators: --- --- * A 'SysUnExpect' message is automatically generated by the --- 'Text.Parsec.Combinator.satisfy' combinator. The argument is the --- unexpected input. --- --- * A 'UnExpect' message is generated by the 'Text.Parsec.Prim.unexpected' --- combinator. The argument describes the --- unexpected item. --- --- * A 'Expect' message is generated by the 'Text.Parsec.Prim.' --- combinator. The argument describes the expected item. --- --- * A 'Message' message is generated by the 'fail' --- combinator. The argument is some general parser message. - -data Message = SysUnExpect !String -- @ library generated unexpect - | UnExpect !String -- @ unexpected something - | Expect !String -- @ expecting something - | Message !String -- @ raw message - deriving ( Typeable ) - -instance Enum Message where - fromEnum (SysUnExpect _) = 0 - fromEnum (UnExpect _) = 1 - fromEnum (Expect _) = 2 - fromEnum (Message _) = 3 - toEnum _ = error "toEnum is undefined for Message" - --- < Return 'True' only when 'compare' would return 'EQ'. - -instance Eq Message where - - m1 == m2 = fromEnum m1 == fromEnum m2 - --- < Compares two error messages without looking at their content. 
Only --- the constructors are compared where: --- --- > 'SysUnExpect' < 'UnExpect' < 'Expect' < 'Message' - -instance Ord Message where - compare msg1 msg2 = compare (fromEnum msg1) (fromEnum msg2) - --- | Extract the message string from an error message - -messageString :: Message -> String -messageString (SysUnExpect s) = s -messageString (UnExpect s) = s -messageString (Expect s) = s -messageString (Message s) = s - --- | The abstract data type @ParseError@ represents parse errors. It --- provides the source position ('SourcePos') of the error --- and a list of error messages ('Message'). A @ParseError@ --- can be returned by the function 'Text.Parsec.Prim.parse'. @ParseError@ is an --- instance of the 'Show' and 'Eq' classes. - -data ParseError = ParseError !SourcePos [Message] - deriving ( Typeable ) - --- | Extracts the source position from the parse error - -errorPos :: ParseError -> SourcePos -errorPos (ParseError pos _msgs) - = pos - --- | Extracts the list of error messages from the parse error - -errorMessages :: ParseError -> [Message] -errorMessages (ParseError _pos msgs) - = sort msgs - -errorIsUnknown :: ParseError -> Bool -errorIsUnknown (ParseError _pos msgs) - = null msgs - --- < Create parse errors - -newErrorUnknown :: SourcePos -> ParseError -newErrorUnknown pos - = ParseError pos [] - -newErrorMessage :: Message -> SourcePos -> ParseError -newErrorMessage msg pos - = ParseError pos [msg] - -addErrorMessage :: Message -> ParseError -> ParseError -addErrorMessage msg (ParseError pos msgs) - = ParseError pos (msg:msgs) - -setErrorPos :: SourcePos -> ParseError -> ParseError -setErrorPos pos (ParseError _ msgs) - = ParseError pos msgs - -setErrorMessage :: Message -> ParseError -> ParseError -setErrorMessage msg (ParseError pos msgs) - = ParseError pos (msg : filter (msg /=) msgs) - -mergeError :: ParseError -> ParseError -> ParseError -mergeError e1@(ParseError pos1 msgs1) e2@(ParseError pos2 msgs2) - -- prefer meaningful errors - | null msgs2 && not (null msgs1) = e1 - | null msgs1 && not (null msgs2) = e2 - | otherwise - = case pos1 `compare` pos2 of - -- select the longest match - EQ -> ParseError pos1 (msgs1 ++ msgs2) - GT -> e1 - LT -> e2 - -instance Show ParseError where - show err - = show (errorPos err) ++ ":" ++ - showErrorMessages "or" "unknown parse error" - "expecting" "unexpected" "end of input" - (errorMessages err) - -instance Eq ParseError where - l == r - = errorPos l == errorPos r && messageStrs l == messageStrs r - where - messageStrs = map messageString . errorMessages - --- Language independent show function - --- TODO --- < The standard function for showing error messages. Formats a list of --- error messages in English. This function is used in the |Show| --- instance of |ParseError <#ParseError>|. 
The resulting string will be --- formatted like: --- --- |unexpected /{The first UnExpect or a SysUnExpect message}/; --- expecting /{comma separated list of Expect messages}/; --- /{comma separated list of Message messages}/ - -showErrorMessages :: - String -> String -> String -> String -> String -> [Message] -> String -showErrorMessages msgOr msgUnknown msgExpecting msgUnExpected msgEndOfInput msgs - | null msgs = msgUnknown - | otherwise = concat $ map ("\n"++) $ clean $ - [showSysUnExpect,showUnExpect,showExpect,showMessages] - where - (sysUnExpect,msgs1) = span ((SysUnExpect "") ==) msgs - (unExpect,msgs2) = span ((UnExpect "") ==) msgs1 - (expect,messages) = span ((Expect "") ==) msgs2 - - showExpect = showMany msgExpecting expect - showUnExpect = showMany msgUnExpected unExpect - showSysUnExpect | not (null unExpect) || - null sysUnExpect = "" - | null firstMsg = msgUnExpected ++ " " ++ msgEndOfInput - | otherwise = msgUnExpected ++ " " ++ firstMsg - where - firstMsg = messageString (head sysUnExpect) - - showMessages = showMany "" messages - - -- helpers - showMany pre msgs3 = case clean (map messageString msgs3) of - [] -> "" - ms | null pre -> commasOr ms - | otherwise -> pre ++ " " ++ commasOr ms - - commasOr [] = "" - commasOr [m] = m - commasOr ms = commaSep (init ms) ++ " " ++ msgOr ++ " " ++ last ms - - commaSep = separate ", " . clean - - separate _ [] = "" - separate _ [m] = m - separate sep (m:ms) = m ++ sep ++ separate sep ms - - clean = nub . filter (not . null) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Expr.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Expr.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Expr.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Expr.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,176 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE DeriveDataTypeable #-} -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Expr --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : non-portable --- --- A helper module to parse \"expressions\". --- Builds a parser given a table of operators and associativities. --- ------------------------------------------------------------------------------ - -module Text.Parsec.Expr - ( Assoc(..), Operator(..), OperatorTable - , buildExpressionParser - ) where - -import Data.Typeable ( Typeable ) - -import Text.Parsec.Prim -import Text.Parsec.Combinator - ------------------------------------------------------------ --- Assoc and OperatorTable ------------------------------------------------------------ - --- | This data type specifies the associativity of operators: left, right --- or none. - -data Assoc = AssocNone - | AssocLeft - | AssocRight - deriving ( Typeable ) - --- | This data type specifies operators that work on values of type @a@. --- An operator is either binary infix or unary prefix or postfix. A --- binary operator has also an associated associativity. 
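Concretely, a precedence table built from these Operator values is an ordinary list of lists (the OperatorTable type defined just below), ordered from the tightest-binding level downwards, and is consumed by buildExpressionParser. A minimal sketch under that assumption; the integer term parser and the arithmetic operators are illustrative only, not part of this module:

    import Control.Monad.Identity (Identity)
    import Text.Parsec
    import Text.Parsec.Expr
    import Text.Parsec.String (Parser)

    -- Two precedence levels, highest first: '*' binds tighter than '+' and '-'.
    table :: OperatorTable String () Identity Integer
    table =
      [ [ Infix ((*) <$ char '*') AssocLeft ]
      , [ Infix ((+) <$ char '+') AssocLeft
        , Infix ((-) <$ char '-') AssocLeft
        ]
      ]

    -- Terms are unsigned integers or parenthesised sub-expressions.
    expr, term :: Parser Integer
    expr = buildExpressionParser table term
    term = (read <$> many1 digit) <|> between (char '(') (char ')') expr

    main :: IO ()
    main = parseTest expr "1+2*(3+4)"   -- prints 15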
- -data Operator s u m a = Infix (ParsecT s u m (a -> a -> a)) Assoc - | Prefix (ParsecT s u m (a -> a)) - | Postfix (ParsecT s u m (a -> a)) -#if MIN_VERSION_base(4,7,0) - deriving ( Typeable ) -#endif - --- | An @OperatorTable s u m a@ is a list of @Operator s u m a@ --- lists. The list is ordered in descending --- precedence. All operators in one list have the same precedence (but --- may have a different associativity). - -type OperatorTable s u m a = [[Operator s u m a]] - ------------------------------------------------------------ --- Convert an OperatorTable and basic term parser into --- a full fledged expression parser ------------------------------------------------------------ - --- | @buildExpressionParser table term@ builds an expression parser for --- terms @term@ with operators from @table@, taking the associativity --- and precedence specified in @table@ into account. Prefix and postfix --- operators of the same precedence can only occur once (i.e. @--2@ is --- not allowed if @-@ is prefix negate). Prefix and postfix operators --- of the same precedence associate to the left (i.e. if @++@ is --- postfix increment, than @-2++@ equals @-1@, not @-3@). --- --- The @buildExpressionParser@ takes care of all the complexity --- involved in building expression parser. Here is an example of an --- expression parser that handles prefix signs, postfix increment and --- basic arithmetic. --- --- > expr = buildExpressionParser table term --- > "expression" --- > --- > term = parens expr --- > <|> natural --- > "simple expression" --- > --- > table = [ [prefix "-" negate, prefix "+" id ] --- > , [postfix "++" (+1)] --- > , [binary "*" (*) AssocLeft, binary "/" (div) AssocLeft ] --- > , [binary "+" (+) AssocLeft, binary "-" (-) AssocLeft ] --- > ] --- > --- > binary name fun assoc = Infix (do{ reservedOp name; return fun }) assoc --- > prefix name fun = Prefix (do{ reservedOp name; return fun }) --- > postfix name fun = Postfix (do{ reservedOp name; return fun }) - -buildExpressionParser :: (Stream s m t) - => OperatorTable s u m a - -> ParsecT s u m a - -> ParsecT s u m a -buildExpressionParser operators simpleExpr - = foldl (makeParser) simpleExpr operators - where - makeParser term ops - = let (rassoc,lassoc,nassoc - ,prefix,postfix) = foldr splitOp ([],[],[],[],[]) ops - - rassocOp = choice rassoc - lassocOp = choice lassoc - nassocOp = choice nassoc - prefixOp = choice prefix "" - postfixOp = choice postfix "" - - ambiguous assoc op= try $ - do{ _ <- op; fail ("ambiguous use of a " ++ assoc - ++ " associative operator") - } - - ambiguousRight = ambiguous "right" rassocOp - ambiguousLeft = ambiguous "left" lassocOp - ambiguousNon = ambiguous "non" nassocOp - - termP = do{ pre <- prefixP - ; x <- term - ; post <- postfixP - ; return (post (pre x)) - } - - postfixP = postfixOp <|> return id - - prefixP = prefixOp <|> return id - - rassocP x = do{ f <- rassocOp - ; y <- do{ z <- termP; rassocP1 z } - ; return (f x y) - } - <|> ambiguousLeft - <|> ambiguousNon - -- <|> return x - - rassocP1 x = rassocP x <|> return x - - lassocP x = do{ f <- lassocOp - ; y <- termP - ; lassocP1 (f x y) - } - <|> ambiguousRight - <|> ambiguousNon - -- <|> return x - - lassocP1 x = lassocP x <|> return x - - nassocP x = do{ f <- nassocOp - ; y <- termP - ; ambiguousRight - <|> ambiguousLeft - <|> ambiguousNon - <|> return (f x y) - } - -- <|> return x - - in do{ x <- termP - ; rassocP x <|> lassocP x <|> nassocP x <|> return x - "operator" - } - - - splitOp (Infix op assoc) 
(rassoc,lassoc,nassoc,prefix,postfix) - = case assoc of - AssocNone -> (rassoc,lassoc,op:nassoc,prefix,postfix) - AssocLeft -> (rassoc,op:lassoc,nassoc,prefix,postfix) - AssocRight -> (op:rassoc,lassoc,nassoc,prefix,postfix) - - splitOp (Prefix op) (rassoc,lassoc,nassoc,prefix,postfix) - = (rassoc,lassoc,nassoc,op:prefix,postfix) - - splitOp (Postfix op) (rassoc,lassoc,nassoc,prefix,postfix) - = (rassoc,lassoc,nassoc,prefix,op:postfix) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Language.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Language.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Language.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Language.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,151 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Language --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : non-portable (uses non-portable module Text.Parsec.Token) --- --- A helper module that defines some language definitions that can be used --- to instantiate a token parser (see "Text.Parsec.Token"). --- ------------------------------------------------------------------------------ - -module Text.Parsec.Language - ( haskellDef, haskell - , mondrianDef, mondrian - , emptyDef - , haskellStyle - , javaStyle - , LanguageDef - , GenLanguageDef - ) where - -import Text.Parsec -import Text.Parsec.Token - ------------------------------------------------------------ --- Styles: haskellStyle, javaStyle ------------------------------------------------------------ - --- | This is a minimal token definition for Haskell style languages. It --- defines the style of comments, valid identifiers and case --- sensitivity. It does not define any reserved words or operators. - -haskellStyle :: LanguageDef st -haskellStyle = emptyDef - { commentStart = "{-" - , commentEnd = "-}" - , commentLine = "--" - , nestedComments = True - , identStart = letter - , identLetter = alphaNum <|> oneOf "_'" - , opStart = opLetter haskellStyle - , opLetter = oneOf ":!#$%&*+./<=>?@\\^|-~" - , reservedOpNames= [] - , reservedNames = [] - , caseSensitive = True - } - --- | This is a minimal token definition for Java style languages. It --- defines the style of comments, valid identifiers and case --- sensitivity. It does not define any reserved words or operators. - -javaStyle :: LanguageDef st -javaStyle = emptyDef - { commentStart = "/*" - , commentEnd = "*/" - , commentLine = "//" - , nestedComments = True - , identStart = letter - , identLetter = alphaNum <|> oneOf "_'" - , reservedNames = [] - , reservedOpNames= [] - , caseSensitive = False - } - ------------------------------------------------------------ --- minimal language definition --------------------------------------------------------- - --- | This is the most minimal token definition. It is recommended to use --- this definition as the basis for other definitions. @emptyDef@ has --- no reserved names or operators, is case sensitive and doesn't accept --- comments, identifiers or operators. 
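In practice a definition like emptyDef is extended by record update and then handed to makeTokenParser from Text.Parsec.Token (which appears later in this diff). A minimal sketch along those lines; the comment marker and reserved words are invented for illustration:

    import Text.Parsec
    import Text.Parsec.Language (emptyDef)
    import Text.Parsec.String (Parser)
    import qualified Text.Parsec.Token as Tok

    -- emptyDef extended with a line-comment marker and two reserved words.
    miniDef :: Tok.LanguageDef ()
    miniDef = emptyDef
      { Tok.commentLine   = "#"
      , Tok.reservedNames = ["if", "then"]
      }

    lexer :: Tok.TokenParser ()
    lexer = Tok.makeTokenParser miniDef

    -- A lexeme parser: parses an identifier, then skips trailing spaces and comments.
    identifier :: Parser String
    identifier = Tok.identifier lexer

    main :: IO ()
    main = parseTest identifier "answer  # a trailing comment"   -- prints "answer"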
- -emptyDef :: LanguageDef st -emptyDef = LanguageDef - { commentStart = "" - , commentEnd = "" - , commentLine = "" - , nestedComments = True - , identStart = letter <|> char '_' - , identLetter = alphaNum <|> oneOf "_'" - , opStart = opLetter emptyDef - , opLetter = oneOf ":!#$%&*+./<=>?@\\^|-~" - , reservedOpNames= [] - , reservedNames = [] - , caseSensitive = True - } - - - ------------------------------------------------------------ --- Haskell ------------------------------------------------------------ - --- | A lexer for the haskell language. - -haskell :: TokenParser st -haskell = makeTokenParser haskellDef - --- | The language definition for the Haskell language. - -haskellDef :: LanguageDef st -haskellDef = haskell98Def - { identLetter = identLetter haskell98Def <|> char '#' - , reservedNames = reservedNames haskell98Def ++ - ["foreign","import","export","primitive" - ,"_ccall_","_casm_" - ,"forall" - ] - } - --- | The language definition for the language Haskell98. - -haskell98Def :: LanguageDef st -haskell98Def = haskellStyle - { reservedOpNames= ["::","..","=","\\","|","<-","->","@","~","=>"] - , reservedNames = ["let","in","case","of","if","then","else", - "data","type", - "class","default","deriving","do","import", - "infix","infixl","infixr","instance","module", - "newtype","where", - "primitive" - -- "as","qualified","hiding" - ] - } - - ------------------------------------------------------------ --- Mondrian ------------------------------------------------------------ - --- | A lexer for the mondrian language. - -mondrian :: TokenParser st -mondrian = makeTokenParser mondrianDef - --- | The language definition for the language Mondrian. - -mondrianDef :: LanguageDef st -mondrianDef = javaStyle - { reservedNames = [ "case", "class", "default", "extends" - , "import", "in", "let", "new", "of", "package" - ] - , caseSensitive = True - } diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Perm.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Perm.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Perm.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Perm.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,209 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE DeriveDataTypeable #-} -{-# LANGUAGE ExistentialQuantification #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE StandaloneDeriving #-} -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Perm --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the file libraries/parsec/LICENSE) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : non-portable (uses existentially quantified data constructors) --- --- This module implements permutation parsers. The algorithm used --- is fairly complex since we push the type system to its limits :-) --- The algorithm is described in: --- --- /Parsing Permutation Phrases,/ --- by Arthur Baars, Andres Loh and Doaitse Swierstra. --- Published as a functional pearl at the Haskell Workshop 2001. --- --- From the abstract: --- --- A permutation phrase is a sequence of elements (possibly of different types) --- in which each element occurs exactly once and the order is irrelevant. --- Some of the permutable elements may be optional. 
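As a concrete illustration of such a phrase, here is a sketch of a parser for an optional run of 'a' characters, a mandatory 'b' and an optional 'c', accepted in any order; it uses the combinators defined below and mirrors the commented-out test in this module:

    import Text.Parsec
    import Text.Parsec.Perm
    import Text.Parsec.String (Parser)

    -- Missing optional elements fall back to the supplied defaults ("" and '_').
    abc :: Parser (String, Char, Char)
    abc = permute $ (,,) <$?> ("", many1 (char 'a'))
                         <||> char 'b'
                         <|?> ('_', char 'c')

    main :: IO ()
    main = parseTest abc "cbaa"   -- prints ("aa",'b','c')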
--- ------------------------------------------------------------------------------ - - -module Text.Parsec.Perm - ( PermParser - , StreamPermParser -- abstract - - , permute - , (<||>), (<$$>) - , (<|?>), (<$?>) - ) where - -import Text.Parsec - -import Control.Monad.Identity -#if MIN_VERSION_base(4,7,0) -import Data.Typeable ( Typeable ) -#else --- For GHC 7.6 -import Data.Typeable ( Typeable3 ) -#endif - -infixl 1 <||>, <|?> -infixl 2 <$$>, <$?> - - -{--------------------------------------------------------------- - test -- parse a permutation of - * an optional string of 'a's - * a required 'b' - * an optional 'c' ----------------------------------------------------------------} -{- -test input - = parse (do{ x <- ptest; eof; return x }) "" input - -ptest :: Parser (String,Char,Char) -ptest - = permute $ - (,,) <$?> ("",many1 (char 'a')) - <||> char 'b' - <|?> ('_',char 'c') --} - -{--------------------------------------------------------------- - Building a permutation parser ----------------------------------------------------------------} - --- | The expression @perm \<||> p@ adds parser @p@ to the permutation --- parser @perm@. The parser @p@ is not allowed to accept empty input - --- use the optional combinator ('<|?>') instead. Returns a --- new permutation parser that includes @p@. - -(<||>) :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> Parsec s st a -> StreamPermParser s st b -(<||>) perm p = add perm p - --- | The expression @f \<$$> p@ creates a fresh permutation parser --- consisting of parser @p@. The the final result of the permutation --- parser is the function @f@ applied to the return value of @p@. The --- parser @p@ is not allowed to accept empty input - use the optional --- combinator ('<$?>') instead. --- --- If the function @f@ takes more than one parameter, the type variable --- @b@ is instantiated to a functional type which combines nicely with --- the adds parser @p@ to the ('<||>') combinator. This --- results in stylized code where a permutation parser starts with a --- combining function @f@ followed by the parsers. The function @f@ --- gets its parameters in the order in which the parsers are specified, --- but actual input can be in any order. - -(<$$>) :: (Stream s Identity tok) => (a -> b) -> Parsec s st a -> StreamPermParser s st b -(<$$>) f p = newperm f <||> p - --- | The expression @perm \<||> (x,p)@ adds parser @p@ to the --- permutation parser @perm@. The parser @p@ is optional - if it can --- not be applied, the default value @x@ will be used instead. Returns --- a new permutation parser that includes the optional parser @p@. - -(<|?>) :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> (a, Parsec s st a) -> StreamPermParser s st b -(<|?>) perm (x,p) = addopt perm x p - --- | The expression @f \<$?> (x,p)@ creates a fresh permutation parser --- consisting of parser @p@. The the final result of the permutation --- parser is the function @f@ applied to the return value of @p@. The --- parser @p@ is optional - if it can not be applied, the default value --- @x@ will be used instead. - -(<$?>) :: (Stream s Identity tok) => (a -> b) -> (a, Parsec s st a) -> StreamPermParser s st b -(<$?>) f (x,p) = newperm f <|?> (x,p) - -{--------------------------------------------------------------- - The permutation tree ----------------------------------------------------------------} - --- | Provided for backwards compatibility. The tok type is ignored. 
- -type PermParser tok st a = StreamPermParser String st a - --- | The type @StreamPermParser s st a@ denotes a permutation parser that, --- when converted by the 'permute' function, parses --- @s@ streams with user state @st@ and returns a value of --- type @a@ on success. --- --- Normally, a permutation parser is first build with special operators --- like ('<||>') and than transformed into a normal parser --- using 'permute'. - -data StreamPermParser s st a = Perm (Maybe a) [StreamBranch s st a] -#if MIN_VERSION_base(4,7,0) - deriving ( Typeable ) -#else -deriving instance Typeable3 StreamPermParser -#endif - --- type Branch st a = StreamBranch String st a - -data StreamBranch s st a = forall b. Branch (StreamPermParser s st (b -> a)) (Parsec s st b) -#if MIN_VERSION_base(4,7,0) - deriving ( Typeable ) -#else -deriving instance Typeable3 StreamBranch -#endif - --- | The parser @permute perm@ parses a permutation of parser described --- by @perm@. For example, suppose we want to parse a permutation of: --- an optional string of @a@'s, the character @b@ and an optional @c@. --- This can be described by: --- --- > test = permute (tuple <$?> ("",many1 (char 'a')) --- > <||> char 'b' --- > <|?> ('_',char 'c')) --- > where --- > tuple a b c = (a,b,c) - --- transform a permutation tree into a normal parser -permute :: (Stream s Identity tok) => StreamPermParser s st a -> Parsec s st a -permute (Perm def xs) - = choice (map branch xs ++ empty) - where - empty - = case def of - Nothing -> [] - Just x -> [return x] - - branch (Branch perm p) - = do{ x <- p - ; f <- permute perm - ; return (f x) - } - --- build permutation trees -newperm :: (Stream s Identity tok) => (a -> b) -> StreamPermParser s st (a -> b) -newperm f - = Perm (Just f) [] - -add :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> Parsec s st a -> StreamPermParser s st b -add perm@(Perm _mf fs) p - = Perm Nothing (first:map insert fs) - where - first = Branch perm p - insert (Branch perm' p') - = Branch (add (mapPerms flip perm') p) p' - -addopt :: (Stream s Identity tok) => StreamPermParser s st (a -> b) -> a -> Parsec s st a -> StreamPermParser s st b -addopt perm@(Perm mf fs) x p - = Perm (fmap ($ x) mf) (first:map insert fs) - where - first = Branch perm p - insert (Branch perm' p') - = Branch (addopt (mapPerms flip perm') x p) p' - - -mapPerms :: (Stream s Identity tok) => (a -> b) -> StreamPermParser s st a -> StreamPermParser s st b -mapPerms f (Perm x xs) - = Perm (fmap f x) (map mapBranch xs) - where - mapBranch (Branch perm p) - = Branch (mapPerms (f.) perm) p diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Pos.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Pos.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Pos.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Pos.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,129 +0,0 @@ -{-# LANGUAGE DeriveDataTypeable #-} -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Pos --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Textual source positions. 
--- ------------------------------------------------------------------------------ - -module Text.Parsec.Pos - ( SourceName, Line, Column - , SourcePos - , sourceLine, sourceColumn, sourceName - , incSourceLine, incSourceColumn - , setSourceLine, setSourceColumn, setSourceName - , newPos, initialPos - , updatePosChar, updatePosString - ) where - -import Data.Data (Data) -import Data.Typeable (Typeable) - --- < Source positions: a file name, a line and a column --- upper left is (1,1) - -type SourceName = String -type Line = Int -type Column = Int - --- | The abstract data type @SourcePos@ represents source positions. It --- contains the name of the source (i.e. file name), a line number and --- a column number. @SourcePos@ is an instance of the 'Show', 'Eq' and --- 'Ord' class. - -data SourcePos = SourcePos SourceName !Line !Column - deriving ( Eq, Ord, Data, Typeable) - --- | Create a new 'SourcePos' with the given source name, --- line number and column number. - -newPos :: SourceName -> Line -> Column -> SourcePos -newPos name line column - = SourcePos name line column - --- | Create a new 'SourcePos' with the given source name, --- and line number and column number set to 1, the upper left. - -initialPos :: SourceName -> SourcePos -initialPos name - = newPos name 1 1 - --- | Extracts the name of the source from a source position. - -sourceName :: SourcePos -> SourceName -sourceName (SourcePos name _line _column) = name - --- | Extracts the line number from a source position. - -sourceLine :: SourcePos -> Line -sourceLine (SourcePos _name line _column) = line - --- | Extracts the column number from a source position. - -sourceColumn :: SourcePos -> Column -sourceColumn (SourcePos _name _line column) = column - --- | Increments the line number of a source position. - -incSourceLine :: SourcePos -> Line -> SourcePos -incSourceLine (SourcePos name line column) n = SourcePos name (line+n) column - --- | Increments the column number of a source position. - -incSourceColumn :: SourcePos -> Column -> SourcePos -incSourceColumn (SourcePos name line column) n = SourcePos name line (column+n) - --- | Set the name of the source. - -setSourceName :: SourcePos -> SourceName -> SourcePos -setSourceName (SourcePos _name line column) n = SourcePos n line column - --- | Set the line number of a source position. - -setSourceLine :: SourcePos -> Line -> SourcePos -setSourceLine (SourcePos name _line column) n = SourcePos name n column - --- | Set the column number of a source position. - -setSourceColumn :: SourcePos -> Column -> SourcePos -setSourceColumn (SourcePos name line _column) n = SourcePos name line n - --- | The expression @updatePosString pos s@ updates the source position --- @pos@ by calling 'updatePosChar' on every character in @s@, ie. --- @foldl updatePosChar pos string@. - -updatePosString :: SourcePos -> String -> SourcePos -updatePosString pos string - = foldl updatePosChar pos string - --- | Update a source position given a character. If the character is a --- newline (\'\\n\') or carriage return (\'\\r\') the line number is --- incremented by 1. If the character is a tab (\'\t\') the column --- number is incremented to the nearest 8'th column, ie. @column + 8 - --- ((column-1) \`mod\` 8)@. In all other cases, the column is --- incremented by 1. 
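A short worked example of the position arithmetic described above, using only functions exported by this module (the file name and input string are invented):

    import Text.Parsec.Pos

    -- 'a' and 'b' advance the column, '\n' starts line 2, and the tab after
    -- "cd" jumps from column 3 to column 9, so 'e' lands in column 10.
    demoPos :: SourcePos
    demoPos = updatePosString (initialPos "demo.txt") "ab\ncd\te"

    main :: IO ()
    main = print demoPos   -- prints: "demo.txt" (line 2, column 10)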
- -updatePosChar :: SourcePos -> Char -> SourcePos -updatePosChar (SourcePos name line column) c - = case c of - '\n' -> SourcePos name (line+1) 1 - '\t' -> SourcePos name line (column + 8 - ((column-1) `mod` 8)) - _ -> SourcePos name line (column + 1) - -instance Show SourcePos where - show (SourcePos name line column) - | null name = showLineColumn - | otherwise = "\"" ++ name ++ "\" " ++ showLineColumn - where - showLineColumn = "(line " ++ show line ++ - ", column " ++ show column ++ - ")" diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Prim.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Prim.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Prim.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Prim.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,855 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE DeriveDataTypeable #-} -{-# LANGUAGE ExistentialQuantification #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE FunctionalDependencies #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE PolymorphicComponents #-} -{-# LANGUAGE Safe #-} -{-# LANGUAGE StandaloneDeriving #-} -{-# LANGUAGE UndecidableInstances #-} - -{-# OPTIONS_GHC -fno-warn-name-shadowing #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Prim --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- The primitive parser combinators. --- ------------------------------------------------------------------------------ - -{-# OPTIONS_HADDOCK not-home #-} - -module Text.Parsec.Prim - ( unknownError - , sysUnExpectError - , unexpected - , ParsecT - , runParsecT - , mkPT - , Parsec - , Consumed(..) - , Reply(..) - , State(..) - , parsecMap - , parserReturn - , parserBind - , mergeErrorReply - , parserFail - , parserZero - , parserPlus - , () - , (<|>) - , label - , labels - , lookAhead - , Stream(..) - , tokens - , try - , token - , tokenPrim - , tokenPrimEx - , many - , skipMany - , manyAccum - , runPT - , runP - , runParserT - , runParser - , parse - , parseTest - , getPosition - , getInput - , setPosition - , setInput - , getParserState - , setParserState - , updateParserState - , getState - , putState - , modifyState - , setState - , updateState - ) where - - -import Prelude hiding (sequence) -import qualified Data.ByteString.Lazy.Char8 as CL -import qualified Data.ByteString.Char8 as C - -import Data.Typeable ( Typeable ) - -import qualified Data.Text as Text -import qualified Data.Text.Lazy as TextL - --- To define Monoid instance -import qualified Data.List.NonEmpty as NE -import Data.List ( genericReplicate ) -import Data.Traversable (sequence) -import qualified Data.Functor as Functor ( Functor(..) ) -import qualified Data.Semigroup as Semigroup ( Semigroup(..) ) -import qualified Data.Monoid as Monoid ( Monoid(..) 
) - -import qualified Control.Applicative as Applicative ( Applicative(..), Alternative(..), liftA2 ) -import Control.Monad hiding (sequence) -import Control.Monad.Trans -import Control.Monad.Identity hiding (sequence) -import qualified Control.Monad.Fail as Fail - -import Control.Monad.Reader.Class -import Control.Monad.State.Class -import Control.Monad.Cont.Class -import Control.Monad.Error.Class - -import Text.Parsec.Pos -import Text.Parsec.Error - -unknownError :: State s u -> ParseError -unknownError state = newErrorUnknown (statePos state) - -sysUnExpectError :: String -> SourcePos -> Reply s u a -sysUnExpectError msg pos = Error (newErrorMessage (SysUnExpect msg) pos) - --- | The parser @unexpected msg@ always fails with an unexpected error --- message @msg@ without consuming any input. --- --- The parsers 'fail', ('') and @unexpected@ are the three parsers --- used to generate error messages. Of these, only ('') is commonly --- used. For an example of the use of @unexpected@, see the definition --- of 'Text.Parsec.Combinator.notFollowedBy'. - -unexpected :: (Stream s m t) => String -> ParsecT s u m a -unexpected msg - = ParsecT $ \s _ _ _ eerr -> - eerr $ newErrorMessage (UnExpect msg) (statePos s) - --- | ParserT monad transformer and Parser type - --- | @ParsecT s u m a@ is a parser with stream type @s@, user state type @u@, --- underlying monad @m@ and return type @a@. Parsec is strict in the user state. --- If this is undesirable, simply use a data type like @data Box a = Box a@ and --- the state type @Box YourStateType@ to add a level of indirection. - -newtype ParsecT s u m a - = ParsecT {unParser :: forall b . - State s u - -> (a -> State s u -> ParseError -> m b) -- consumed ok - -> (ParseError -> m b) -- consumed err - -> (a -> State s u -> ParseError -> m b) -- empty ok - -> (ParseError -> m b) -- empty err - -> m b - } -#if MIN_VERSION_base(4,7,0) - deriving ( Typeable ) - -- GHC 7.6 doesn't like deriving instances of Typeabl1 for types with - -- non-* type-arguments. -#endif - --- | Low-level unpacking of the ParsecT type. To run your parser, please look to --- runPT, runP, runParserT, runParser and other such functions. -runParsecT :: Monad m => ParsecT s u m a -> State s u -> m (Consumed (m (Reply s u a))) -runParsecT p s = unParser p s cok cerr eok eerr - where cok a s' err = return . Consumed . return $ Ok a s' err - cerr err = return . Consumed . return $ Error err - eok a s' err = return . Empty . return $ Ok a s' err - eerr err = return . Empty . return $ Error err - --- | Low-level creation of the ParsecT type. You really shouldn't have to do this. 
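The ParsecT documentation above notes that the user state is strict and suggests adding a level of indirection through a wrapper type when that is undesirable. A minimal sketch of that suggestion; the Box wrapper and the counting parser are illustrative only:

    import Text.Parsec

    -- ParsecT forces the Box itself, not the value stored inside it.
    data Box a = Box a

    -- Count the leading 'a' characters by bumping a counter in the user state.
    countAs :: Parsec String (Box Int) Int
    countAs = do
      skipMany (char 'a' >> modifyState (\(Box n) -> Box (n + 1)))
      Box n <- getState
      return n

    main :: IO ()
    main = print (runParser countAs (Box 0) "" "aaab")   -- prints Right 3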
-mkPT :: Monad m => (State s u -> m (Consumed (m (Reply s u a)))) -> ParsecT s u m a -mkPT k = ParsecT $ \s cok cerr eok eerr -> do - cons <- k s - case cons of - Consumed mrep -> do - rep <- mrep - case rep of - Ok x s' err -> cok x s' err - Error err -> cerr err - Empty mrep -> do - rep <- mrep - case rep of - Ok x s' err -> eok x s' err - Error err -> eerr err - -type Parsec s u = ParsecT s u Identity - -data Consumed a = Consumed a - | Empty !a - deriving ( Typeable ) - -data Reply s u a = Ok a !(State s u) ParseError - | Error ParseError - deriving ( Typeable ) - -data State s u = State { - stateInput :: s, - statePos :: !SourcePos, - stateUser :: !u - } - deriving ( Typeable ) - --- | The 'Semigroup' instance for 'ParsecT' is used to append the result --- of several parsers, for example: --- --- @ --- (many $ char 'a') <> (many $ char 'b') --- @ --- --- The above will parse a string like @"aabbb"@ and return a successful --- parse result @"aabbb"@. Compare against the below which will --- produce a result of @"bbb"@ for the same input: --- --- @ --- (many $ char 'a') >> (many $ char 'b') --- (many $ char 'a') *> (many $ char 'b') --- @ --- --- @since 3.1.12 -instance Semigroup.Semigroup a => Semigroup.Semigroup (ParsecT s u m a) where - -- | Combines two parsers like '*>', '>>' and @do {...;...}@ - -- /but/ also combines their results with (<>) instead of - -- discarding the first. - (<>) = Applicative.liftA2 (Semigroup.<>) - -#if MIN_VERSION_base(4,8,0) - sconcat = fmap Semigroup.sconcat . sequence -#else - sconcat = fmap (Semigroup.sconcat . NE.fromList) . sequence . NE.toList -#endif - stimes b = Semigroup.sconcat . NE.fromList . genericReplicate b - --- | The 'Monoid' instance for 'ParsecT' is used for the same purposes as --- the 'Semigroup' instance. --- --- @since 3.1.12 -instance ( Monoid.Monoid a - , Semigroup.Semigroup (ParsecT s u m a) - ) => Monoid.Monoid (ParsecT s u m a) where - -- | A parser that always succeeds, consumes no input, and - -- returns the underlying 'Monoid''s 'mempty' value - mempty = Applicative.pure Monoid.mempty - - -- | See 'ParsecT''s 'Semigroup.<>' implementation - mappend = (Semigroup.<>) - - mconcat = Functor.fmap Monoid.mconcat . sequence - -instance Functor Consumed where - fmap f (Consumed x) = Consumed (f x) - fmap f (Empty x) = Empty (f x) - -instance Functor (Reply s u) where - fmap f (Ok x s e) = Ok (f x) s e - fmap _ (Error e) = Error e -- XXX - -instance Functor (ParsecT s u m) where - fmap f p = parsecMap f p - -parsecMap :: (a -> b) -> ParsecT s u m a -> ParsecT s u m b -parsecMap f p - = ParsecT $ \s cok cerr eok eerr -> - unParser p s (cok . f) cerr (eok . f) eerr - -instance Applicative.Applicative (ParsecT s u m) where - pure = parserReturn - (<*>) = ap -- TODO: Can this be optimized? - p1 *> p2 = p1 `parserBind` const p2 - p1 <* p2 = do { x1 <- p1 ; void p2 ; return x1 } - -instance Applicative.Alternative (ParsecT s u m) where - empty = mzero - (<|>) = mplus - -instance Monad (ParsecT s u m) where - return = Applicative.pure - p >>= f = parserBind p f - (>>) = (Applicative.*>) -#if !MIN_VERSION_base(4,13,0) - fail = Fail.fail -#endif - --- | @since 3.1.12.0 -instance Fail.MonadFail (ParsecT s u m) where - fail = parserFail - -instance (MonadIO m) => MonadIO (ParsecT s u m) where - liftIO = lift . 
liftIO - -instance (MonadReader r m) => MonadReader r (ParsecT s u m) where - ask = lift ask - local f p = mkPT $ \s -> local f (runParsecT p s) - --- I'm presuming the user might want a separate, non-backtracking --- state aside from the Parsec user state. -instance (MonadState s m) => MonadState s (ParsecT s' u m) where - get = lift get - put = lift . put - -instance (MonadCont m) => MonadCont (ParsecT s u m) where - callCC f = mkPT $ \s -> - callCC $ \c -> - runParsecT (f (\a -> mkPT $ \s' -> c (pack s' a))) s - - where pack s a= Empty $ return (Ok a s (unknownError s)) - -instance (MonadError e m) => MonadError e (ParsecT s u m) where - throwError = lift . throwError - p `catchError` h = mkPT $ \s -> - runParsecT p s `catchError` \e -> - runParsecT (h e) s - -parserReturn :: a -> ParsecT s u m a -parserReturn x - = ParsecT $ \s _ _ eok _ -> - eok x s (unknownError s) - -parserBind :: ParsecT s u m a -> (a -> ParsecT s u m b) -> ParsecT s u m b -{-# INLINE parserBind #-} -parserBind m k - = ParsecT $ \s cok cerr eok eerr -> - let - -- consumed-okay case for m - mcok x s err = - let - -- if (k x) consumes, those go straigt up - pcok = cok - pcerr = cerr - - -- if (k x) doesn't consume input, but is okay, - -- we still return in the consumed continuation - peok x s err' = cok x s (mergeError err err') - - -- if (k x) doesn't consume input, but errors, - -- we return the error in the 'consumed-error' - -- continuation - peerr err' = cerr (mergeError err err') - in unParser (k x) s pcok pcerr peok peerr - - -- empty-ok case for m - meok x s err = - let - -- in these cases, (k x) can return as empty - pcok = cok - peok x s err' = eok x s (mergeError err err') - pcerr = cerr - peerr err' = eerr (mergeError err err') - in unParser (k x) s pcok pcerr peok peerr - -- consumed-error case for m - mcerr = cerr - - -- empty-error case for m - meerr = eerr - - in unParser m s mcok mcerr meok meerr - - -mergeErrorReply :: ParseError -> Reply s u a -> Reply s u a -mergeErrorReply err1 reply -- XXX where to put it? - = case reply of - Ok x state err2 -> Ok x state (mergeError err1 err2) - Error err2 -> Error (mergeError err1 err2) - -parserFail :: String -> ParsecT s u m a -parserFail msg - = ParsecT $ \s _ _ _ eerr -> - eerr $ newErrorMessage (Message msg) (statePos s) - -instance MonadPlus (ParsecT s u m) where - mzero = parserZero - mplus p1 p2 = parserPlus p1 p2 - --- | @parserZero@ always fails without consuming any input. @parserZero@ is defined --- equal to the 'mzero' member of the 'MonadPlus' class and to the 'Control.Applicative.empty' member --- of the 'Control.Applicative.Alternative' class. - -parserZero :: ParsecT s u m a -parserZero - = ParsecT $ \s _ _ _ eerr -> - eerr $ unknownError s - -parserPlus :: ParsecT s u m a -> ParsecT s u m a -> ParsecT s u m a -{-# INLINE parserPlus #-} -parserPlus m n - = ParsecT $ \s cok cerr eok eerr -> - let - meerr err = - let - neok y s' err' = eok y s' (mergeError err err') - neerr err' = eerr $ mergeError err err' - in unParser n s cok cerr neok neerr - in unParser m s cok cerr eok meerr - -instance MonadTrans (ParsecT s u) where - lift amb = ParsecT $ \s _ _ eok _ -> do - a <- amb - eok a s $ unknownError s - -infix 0 -infixr 1 <|> - --- | The parser @p \ msg@ behaves as parser @p@, but whenever the --- parser @p@ fails /without consuming any input/, it replaces expect --- error messages with the expect error message @msg@. 
--- --- This is normally used at the end of a set alternatives where we want --- to return an error message in terms of a higher level construct --- rather than returning all possible characters. For example, if the --- @expr@ parser from the 'try' example would fail, the error --- message is: '...: expecting expression'. Without the @(\)@ --- combinator, the message would be like '...: expecting \"let\" or --- letter', which is less friendly. - -() :: (ParsecT s u m a) -> String -> (ParsecT s u m a) -p msg = label p msg - --- | This combinator implements choice. The parser @p \<|> q@ first --- applies @p@. If it succeeds, the value of @p@ is returned. If @p@ --- fails /without consuming any input/, parser @q@ is tried. This --- combinator is defined equal to the 'mplus' member of the 'MonadPlus' --- class and the ('Control.Applicative.<|>') member of 'Control.Applicative.Alternative'. --- --- The parser is called /predictive/ since @q@ is only tried when --- parser @p@ didn't consume any input (i.e.. the look ahead is 1). --- This non-backtracking behaviour allows for both an efficient --- implementation of the parser combinators and the generation of good --- error messages. - -(<|>) :: (ParsecT s u m a) -> (ParsecT s u m a) -> (ParsecT s u m a) -p1 <|> p2 = mplus p1 p2 - --- | A synonym for @\@, but as a function instead of an operator. -label :: ParsecT s u m a -> String -> ParsecT s u m a -label p msg - = labels p [msg] - -labels :: ParsecT s u m a -> [String] -> ParsecT s u m a -labels p msgs = - ParsecT $ \s cok cerr eok eerr -> - let eok' x s' error = eok x s' $ if errorIsUnknown error - then error - else setExpectErrors error msgs - eerr' err = eerr $ setExpectErrors err msgs - - in unParser p s cok cerr eok' eerr' - - where - setExpectErrors err [] = setErrorMessage (Expect "") err - setExpectErrors err [msg] = setErrorMessage (Expect msg) err - setExpectErrors err (msg:msgs) - = foldr (\msg' err' -> addErrorMessage (Expect msg') err') - (setErrorMessage (Expect msg) err) msgs - --- TODO: There should be a stronger statement that can be made about this - --- | An instance of @Stream@ has stream type @s@, underlying monad @m@ and token type @t@ determined by the stream --- --- Some rough guidelines for a \"correct\" instance of Stream: --- --- * unfoldM uncons gives the [t] corresponding to the stream --- --- * A @Stream@ instance is responsible for maintaining the \"position within the stream\" in the stream state @s@. This is trivial unless you are using the monad in a non-trivial way. - -class (Monad m) => Stream s m t | s -> t where - uncons :: s -> m (Maybe (t,s)) - -instance (Monad m) => Stream [tok] m tok where - uncons [] = return $ Nothing - uncons (t:ts) = return $ Just (t,ts) - {-# INLINE uncons #-} - - -instance (Monad m) => Stream CL.ByteString m Char where - uncons = return . CL.uncons - -instance (Monad m) => Stream C.ByteString m Char where - uncons = return . C.uncons - -instance (Monad m) => Stream Text.Text m Char where - uncons = return . Text.uncons - {-# INLINE uncons #-} - -instance (Monad m) => Stream TextL.Text m Char where - uncons = return . 
TextL.uncons - {-# INLINE uncons #-} - - -tokens :: (Stream s m t, Eq t) - => ([t] -> String) -- Pretty print a list of tokens - -> (SourcePos -> [t] -> SourcePos) - -> [t] -- List of tokens to parse - -> ParsecT s u m [t] -{-# INLINE tokens #-} -tokens _ _ [] - = ParsecT $ \s _ _ eok _ -> - eok [] s $ unknownError s -tokens showTokens nextposs tts@(tok:toks) - = ParsecT $ \(State input pos u) cok cerr _eok eerr -> - let - errEof = (setErrorMessage (Expect (showTokens tts)) - (newErrorMessage (SysUnExpect "") pos)) - - errExpect x = (setErrorMessage (Expect (showTokens tts)) - (newErrorMessage (SysUnExpect (showTokens [x])) pos)) - - walk [] rs = ok rs - walk (t:ts) rs = do - sr <- uncons rs - case sr of - Nothing -> cerr $ errEof - Just (x,xs) | t == x -> walk ts xs - | otherwise -> cerr $ errExpect x - - ok rs = let pos' = nextposs pos tts - s' = State rs pos' u - in cok tts s' (newErrorUnknown pos') - in do - sr <- uncons input - case sr of - Nothing -> eerr $ errEof - Just (x,xs) - | tok == x -> walk toks xs - | otherwise -> eerr $ errExpect x - --- | The parser @try p@ behaves like parser @p@, except that it --- pretends that it hasn't consumed any input when an error occurs. --- --- This combinator is used whenever arbitrary look ahead is needed. --- Since it pretends that it hasn't consumed any input when @p@ fails, --- the ('<|>') combinator will try its second alternative even when the --- first parser failed while consuming input. --- --- The @try@ combinator can for example be used to distinguish --- identifiers and reserved words. Both reserved words and identifiers --- are a sequence of letters. Whenever we expect a certain reserved --- word where we can also expect an identifier we have to use the @try@ --- combinator. Suppose we write: --- --- > expr = letExpr <|> identifier "expression" --- > --- > letExpr = do{ string "let"; ... } --- > identifier = many1 letter --- --- If the user writes \"lexical\", the parser fails with: @unexpected --- \'x\', expecting \'t\' in \"let\"@. Indeed, since the ('<|>') combinator --- only tries alternatives when the first alternative hasn't consumed --- input, the @identifier@ parser is never tried (because the prefix --- \"le\" of the @string \"let\"@ parser is already consumed). The --- right behaviour can be obtained by adding the @try@ combinator: --- --- > expr = letExpr <|> identifier "expression" --- > --- > letExpr = do{ try (string "let"); ... } --- > identifier = many1 letter - -try :: ParsecT s u m a -> ParsecT s u m a -try p = - ParsecT $ \s cok _ eok eerr -> - unParser p s cok eerr eok eerr - --- | @lookAhead p@ parses @p@ without consuming any input. --- --- If @p@ fails and consumes some input, so does @lookAhead@. Combine with 'try' --- if this is undesirable. - -lookAhead :: (Stream s m t) => ParsecT s u m a -> ParsecT s u m a -lookAhead p = - ParsecT $ \s _ cerr eok eerr -> do - let eok' a _ _ = eok a s (newErrorUnknown (statePos s)) - unParser p s eok' cerr eok' eerr - --- | The parser @token showTok posFromTok testTok@ accepts a token @t@ --- with result @x@ when the function @testTok t@ returns @'Just' x@. The --- source position of the @t@ should be returned by @posFromTok t@ and --- the token can be shown using @showTok t@. --- --- This combinator is expressed in terms of 'tokenPrim'. --- It is used to accept user defined token streams. For example, --- suppose that we have a stream of basic tokens tupled with source --- positions. 
We can then define a parser that accepts single tokens as: --- --- > mytoken x --- > = token showTok posFromTok testTok --- > where --- > showTok (pos,t) = show t --- > posFromTok (pos,t) = pos --- > testTok (pos,t) = if x == t then Just t else Nothing - -token :: (Stream s Identity t) - => (t -> String) -- ^ Token pretty-printing function. - -> (t -> SourcePos) -- ^ Computes the position of a token. - -> (t -> Maybe a) -- ^ Matching function for the token to parse. - -> Parsec s u a -token showToken tokpos test = tokenPrim showToken nextpos test - where - nextpos _ tok ts = case runIdentity (uncons ts) of - Nothing -> tokpos tok - Just (tok',_) -> tokpos tok' - --- | The parser @tokenPrim showTok nextPos testTok@ accepts a token @t@ --- with result @x@ when the function @testTok t@ returns @'Just' x@. The --- token can be shown using @showTok t@. The position of the /next/ --- token should be returned when @nextPos@ is called with the current --- source position @pos@, the current token @t@ and the rest of the --- tokens @toks@, @nextPos pos t toks@. --- --- This is the most primitive combinator for accepting tokens. For --- example, the 'Text.Parsec.Char.char' parser could be implemented as: --- --- > char c --- > = tokenPrim showChar nextPos testChar --- > where --- > showChar x = "'" ++ x ++ "'" --- > testChar x = if x == c then Just x else Nothing --- > nextPos pos x xs = updatePosChar pos x - -tokenPrim :: (Stream s m t) - => (t -> String) -- ^ Token pretty-printing function. - -> (SourcePos -> t -> s -> SourcePos) -- ^ Next position calculating function. - -> (t -> Maybe a) -- ^ Matching function for the token to parse. - -> ParsecT s u m a -{-# INLINE tokenPrim #-} -tokenPrim showToken nextpos test = tokenPrimEx showToken nextpos Nothing test - -tokenPrimEx :: (Stream s m t) - => (t -> String) - -> (SourcePos -> t -> s -> SourcePos) - -> Maybe (SourcePos -> t -> s -> u -> u) - -> (t -> Maybe a) - -> ParsecT s u m a -{-# INLINE tokenPrimEx #-} -tokenPrimEx showToken nextpos Nothing test - = ParsecT $ \(State input pos user) cok _cerr _eok eerr -> do - r <- uncons input - case r of - Nothing -> eerr $ unexpectError "" pos - Just (c,cs) - -> case test c of - Just x -> let newpos = nextpos pos c cs - newstate = State cs newpos user - in seq newpos $ seq newstate $ - cok x newstate (newErrorUnknown newpos) - Nothing -> eerr $ unexpectError (showToken c) pos -tokenPrimEx showToken nextpos (Just nextState) test - = ParsecT $ \(State input pos user) cok _cerr _eok eerr -> do - r <- uncons input - case r of - Nothing -> eerr $ unexpectError "" pos - Just (c,cs) - -> case test c of - Just x -> let newpos = nextpos pos c cs - newUser = nextState pos c cs user - newstate = State cs newpos newUser - in seq newpos $ seq newstate $ - cok x newstate $ newErrorUnknown newpos - Nothing -> eerr $ unexpectError (showToken c) pos - -unexpectError :: String -> SourcePos -> ParseError -unexpectError msg pos = newErrorMessage (SysUnExpect msg) pos - - --- | @many p@ applies the parser @p@ /zero/ or more times. Returns a --- list of the returned values of @p@. --- --- > identifier = do{ c <- letter --- > ; cs <- many (alphaNum <|> char '_') --- > ; return (c:cs) --- > } - -many :: ParsecT s u m a -> ParsecT s u m [a] -many p - = do xs <- manyAccum (:) p - return (reverse xs) - --- | @skipMany p@ applies the parser @p@ /zero/ or more times, skipping --- its result. 
--- --- > spaces = skipMany space - -skipMany :: ParsecT s u m a -> ParsecT s u m () -skipMany p - = do _ <- manyAccum (\_ _ -> []) p - return () - -manyAccum :: (a -> [a] -> [a]) - -> ParsecT s u m a - -> ParsecT s u m [a] -manyAccum acc p = - ParsecT $ \s cok cerr eok _eerr -> - let walk xs x s' _err = - unParser p s' - (seq xs $ walk $ acc x xs) -- consumed-ok - cerr -- consumed-err - manyErr -- empty-ok - (\e -> cok (acc x xs) s' e) -- empty-err - in unParser p s (walk []) cerr manyErr (\e -> eok [] s e) - -manyErr :: a -manyErr = error "Text.ParserCombinators.Parsec.Prim.many: combinator 'many' is applied to a parser that accepts an empty string." - - --- < Running a parser: monadic (runPT) and pure (runP) - -runPT :: (Stream s m t) - => ParsecT s u m a -> u -> SourceName -> s -> m (Either ParseError a) -runPT p u name s - = do res <- runParsecT p (State s (initialPos name) u) - r <- parserReply res - case r of - Ok x _ _ -> return (Right x) - Error err -> return (Left err) - where - parserReply res - = case res of - Consumed r -> r - Empty r -> r - -runP :: (Stream s Identity t) - => Parsec s u a -> u -> SourceName -> s -> Either ParseError a -runP p u name s = runIdentity $ runPT p u name s - --- | The most general way to run a parser. @runParserT p state filePath --- input@ runs parser @p@ on the input list of tokens @input@, --- obtained from source @filePath@ with the initial user state @st@. --- The @filePath@ is only used in error messages and may be the empty --- string. Returns a computation in the underlying monad @m@ that return either a 'ParseError' ('Left') or a --- value of type @a@ ('Right'). - -runParserT :: (Stream s m t) - => ParsecT s u m a -> u -> SourceName -> s -> m (Either ParseError a) -runParserT = runPT - --- | The most general way to run a parser over the Identity monad. @runParser p state filePath --- input@ runs parser @p@ on the input list of tokens @input@, --- obtained from source @filePath@ with the initial user state @st@. --- The @filePath@ is only used in error messages and may be the empty --- string. Returns either a 'ParseError' ('Left') or a --- value of type @a@ ('Right'). --- --- > parseFromFile p fname --- > = do{ input <- readFile fname --- > ; return (runParser p () fname input) --- > } - -runParser :: (Stream s Identity t) - => Parsec s u a -> u -> SourceName -> s -> Either ParseError a -runParser = runP - --- | @parse p filePath input@ runs a parser @p@ over Identity without user --- state. The @filePath@ is only used in error messages and may be the --- empty string. Returns either a 'ParseError' ('Left') --- or a value of type @a@ ('Right'). --- --- > main = case (parse numbers "" "11, 2, 43") of --- > Left err -> print err --- > Right xs -> print (sum xs) --- > --- > numbers = commaSep integer - -parse :: (Stream s Identity t) - => Parsec s () a -> SourceName -> s -> Either ParseError a -parse p = runP p () - --- | The expression @parseTest p input@ applies a parser @p@ against --- input @input@ and prints the result to stdout. Used for testing --- parsers. - -parseTest :: (Stream s Identity t, Show a) - => Parsec s () a -> s -> IO () -parseTest p input - = case parse p "" input of - Left err -> do putStr "parse error at " - print err - Right x -> print x - --- < Parser state combinators - --- | Returns the current source position. See also 'SourcePos'. 
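As a small usage sketch of the parser state combinators introduced here, a parser that tags each word with the position at which it starts; the word/whitespace grammar is invented for the example:

    import Text.Parsec
    import Text.Parsec.String (Parser)

    -- Pair every word with the source position of its first letter.
    positionedWord :: Parser (SourcePos, String)
    positionedWord = do
      pos <- getPosition
      w   <- many1 letter
      spaces
      return (pos, w)

    main :: IO ()
    main = parseTest (many positionedWord) "foo bar\nbaz"
    -- prints each word paired with its starting line and column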
- -getPosition :: (Monad m) => ParsecT s u m SourcePos -getPosition = do state <- getParserState - return (statePos state) - --- | Returns the current input - -getInput :: (Monad m) => ParsecT s u m s -getInput = do state <- getParserState - return (stateInput state) - --- | @setPosition pos@ sets the current source position to @pos@. - -setPosition :: (Monad m) => SourcePos -> ParsecT s u m () -setPosition pos - = do _ <- updateParserState (\(State input _ user) -> State input pos user) - return () - --- | @setInput input@ continues parsing with @input@. The 'getInput' and --- @setInput@ functions can for example be used to deal with #include --- files. - -setInput :: (Monad m) => s -> ParsecT s u m () -setInput input - = do _ <- updateParserState (\(State _ pos user) -> State input pos user) - return () - --- | Returns the full parser state as a 'State' record. - -getParserState :: (Monad m) => ParsecT s u m (State s u) -getParserState = updateParserState id - --- | @setParserState st@ set the full parser state to @st@. - -setParserState :: (Monad m) => State s u -> ParsecT s u m (State s u) -setParserState st = updateParserState (const st) - --- | @updateParserState f@ applies function @f@ to the parser state. - -updateParserState :: (State s u -> State s u) -> ParsecT s u m (State s u) -updateParserState f = - ParsecT $ \s _ _ eok _ -> - let s' = f s - in eok s' s' $ unknownError s' - --- < User state combinators - --- | Returns the current user state. - -getState :: (Monad m) => ParsecT s u m u -getState = stateUser `liftM` getParserState - --- | @putState st@ set the user state to @st@. - -putState :: (Monad m) => u -> ParsecT s u m () -putState u = do _ <- updateParserState $ \s -> s { stateUser = u } - return () - --- | @modifyState f@ applies function @f@ to the user state. Suppose --- that we want to count identifiers in a source, we could use the user --- state as: --- --- > expr = do{ x <- identifier --- > ; modifyState (+1) --- > ; return (Id x) --- > } - -modifyState :: (Monad m) => (u -> u) -> ParsecT s u m () -modifyState f = do _ <- updateParserState $ \s -> s { stateUser = f (stateUser s) } - return () - --- XXX Compat - --- | An alias for putState for backwards compatibility. - -setState :: (Monad m) => u -> ParsecT s u m () -setState = putState - --- | An alias for modifyState for backwards compatibility. - -updateState :: (Monad m) => (u -> u) -> ParsecT s u m () -updateState = modifyState diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/String.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/String.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/String.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/String.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.String --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the file libraries/parsec/LICENSE) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Make Strings an instance of 'Stream' with 'Char' token type. 
--- ------------------------------------------------------------------------------ - -module Text.Parsec.String - ( Parser, GenParser, parseFromFile - ) where - -import Text.Parsec.Error -import Text.Parsec.Prim - -type Parser = Parsec String () -type GenParser tok st = Parsec [tok] st - --- | @parseFromFile p filePath@ runs a string parser @p@ on the --- input read from @filePath@ using 'Prelude.readFile'. Returns either a 'ParseError' --- ('Left') or a value of type @a@ ('Right'). --- --- > main = do{ result <- parseFromFile numbers "digits.txt" --- > ; case result of --- > Left err -> print err --- > Right xs -> print (sum xs) --- > } -parseFromFile :: Parser a -> FilePath -> IO (Either ParseError a) -parseFromFile p fname - = do input <- readFile fname - return (runP p () fname input) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Text/Lazy.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Text/Lazy.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Text/Lazy.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Text/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.String --- Copyright : (c) Antoine Latter 2011 --- License : BSD-style (see the file libraries/parsec/LICENSE) --- --- Maintainer : aslatter@gmail.com --- Stability : provisional --- Portability : portable --- --- Convenience definitions for working with lazy 'Text.Text'. --- ------------------------------------------------------------------------------ - -module Text.Parsec.Text.Lazy - ( Parser, GenParser, parseFromFile - ) where - -import qualified Data.Text.Lazy as Text -import Text.Parsec.Prim -import Text.Parsec.Error -import Data.Text.Lazy.IO as TL - -type Parser = Parsec Text.Text () -type GenParser st = Parsec Text.Text st - --- | @parseFromFile p filePath@ runs a strict text parser @p@ on the --- input read from @filePath@ using 'Data.Text.Lazy.IO.readFile'. Returns either a 'ParseError' --- ('Left') or a value of type @a@ ('Right'). --- --- > main = do{ result <- parseFromFile numbers "digits.txt" --- > ; case result of --- > Left err -> print err --- > Right xs -> print (sum xs) --- > } --- --- @since 3.1.14.0 - -parseFromFile :: Parser a -> FilePath -> IO (Either ParseError a) -parseFromFile p fname - = do input <- TL.readFile fname - return (runP p () fname input) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Text.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Text.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Text.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Text.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.String --- Copyright : (c) Antoine Latter 2011 --- License : BSD-style (see the file libraries/parsec/LICENSE) --- --- Maintainer : aslatter@gmail.com --- Stability : provisional --- Portability : portable --- --- Convinience definitions for working with 'Text.Text'. 
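A small usage sketch for these definitions, running a parser over a strict Text value held in memory rather than read from a file; the number grammar is invented for the example:

    import qualified Data.Text as T
    import Text.Parsec
    import Text.Parsec.Text (Parser)

    -- This Parser consumes strict Text instead of String.
    number :: Parser Integer
    number = read <$> many1 digit

    main :: IO ()
    main = parseTest number (T.pack "2020")   -- prints 2020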
--- ------------------------------------------------------------------------------ - -module Text.Parsec.Text - ( Parser, GenParser, parseFromFile - ) where - -import qualified Data.Text as Text -import Text.Parsec.Prim -import Text.Parsec.Error -import Data.Text.IO as T - -type Parser = Parsec Text.Text () -type GenParser st = Parsec Text.Text st - --- | @parseFromFile p filePath@ runs a strict text parser @p@ on the --- input read from @filePath@ using 'Data.Text.IO.readFile'. Returns either a 'ParseError' --- ('Left') or a value of type @a@ ('Right'). --- --- > main = do{ result <- parseFromFile numbers "digits.txt" --- > ; case result of --- > Left err -> print err --- > Right xs -> print (sum xs) --- > } --- --- @since 3.1.14.0 - -parseFromFile :: Parser a -> FilePath -> IO (Either ParseError a) -parseFromFile p fname - = do input <- T.readFile fname - return (runP p () fname input) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Token.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Token.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec/Token.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec/Token.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,738 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE DeriveDataTypeable #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE PolymorphicComponents #-} -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.Parsec.Token --- Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : non-portable (uses local universal quantification: PolymorphicComponents) --- --- A helper module to parse lexical elements (tokens). See 'makeTokenParser' --- for a description of how to use it. --- ------------------------------------------------------------------------------ - -{-# OPTIONS_GHC -fno-warn-name-shadowing #-} - -module Text.Parsec.Token - ( LanguageDef - , GenLanguageDef (..) - , TokenParser - , GenTokenParser (..) - , makeTokenParser - ) where - -import Data.Char ( isAlpha, toLower, toUpper, isSpace, digitToInt ) -#if MIN_VERSION_base(4,7,0) -import Data.Typeable ( Typeable ) -#endif -import Data.List ( nub, sort ) -import Control.Monad.Identity -import Text.Parsec.Prim -import Text.Parsec.Char -import Text.Parsec.Combinator - ------------------------------------------------------------ --- Language Definition ------------------------------------------------------------ - -type LanguageDef st = GenLanguageDef String st Identity - --- | The @GenLanguageDef@ type is a record that contains all parameterizable --- features of the "Text.Parsec.Token" module. The module "Text.Parsec.Language" --- contains some default definitions. - -data GenLanguageDef s u m - = LanguageDef { - - -- | Describes the start of a block comment. Use the empty string if the - -- language doesn't support block comments. For example \"\/*\". - - commentStart :: String, - - -- | Describes the end of a block comment. Use the empty string if the - -- language doesn't support block comments. For example \"*\/\". - - commentEnd :: String, - - -- | Describes the start of a line comment. Use the empty string if the - -- language doesn't support line comments. For example \"\/\/\". 
- - commentLine :: String, - - -- | Set to 'True' if the language supports nested block comments. - - nestedComments :: Bool, - - -- | This parser should accept any start characters of identifiers. For - -- example @letter \<|> char \'_\'@. - - identStart :: ParsecT s u m Char, - - -- | This parser should accept any legal tail characters of identifiers. - -- For example @alphaNum \<|> char \'_\'@. - - identLetter :: ParsecT s u m Char, - - -- | This parser should accept any start characters of operators. For - -- example @oneOf \":!#$%&*+.\/\<=>?\@\\\\^|-~\"@ - - opStart :: ParsecT s u m Char, - - -- | This parser should accept any legal tail characters of operators. - -- Note that this parser should even be defined if the language doesn't - -- support user-defined operators, or otherwise the 'reservedOp' - -- parser won't work correctly. - - opLetter :: ParsecT s u m Char, - - -- | The list of reserved identifiers. - - reservedNames :: [String], - - -- | The list of reserved operators. - - reservedOpNames:: [String], - - -- | Set to 'True' if the language is case sensitive. - - caseSensitive :: Bool - - } -#if MIN_VERSION_base(4,7,0) - deriving ( Typeable ) -#endif - ------------------------------------------------------------ --- A first class module: TokenParser ------------------------------------------------------------ - -type TokenParser st = GenTokenParser String st Identity - --- | The type of the record that holds lexical parsers that work on --- @s@ streams with state @u@ over a monad @m@. - -data GenTokenParser s u m - = TokenParser { - - -- | This lexeme parser parses a legal identifier. Returns the identifier - -- string. This parser will fail on identifiers that are reserved - -- words. Legal identifier (start) characters and reserved words are - -- defined in the 'LanguageDef' that is passed to - -- 'makeTokenParser'. An @identifier@ is treated as - -- a single token using 'try'. - - identifier :: ParsecT s u m String, - - -- | The lexeme parser @reserved name@ parses @symbol - -- name@, but it also checks that the @name@ is not a prefix of a - -- valid identifier. A @reserved@ word is treated as a single token - -- using 'try'. - - reserved :: String -> ParsecT s u m (), - - -- | This lexeme parser parses a legal operator. Returns the name of the - -- operator. This parser will fail on any operators that are reserved - -- operators. Legal operator (start) characters and reserved operators - -- are defined in the 'LanguageDef' that is passed to - -- 'makeTokenParser'. An @operator@ is treated as a - -- single token using 'try'. - - operator :: ParsecT s u m String, - - -- |The lexeme parser @reservedOp name@ parses @symbol - -- name@, but it also checks that the @name@ is not a prefix of a - -- valid operator. A @reservedOp@ is treated as a single token using - -- 'try'. - - reservedOp :: String -> ParsecT s u m (), - - - -- | This lexeme parser parses a single literal character. Returns the - -- literal character value. This parsers deals correctly with escape - -- sequences. The literal character is parsed according to the grammar - -- rules defined in the Haskell report (which matches most programming - -- languages quite closely). - - charLiteral :: ParsecT s u m Char, - - -- | This lexeme parser parses a literal string. Returns the literal - -- string value. This parsers deals correctly with escape sequences and - -- gaps. 
The literal string is parsed according to the grammar rules - -- defined in the Haskell report (which matches most programming - -- languages quite closely). - - stringLiteral :: ParsecT s u m String, - - -- | This lexeme parser parses a natural number (a positive whole - -- number). Returns the value of the number. The number can be - -- specified in 'decimal', 'hexadecimal' or - -- 'octal'. The number is parsed according to the grammar - -- rules in the Haskell report. - - natural :: ParsecT s u m Integer, - - -- | This lexeme parser parses an integer (a whole number). This parser - -- is like 'natural' except that it can be prefixed with - -- sign (i.e. \'-\' or \'+\'). Returns the value of the number. The - -- number can be specified in 'decimal', 'hexadecimal' - -- or 'octal'. The number is parsed according - -- to the grammar rules in the Haskell report. - - integer :: ParsecT s u m Integer, - - -- | This lexeme parser parses a floating point value. Returns the value - -- of the number. The number is parsed according to the grammar rules - -- defined in the Haskell report. - - float :: ParsecT s u m Double, - - -- | This lexeme parser parses either 'natural' or a 'float'. - -- Returns the value of the number. This parsers deals with - -- any overlap in the grammar rules for naturals and floats. The number - -- is parsed according to the grammar rules defined in the Haskell report. - - naturalOrFloat :: ParsecT s u m (Either Integer Double), - - -- | Parses a non-negative whole number in the decimal system. Returns the - -- value of the number. - - decimal :: ParsecT s u m Integer, - - -- | Parses a non-negative whole number in the hexadecimal system. The - -- number should be prefixed with \"x\" or \"X\". Returns the value of the - -- number. - - hexadecimal :: ParsecT s u m Integer, - - -- | Parses a non-negative whole number in the octal system. The number - -- should be prefixed with \"o\" or \"O\". Returns the value of the - -- number. - - octal :: ParsecT s u m Integer, - - -- | Lexeme parser @symbol s@ parses 'string' @s@ and skips - -- trailing white space. - - symbol :: String -> ParsecT s u m String, - - -- | @lexeme p@ first applies parser @p@ and then the 'whiteSpace' - -- parser, returning the value of @p@. Every lexical - -- token (lexeme) is defined using @lexeme@, this way every parse - -- starts at a point without white space. Parsers that use @lexeme@ are - -- called /lexeme/ parsers in this document. - -- - -- The only point where the 'whiteSpace' parser should be - -- called explicitly is the start of the main parser in order to skip - -- any leading white space. - -- - -- > mainParser = do{ whiteSpace - -- > ; ds <- many (lexeme digit) - -- > ; eof - -- > ; return (sum ds) - -- > } - - lexeme :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | Parses any white space. White space consists of /zero/ or more - -- occurrences of a 'space', a line comment or a block (multi - -- line) comment. Block comments may be nested. How comments are - -- started and ended is defined in the 'LanguageDef' - -- that is passed to 'makeTokenParser'. - - whiteSpace :: ParsecT s u m (), - - -- | Lexeme parser @parens p@ parses @p@ enclosed in parenthesis, - -- returning the value of @p@. - - parens :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | Lexeme parser @braces p@ parses @p@ enclosed in braces (\'{\' and - -- \'}\'), returning the value of @p@. - - braces :: forall a. 
ParsecT s u m a -> ParsecT s u m a, - - -- | Lexeme parser @angles p@ parses @p@ enclosed in angle brackets (\'\<\' - -- and \'>\'), returning the value of @p@. - - angles :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | Lexeme parser @brackets p@ parses @p@ enclosed in brackets (\'[\' - -- and \']\'), returning the value of @p@. - - brackets :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | DEPRECATED: Use 'brackets'. - - squares :: forall a. ParsecT s u m a -> ParsecT s u m a, - - -- | Lexeme parser |semi| parses the character \';\' and skips any - -- trailing white space. Returns the string \";\". - - semi :: ParsecT s u m String, - - -- | Lexeme parser @comma@ parses the character \',\' and skips any - -- trailing white space. Returns the string \",\". - - comma :: ParsecT s u m String, - - -- | Lexeme parser @colon@ parses the character \':\' and skips any - -- trailing white space. Returns the string \":\". - - colon :: ParsecT s u m String, - - -- | Lexeme parser @dot@ parses the character \'.\' and skips any - -- trailing white space. Returns the string \".\". - - dot :: ParsecT s u m String, - - -- | Lexeme parser @semiSep p@ parses /zero/ or more occurrences of @p@ - -- separated by 'semi'. Returns a list of values returned by - -- @p@. - - semiSep :: forall a . ParsecT s u m a -> ParsecT s u m [a], - - -- | Lexeme parser @semiSep1 p@ parses /one/ or more occurrences of @p@ - -- separated by 'semi'. Returns a list of values returned by @p@. - - semiSep1 :: forall a . ParsecT s u m a -> ParsecT s u m [a], - - -- | Lexeme parser @commaSep p@ parses /zero/ or more occurrences of - -- @p@ separated by 'comma'. Returns a list of values returned - -- by @p@. - - commaSep :: forall a . ParsecT s u m a -> ParsecT s u m [a], - - -- | Lexeme parser @commaSep1 p@ parses /one/ or more occurrences of - -- @p@ separated by 'comma'. Returns a list of values returned - -- by @p@. - - commaSep1 :: forall a . ParsecT s u m a -> ParsecT s u m [a] - } -#if MIN_VERSION_base(4,7,0) - deriving ( Typeable ) -#endif - ------------------------------------------------------------ --- Given a LanguageDef, create a token parser. ------------------------------------------------------------ - --- | The expression @makeTokenParser language@ creates a 'GenTokenParser' --- record that contains lexical parsers that are --- defined using the definitions in the @language@ record. --- --- The use of this function is quite stylized - one imports the --- appropiate language definition and selects the lexical parsers that --- are needed from the resulting 'GenTokenParser'. --- --- > module Main where --- > --- > import Text.Parsec --- > import qualified Text.Parsec.Token as P --- > import Text.Parsec.Language (haskellDef) --- > --- > -- The parser --- > ... --- > --- > expr = parens expr --- > <|> identifier --- > <|> ... --- > --- > --- > -- The lexer --- > lexer = P.makeTokenParser haskellDef --- > --- > parens = P.parens lexer --- > braces = P.braces lexer --- > identifier = P.identifier lexer --- > reserved = P.reserved lexer --- > ... 
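
The stylized usage that the documentation above sketches (and elides with "...") can be seen end to end in a small program. This is an illustrative sketch, not part of the patch; the toy expression grammar and the helper names are assumptions, while haskellDef comes from Text.Parsec.Language as in the original example.

    module Main (main) where

    import Text.Parsec
    import Text.Parsec.Language (haskellDef)
    import qualified Text.Parsec.Token as P

    type Parser = Parsec String ()

    -- The lexer: build one GenTokenParser and select the lexeme parsers we need.
    lexer :: P.TokenParser ()
    lexer = P.makeTokenParser haskellDef

    parens :: Parser a -> Parser a
    parens = P.parens lexer

    integer :: Parser Integer
    integer = P.integer lexer

    symbol :: String -> Parser String
    symbol = P.symbol lexer

    whiteSpace :: Parser ()
    whiteSpace = P.whiteSpace lexer

    -- A tiny left-associative '+' grammar over integers and parentheses,
    -- built entirely from lexeme parsers, so white space is handled for us.
    expr :: Parser Integer
    expr = term >>= rest
      where
        term   = parens expr <|> integer
        rest n = (do _ <- symbol "+"; m <- term; rest (n + m)) <|> return n

    main :: IO ()
    main = parseTest (whiteSpace >> expr <* eof) "  (1 + 2) + 3"
    -- prints: 6
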
- -makeTokenParser :: (Stream s m Char) - => GenLanguageDef s u m -> GenTokenParser s u m -makeTokenParser languageDef - = TokenParser{ identifier = identifier - , reserved = reserved - , operator = operator - , reservedOp = reservedOp - - , charLiteral = charLiteral - , stringLiteral = stringLiteral - , natural = natural - , integer = integer - , float = float - , naturalOrFloat = naturalOrFloat - , decimal = decimal - , hexadecimal = hexadecimal - , octal = octal - - , symbol = symbol - , lexeme = lexeme - , whiteSpace = whiteSpace - - , parens = parens - , braces = braces - , angles = angles - , brackets = brackets - , squares = brackets - , semi = semi - , comma = comma - , colon = colon - , dot = dot - , semiSep = semiSep - , semiSep1 = semiSep1 - , commaSep = commaSep - , commaSep1 = commaSep1 - } - where - - ----------------------------------------------------------- - -- Bracketing - ----------------------------------------------------------- - parens p = between (symbol "(") (symbol ")") p - braces p = between (symbol "{") (symbol "}") p - angles p = between (symbol "<") (symbol ">") p - brackets p = between (symbol "[") (symbol "]") p - - semi = symbol ";" - comma = symbol "," - dot = symbol "." - colon = symbol ":" - - commaSep p = sepBy p comma - semiSep p = sepBy p semi - - commaSep1 p = sepBy1 p comma - semiSep1 p = sepBy1 p semi - - - ----------------------------------------------------------- - -- Chars & Strings - ----------------------------------------------------------- - charLiteral = lexeme (between (char '\'') - (char '\'' "end of character") - characterChar ) - "character" - - characterChar = charLetter <|> charEscape - "literal character" - - charEscape = do{ _ <- char '\\'; escapeCode } - charLetter = satisfy (\c -> (c /= '\'') && (c /= '\\') && (c > '\026')) - - - - stringLiteral = lexeme ( - do{ str <- between (char '"') - (char '"' "end of string") - (many stringChar) - ; return (foldr (maybe id (:)) "" str) - } - "literal string") - - stringChar = do{ c <- stringLetter; return (Just c) } - <|> stringEscape - "string character" - - stringLetter = satisfy (\c -> (c /= '"') && (c /= '\\') && (c > '\026')) - - stringEscape = do{ _ <- char '\\' - ; do{ _ <- escapeGap ; return Nothing } - <|> do{ _ <- escapeEmpty; return Nothing } - <|> do{ esc <- escapeCode; return (Just esc) } - } - - escapeEmpty = char '&' - escapeGap = do{ _ <- many1 space - ; char '\\' "end of string gap" - } - - - - -- escape codes - escapeCode = charEsc <|> charNum <|> charAscii <|> charControl - "escape code" - - charControl = do{ _ <- char '^' - ; code <- upper - ; return (toEnum (fromEnum code - fromEnum 'A' + 1)) - } - - charNum = do{ code <- decimal - <|> do{ _ <- char 'o'; number 8 octDigit } - <|> do{ _ <- char 'x'; number 16 hexDigit } - ; if code > 0x10FFFF - then fail "invalid escape sequence" - else return (toEnum (fromInteger code)) - } - - charEsc = choice (map parseEsc escMap) - where - parseEsc (c,code) = do{ _ <- char c; return code } - - charAscii = choice (map parseAscii asciiMap) - where - parseAscii (asc,code) = try (do{ _ <- string asc; return code }) - - - -- escape code tables - escMap = zip ("abfnrtv\\\"\'") ("\a\b\f\n\r\t\v\\\"\'") - asciiMap = zip (ascii3codes ++ ascii2codes) (ascii3 ++ ascii2) - - ascii2codes = ["BS","HT","LF","VT","FF","CR","SO","SI","EM", - "FS","GS","RS","US","SP"] - ascii3codes = ["NUL","SOH","STX","ETX","EOT","ENQ","ACK","BEL", - "DLE","DC1","DC2","DC3","DC4","NAK","SYN","ETB", - "CAN","SUB","ESC","DEL"] - - ascii2 = 
['\BS','\HT','\LF','\VT','\FF','\CR','\SO','\SI', - '\EM','\FS','\GS','\RS','\US','\SP'] - ascii3 = ['\NUL','\SOH','\STX','\ETX','\EOT','\ENQ','\ACK', - '\BEL','\DLE','\DC1','\DC2','\DC3','\DC4','\NAK', - '\SYN','\ETB','\CAN','\SUB','\ESC','\DEL'] - - - ----------------------------------------------------------- - -- Numbers - ----------------------------------------------------------- - naturalOrFloat = lexeme (natFloat) "number" - - float = lexeme floating "float" - integer = lexeme int "integer" - natural = lexeme nat "natural" - - - -- floats - floating = do{ n <- decimal - ; fractExponent n - } - - - natFloat = do{ _ <- char '0' - ; zeroNumFloat - } - <|> decimalFloat - - zeroNumFloat = do{ n <- hexadecimal <|> octal - ; return (Left n) - } - <|> decimalFloat - <|> fractFloat (0 :: Integer) - <|> return (Left 0) - - decimalFloat = do{ n <- decimal - ; option (Left n) - (fractFloat n) - } - - fractFloat n = do{ f <- fractExponent n - ; return (Right f) - } - - fractExponent n = do{ fract <- fraction - ; expo <- option "" exponent' - ; readDouble (show n ++ fract ++ expo) - } - <|> - do{ expo <- exponent' - ; readDouble (show n ++ expo) - } - where - readDouble s = - case reads s of - [(x, "")] -> return x - _ -> parserZero - - fraction = do{ _ <- char '.' - ; digits <- many1 digit "fraction" - ; return ('.' : digits) - } - "fraction" - - exponent' = do{ _ <- oneOf "eE" - ; sign' <- fmap (:[]) (oneOf "+-") <|> return "" - ; e <- decimal "exponent" - ; return ('e' : sign' ++ show e) - } - "exponent" - - - -- integers and naturals - int = do{ f <- lexeme sign - ; n <- nat - ; return (f n) - } - - sign = (char '-' >> return negate) - <|> (char '+' >> return id) - <|> return id - - nat = zeroNumber <|> decimal - - zeroNumber = do{ _ <- char '0' - ; hexadecimal <|> octal <|> decimal <|> return 0 - } - "" - - decimal = number 10 digit - hexadecimal = do{ _ <- oneOf "xX"; number 16 hexDigit } - octal = do{ _ <- oneOf "oO"; number 8 octDigit } - - number base baseDigit - = do{ digits <- many1 baseDigit - ; let n = foldl (\x d -> base*x + toInteger (digitToInt d)) 0 digits - ; seq n (return n) - } - - ----------------------------------------------------------- - -- Operators & reserved ops - ----------------------------------------------------------- - reservedOp name = - lexeme $ try $ - do{ _ <- string name - ; notFollowedBy (opLetter languageDef) ("end of " ++ show name) - } - - operator = - lexeme $ try $ - do{ name <- oper - ; if (isReservedOp name) - then unexpected ("reserved operator " ++ show name) - else return name - } - - oper = - do{ c <- (opStart languageDef) - ; cs <- many (opLetter languageDef) - ; return (c:cs) - } - "operator" - - isReservedOp name = - isReserved (sort (reservedOpNames languageDef)) name - - - ----------------------------------------------------------- - -- Identifiers & Reserved words - ----------------------------------------------------------- - reserved name = - lexeme $ try $ - do{ _ <- caseString name - ; notFollowedBy (identLetter languageDef) ("end of " ++ show name) - } - - caseString name - | caseSensitive languageDef = string name - | otherwise = do{ walk name; return name } - where - walk [] = return () - walk (c:cs) = do{ _ <- caseChar c msg; walk cs } - - caseChar c | isAlpha c = char (toLower c) <|> char (toUpper c) - | otherwise = char c - - msg = show name - - - identifier = - lexeme $ try $ - do{ name <- ident - ; if (isReservedName name) - then unexpected ("reserved word " ++ show name) - else return name - } - - - ident - = do{ c <- 
identStart languageDef - ; cs <- many (identLetter languageDef) - ; return (c:cs) - } - "identifier" - - isReservedName name - = isReserved theReservedNames caseName - where - caseName | caseSensitive languageDef = name - | otherwise = map toLower name - - - isReserved names name - = scan names - where - scan [] = False - scan (r:rs) = case (compare r name) of - LT -> scan rs - EQ -> True - GT -> False - - theReservedNames - | caseSensitive languageDef = sort reserved - | otherwise = sort . map (map toLower) $ reserved - where - reserved = reservedNames languageDef - - - - ----------------------------------------------------------- - -- White space & symbols - ----------------------------------------------------------- - symbol name - = lexeme (string name) - - lexeme p - = do{ x <- p; whiteSpace; return x } - - - --whiteSpace - whiteSpace - | noLine && noMulti = skipMany (simpleSpace "") - | noLine = skipMany (simpleSpace <|> multiLineComment "") - | noMulti = skipMany (simpleSpace <|> oneLineComment "") - | otherwise = skipMany (simpleSpace <|> oneLineComment <|> multiLineComment "") - where - noLine = null (commentLine languageDef) - noMulti = null (commentStart languageDef) - - - simpleSpace = - skipMany1 (satisfy isSpace) - - oneLineComment = - do{ _ <- try (string (commentLine languageDef)) - ; skipMany (satisfy (/= '\n')) - ; return () - } - - multiLineComment = - do { _ <- try (string (commentStart languageDef)) - ; inComment - } - - inComment - | nestedComments languageDef = inCommentMulti - | otherwise = inCommentSingle - - inCommentMulti - = do{ _ <- try (string (commentEnd languageDef)) ; return () } - <|> do{ multiLineComment ; inCommentMulti } - <|> do{ skipMany1 (noneOf startEnd) ; inCommentMulti } - <|> do{ _ <- oneOf startEnd ; inCommentMulti } - "end of comment" - where - startEnd = nub (commentEnd languageDef ++ commentStart languageDef) - - inCommentSingle - = do{ _ <- try (string (commentEnd languageDef)); return () } - <|> do{ skipMany1 (noneOf startEnd) ; inCommentSingle } - <|> do{ _ <- oneOf startEnd ; inCommentSingle } - "end of comment" - where - startEnd = nub (commentEnd languageDef ++ commentStart languageDef) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/Parsec.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/Parsec.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,129 +0,0 @@ -{-# LANGUAGE Safe #-} - -{-| -Module : Text.Parsec -Copyright : (c) Daan Leijen 1999-2001, (c) Paolo Martini 2007 -License : BSD-style (see the LICENSE file) - -Maintainer : aslatter@gmail.com -Stability : provisional -Portability : portable - -This module includes everything you need to get started writing a -parser. - -By default this module is set up to parse character data. If you'd like -to parse the result of your own tokenizer you should start with the following -imports: - -@ - import Text.Parsec.Prim - import Text.Parsec.Combinator -@ - -Then you can implement your own version of 'satisfy' on top of the 'tokenPrim' -primitive. 
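
The advice just above, to implement your own 'satisfy' on top of 'tokenPrim' when parsing the output of your own tokenizer, is concrete enough to warrant a sketch. It is illustrative only and not part of the patch; the MyTok token type, the dummy position handling and the helper names are assumptions.

    module Main (main) where

    import Text.Parsec.Prim
    import Text.Parsec.Combinator (sepBy1)

    -- A hypothetical token type produced by some external tokenizer.
    data MyTok = TInt Integer | TPlus | TIdent String
      deriving (Show, Eq)

    type TokParser a = Parsec [MyTok] () a

    -- A 'satisfy'-style primitive: match the next token with a Maybe-returning
    -- function.  The tokens carry no positions here, so the source position is
    -- left unchanged; a real tokenizer would thread positions through instead.
    tokOf :: (MyTok -> Maybe a) -> TokParser a
    tokOf match = tokenPrim show (\pos _ _ -> pos) match

    satisfyTok :: (MyTok -> Bool) -> TokParser MyTok
    satisfyTok p = tokOf (\t -> if p t then Just t else Nothing)

    intTok :: TokParser Integer
    intTok = tokOf (\t -> case t of { TInt n -> Just n; _ -> Nothing })

    main :: IO ()
    main = print (parse (intTok `sepBy1` satisfyTok (== TPlus)) "<tokens>"
                        [TInt 1, TPlus, TInt 2, TPlus, TInt 3])
    -- prints: Right [1,2,3]
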
- --} - -module Text.Parsec - ( -- * Parsers - ParsecT - , Parsec - , token - , tokens - , runParserT - , runParser - , parse - , parseTest - , getPosition - , getInput - , getState - , putState - , modifyState - -- * Combinators - , (<|>) - , () - , label - , labels - , try - , unexpected - , choice - , many - , many1 - , skipMany - , skipMany1 - , count - , between - , option - , optionMaybe - , optional - , sepBy - , sepBy1 - , endBy - , endBy1 - , sepEndBy - , sepEndBy1 - , chainl - , chainl1 - , chainr - , chainr1 - , eof - , notFollowedBy - , manyTill - , lookAhead - , anyToken - -- * Character Parsing - , module Text.Parsec.Char - -- * Error messages - , ParseError - , errorPos - -- * Position - , SourcePos - , SourceName, Line, Column - , sourceName, sourceLine, sourceColumn - , incSourceLine, incSourceColumn - , setSourceLine, setSourceColumn, setSourceName - -- * Debugging - -- - -- | As a more comprehensive alternative for debugging Parsec parsers, - -- there's also the [parsec-free](http://hackage.haskell.org/package/parsec-free) - -- package. - -- - , parserTrace, parserTraced - -- * Low-level operations - , manyAccum - , tokenPrim - , tokenPrimEx - , runPT - , unknownError - , sysUnExpectError - , mergeErrorReply - , getParserState - , setParserState - , updateParserState - , Stream (..) - , runParsecT - , mkPT - , runP - , Consumed (..) - , Reply (..) - , State (..) - , setPosition - , setInput - -- * Other stuff - , setState - , updateState - , parsecMap - , parserReturn - , parserBind - , parserFail - , parserZero - , parserPlus - ) where - -import Text.Parsec.Pos -import Text.Parsec.Error -import Text.Parsec.Prim -import Text.Parsec.Char -import Text.Parsec.Combinator diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Char.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Char.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Char.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Char.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Char --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Char - ( CharParser, - spaces, - space, - newline, - tab, - upper, - lower, - alphaNum, - letter, - digit, - hexDigit, - octDigit, - char, - string, - anyChar, - oneOf, - noneOf, - satisfy - ) where - - -import Text.Parsec.Char -import Text.Parsec.String - -type CharParser st = GenParser Char st diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Combinator.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Combinator.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Combinator.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Combinator.hs 
1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Combinator --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Combinator - ( choice, - count, - between, - option, - optionMaybe, - optional, - skipMany1, - many1, - sepBy, - sepBy1, - endBy, - endBy1, - sepEndBy, - sepEndBy1, - chainl, - chainl1, - chainr, - chainr1, - eof, - notFollowedBy, - manyTill, - lookAhead, - anyToken - ) where - - -import Text.Parsec.Combinator diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Error.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Error.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Error.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Error.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Error --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Error - ( Message (SysUnExpect,UnExpect,Expect,Message), - messageString, - messageCompare, - messageEq, - ParseError, - errorPos, - errorMessages, - errorIsUnknown, - showErrorMessages, - newErrorMessage, - newErrorUnknown, - addErrorMessage, - setErrorPos, - setErrorMessage, - mergeError - ) where - -import Text.Parsec.Error - - -messageCompare :: Message -> Message -> Ordering -messageCompare = compare - -messageEq :: Message -> Message -> Bool -messageEq = (==) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Expr.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Expr.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Expr.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Expr.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Expr --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Expr - ( Assoc (AssocNone,AssocLeft,AssocRight), - Operator(..), - OperatorTable, - buildExpressionParser - ) where - -import 
Text.Parsec.Expr(Assoc(..)) -import qualified Text.Parsec.Expr as N -import Text.ParserCombinators.Parsec(GenParser) - -import Control.Monad.Identity - -data Operator tok st a = Infix (GenParser tok st (a -> a -> a)) Assoc - | Prefix (GenParser tok st (a -> a)) - | Postfix (GenParser tok st (a -> a)) - -type OperatorTable tok st a = [[Operator tok st a]] - -convert :: Operator tok st a -> N.Operator [tok] st Identity a -convert (Infix p a) = N.Infix p a -convert (Prefix p) = N.Prefix p -convert (Postfix p) = N.Postfix p - -buildExpressionParser :: OperatorTable tok st a - -> GenParser tok st a - -> GenParser tok st a -buildExpressionParser = N.buildExpressionParser . map (map convert) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Language.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Language.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Language.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Language.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Language --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Language - ( haskellDef, - haskell, - mondrianDef, - mondrian, - emptyDef, - haskellStyle, - javaStyle, - LanguageDef, - GenLanguageDef(..), - ) where - -import Text.Parsec.Token -import Text.Parsec.Language diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Perm.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Perm.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Perm.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Perm.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Perm --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Perm - ( PermParser, - permute, - (<||>), - (<$$>), - (<|?>), - (<$?>) - ) where - -import Text.Parsec.Perm diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Pos.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Pos.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Pos.hs 2019-07-02 17:10:02.000000000 +0000 +++ 
cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Pos.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Pos --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Pos - ( SourceName, - Line, - Column, - SourcePos, - sourceLine, - sourceColumn, - sourceName, - incSourceLine, - incSourceColumn, - setSourceLine, - setSourceColumn, - setSourceName, - newPos, - initialPos, - updatePosChar, - updatePosString - ) where - - -import Text.Parsec.Pos diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Prim.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Prim.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Prim.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Prim.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Prim --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Prim - ( (), - (<|>), - Parser, - GenParser, - runParser, - parse, - parseFromFile, - parseTest, - token, - tokens, - tokenPrim, - tokenPrimEx, - try, - label, - labels, - unexpected, - pzero, - many, - skipMany, - getState, - setState, - updateState, - getPosition, - setPosition, - getInput, - setInput, - State(..), - getParserState, - setParserState - ) where - -import Text.Parsec.Prim hiding (runParser, try) -import qualified Text.Parsec.Prim as N -- 'N' for 'New' -import Text.Parsec.String - -import Text.Parsec.Error -import Text.Parsec.Pos - -pzero :: GenParser tok st a -pzero = parserZero - -runParser :: GenParser tok st a - -> st - -> SourceName - -> [tok] - -> Either ParseError a -runParser = N.runParser - -try :: GenParser tok st a -> GenParser tok st a -try = N.try diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Token.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Token.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Token.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec/Token.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec.Token --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- 
Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec.Token - ( LanguageDef, - GenLanguageDef(..), - TokenParser, - GenTokenParser(..), - makeTokenParser - ) where - -import Text.Parsec.Token diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/src/Text/ParserCombinators/Parsec.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -{-# LANGUAGE Safe #-} - ------------------------------------------------------------------------------ --- | --- Module : Text.ParserCombinators.Parsec --- Copyright : (c) Paolo Martini 2007 --- License : BSD-style (see the LICENSE file) --- --- Maintainer : derek.a.elkins@gmail.com --- Stability : provisional --- Portability : portable --- --- Parsec compatibility module --- ------------------------------------------------------------------------------ - -module Text.ParserCombinators.Parsec - ( -- complete modules - module Text.ParserCombinators.Parsec.Prim - , module Text.ParserCombinators.Parsec.Combinator - , module Text.ParserCombinators.Parsec.Char - - -- module Text.ParserCombinators.Parsec.Error - , ParseError - , errorPos - - -- module Text.ParserCombinators.Parsec.Pos - , SourcePos - , SourceName, Line, Column - , sourceName, sourceLine, sourceColumn - , incSourceLine, incSourceColumn - , setSourceLine, setSourceColumn, setSourceName - - ) where - -import Text.Parsec.String() - -import Text.ParserCombinators.Parsec.Prim -import Text.ParserCombinators.Parsec.Combinator -import Text.ParserCombinators.Parsec.Char - -import Text.ParserCombinators.Parsec.Error -import Text.ParserCombinators.Parsec.Pos diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Bugs/Bug2.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Bugs/Bug2.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Bugs/Bug2.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Bugs/Bug2.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ - -module Bugs.Bug2 - ( main - ) where - -import Test.HUnit hiding ( Test ) -import Test.Framework -import Test.Framework.Providers.HUnit - -import Text.Parsec -import Text.Parsec.String -import qualified Text.Parsec.Token as P -import Text.Parsec.Language (haskellDef) - -main :: Test -main = - testCase "Control Char Parsing (#2)" $ - parseString "\"test\\^Bstring\"" @?= "test\^Bstring" - - where - parseString :: String -> String - parseString input = - case parse parser "Example" input of - Left{} -> error "Parse failure" - Right str -> str - - parser :: Parser String - parser = P.stringLiteral $ P.makeTokenParser haskellDef \ No newline at end of file diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Bugs/Bug35.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Bugs/Bug35.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Bugs/Bug35.hs 2019-07-02 17:10:02.000000000 +0000 +++ 
cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Bugs/Bug35.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ - -module Bugs.Bug35 (main) where - -import Text.Parsec -import Text.Parsec.Language -import Text.Parsec.String -import qualified Text.Parsec.Token as Token - -import Test.HUnit hiding (Test) -import Test.Framework -import Test.Framework.Providers.HUnit - -trickyFloats :: [String] -trickyFloats = - [ "1.5339794352098402e-118" - , "2.108934760892056e-59" - , "2.250634744599241e-19" - , "5.0e-324" - , "5.960464477539063e-8" - , "0.25996181067141905" - , "0.3572019862807257" - , "0.46817723004874223" - , "0.9640035681058178" - , "4.23808622486133" - , "4.540362294799751" - , "5.212384849884261" - , "13.958257048123212" - , "32.96176575630599" - , "38.47735512322269" - ] - -float :: Parser Double -float = Token.float (Token.makeTokenParser emptyDef) - -testBatch :: Assertion -testBatch = mapM_ testFloat trickyFloats - where testFloat x = parse float "" x @?= Right (read x :: Double) - -main :: Test -main = testCase "Quality of output of Text.Parsec.Token.float (#35)" testBatch diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Bugs/Bug6.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Bugs/Bug6.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Bugs/Bug6.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Bugs/Bug6.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ - -module Bugs.Bug6 - ( main - ) where - -import Test.HUnit hiding ( Test ) -import Test.Framework -import Test.Framework.Providers.HUnit - -import Text.Parsec -import Text.Parsec.String - -import Util - -main :: Test -main = - testCase "Look-ahead preserving error location (#6)" $ - parseErrors variable "return" @?= ["'return' is a reserved keyword"] - -variable :: Parser String -variable = do - x <- lookAhead (many1 letter) - if x == "return" - then fail "'return' is a reserved keyword" - else string x diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Bugs/Bug9.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Bugs/Bug9.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Bugs/Bug9.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Bugs/Bug9.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -module Bugs.Bug9 ( main ) where - -import Control.Applicative ((<$), (<$>), (<*)) -import Text.Parsec -import Text.Parsec.Expr -import Text.Parsec.Language (haskellStyle) -import Text.Parsec.String (Parser) -import qualified Text.Parsec.Token as P - -import Test.Framework -import Test.Framework.Providers.HUnit -import Test.HUnit hiding (Test) - -import Util - -data Expr = Const Integer | Op Expr Expr - deriving Show - -main :: Test -main = - testCase "Tracing of current position in error message (#9)" - $ result @?= ["unexpected '>'","expecting operator or end of input"] - - where - result :: [String] - result = parseErrors parseTopLevel "4 >> 5" - --- Syntax analaysis - -parseTopLevel :: Parser Expr -parseTopLevel = parseExpr <* eof - -parseExpr :: Parser Expr -parseExpr = buildExpressionParser table (Const <$> integer) - where - table = [[ Infix (Op <$ reserved ">>>") AssocLeft ]] - - -- Lexical analysis - - lexer = P.makeTokenParser haskellStyle { P.reservedOpNames = [">>>"] } - - integer = P.integer lexer - reserved = P.reserved 
lexer - _reservedOp = P.reservedOp lexer - diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Bugs.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Bugs.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Bugs.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Bugs.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ - -module Bugs - ( bugs - ) where - -import Test.Framework - -import qualified Bugs.Bug2 -import qualified Bugs.Bug6 -import qualified Bugs.Bug9 -import qualified Bugs.Bug35 - -bugs :: [Test] -bugs = [ Bugs.Bug2.main - , Bugs.Bug6.main - , Bugs.Bug9.main - , Bugs.Bug35.main - ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Features/Feature80.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Features/Feature80.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Features/Feature80.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Features/Feature80.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,48 +0,0 @@ -module Features.Feature80 ( main ) where - -import Control.Applicative (pure) -import Control.Monad.Identity -import Data.List.NonEmpty -import Data.Semigroup -import Test.Framework -import Test.Framework.Providers.HUnit -import Test.HUnit hiding (Test) - -import Text.Parsec - -main :: Test -main = - testCase "Monoid instance (#80)" $ do - parseString (as <> bs) "aabbb" @?= "aabbb" - parseString (mempty <> as) "aabbb" @?= "aa" - parseString (as <> mempty) "aabbb" @?= "aa" - parseString (sconcat $ fromList [as, mempty, bs]) "aabbb" @?= "aabbb" - parseString (mconcat [as, mempty, bs]) "aabbb" @?= "aabbb" - parseString (mempty :: ParsecT String () Identity String) "aabbb" @?= "" - parseString (stimes (2::Int) str_a) "aabbb" @?= "aa" - parseFail (stimes (3::Int) str_a) "aabbb" @?= "no parse" - parseString ((one ch_a) <> (one ch_a) <> bs) "aabbb" @?= "aabbb" - - where - one = fmap pure - - as :: ParsecT String () Identity String - as = many $ char 'a' - bs :: ParsecT String () Identity String - bs = many $ char 'b' - ch_a :: ParsecT String () Identity Char - ch_a = char 'a' - str_a :: ParsecT String () Identity String - str_a = string "a" - - parseString :: ParsecT String () Identity String -> String -> String - parseString p input = - case parse p "Example" input of - Left{} -> error "Parse failure" - Right str -> str - - parseFail :: ParsecT String () Identity String -> String -> String - parseFail p input = - case parse p "Example" input of - Left{} -> "no parse" - Right _ -> error "Parsed but shouldn't" diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Features.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Features.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Features.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Features.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -module Features - ( features - ) where - -import Test.Framework - -import qualified Features.Feature80 - -features :: [Test] -features = [ - Features.Feature80.main - ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Main.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Main.hs --- 
cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Main.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Main.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ - -import Test.Framework - -import Bugs ( bugs ) -import Features ( features ) - -main :: IO () -main = do - defaultMain - [ testGroup "Bugs" bugs - , testGroup "Features" features - ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Util.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Util.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/parsec-3.1.14.0/test/Util.hs 2019-07-02 17:10:02.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/parsec-3.1.14.0/test/Util.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ - -module Util where - -import Text.Parsec -import Text.Parsec.String ( Parser ) - --- | Returns the error messages associated --- with a failed parse. -parseErrors :: Parser a -> String -> [String] -parseErrors p input = - case parse p "" input of - Left err -> - drop 1 $ lines $ show err - Right{} -> [] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/changelog.md cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/changelog.md --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/changelog.md 2018-09-22 10:47:05.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/changelog.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -# Changelog for [`stm` package](http://hackage.haskell.org/package/stm) - -## 2.5.0.0 *Sep 2018* - - * Removed `alwaysSucceeds` and `always`, GHC's invariant checking primitives. (GHC #14324) - - * Add `lengthTBQueue` to `Control.Concurrent.STM.TBQueue` (gh-9) - - * Add `stateTVar :: TVar s -> (s -> (a, s)) -> STM a` combinator (gh-14) - - * Switched `newTBQueue` and `newTBQueueIO` to accept `Natural` as size (gh-17) - - * Switched `signalTSemN` and `newTSem` to accept `Natural` and `Integer` respectively (gh-17) - ----- - -#### 2.4.5.1 *Sep 2018* - - * Fix incorrect bookkeeping of write capacity in `flushTBQueue` (gh-9) - - * Avoid redundant `writeTVar`s in `flushTQueue` to avoid unncessarily - invalidating other transactions (gh-6) - -### 2.4.5.0 *Feb 2018* - - * Fix space leak in `TBQueue` (gh-2, GHC#14494) - - * Make `signalTSem` resilient against `Int` overflows (gh-4) - - * Make definition of `readTQueue` consistent with `readTBQueue` (gh-3, GHC#9539) - - * Add `flushTQueue` to `Control.Concurrent.STM.TQueue` (gh-1) - - * Add `flushTBQueue` to `Control.Concurrent.STM.TBQueue` (gh-1) - - * Add `signalTSemN` operation (gh-5) - - -#### 2.4.4.1 *Dec 2015* - - * Add support for `base-4.9.0.0` - - * Drop support for GHC 6.12 / `base-4.2` - -### 2.4.4 *Dec 2014* - - * Add support for `base-4.8.0.0` - - * Tighten Safe Haskell bounds - - * Add `mkWeakTMVar` to `Control.Concurrent.STM.TMVar` - - * Add `@since`-annotations - -### 2.4.3 *Mar 2014* - - * Update behaviour of `newBroadcastTChanIO` to match - `newBroadcastTChan` in causing an error on a read from the - broadcast channel - - * Add `mkWeakTVar` - - * Add `isFullTBQueue` - - * Fix `TChan` created via `newBroadcastTChanIO` to throw same - exception on a `readTChan` as when created via `newBroadcastTChan` - - * Update to Cabal 1.10 format - -### 2.4.2 *Nov 2012* - - * Add `Control.Concurrent.STM.TSem` (transactional semaphore) - - * Add Applicative/Alternative instances of STM for GHC <7.0 - - * Throw proper 
exception when `readTChan` called on a broadcast `TChan` - -## 2.4 *Jul 2012* - - * Add `Control.Concurrent.STM.TQueue` (a faster `TChan`) - - * Add `Control.Concurrent.STM.TBQueue` (a bounded channel based on `TQueue`) - - * Add `Eq` instance for `TChan` - - * Add `newBroadcastTChan` and `newBroadcastTChanIO` - - * Some performance improvements for `TChan` - - * Add `cloneTChan` diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TArray.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TArray.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TArray.hs 2018-09-22 10:47:04.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TArray.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -{-# LANGUAGE CPP, DeriveDataTypeable, FlexibleInstances, MultiParamTypeClasses #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TArray --- Copyright : (c) The University of Glasgow 2005 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- TArrays: transactional arrays, for use in the STM monad --- ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TArray ( - TArray -) where - -import Data.Array (Array, bounds) -import Data.Array.Base (listArray, arrEleBottom, unsafeAt, MArray(..), - IArray(numElements)) -import Data.Ix (rangeSize) -import Data.Typeable (Typeable) -import Control.Concurrent.STM.TVar (TVar, newTVar, readTVar, writeTVar) -#ifdef __GLASGOW_HASKELL__ -import GHC.Conc (STM) -#else -import Control.Sequential.STM (STM) -#endif - --- |TArray is a transactional array, supporting the usual 'MArray' --- interface for mutable arrays. --- --- It is currently implemented as @Array ix (TVar e)@, --- but it may be replaced by a more efficient implementation in the future --- (the interface will remain the same, however). --- -newtype TArray i e = TArray (Array i (TVar e)) deriving (Eq, Typeable) - -instance MArray TArray e STM where - getBounds (TArray a) = return (bounds a) - newArray b e = do - a <- rep (rangeSize b) (newTVar e) - return $ TArray (listArray b a) - newArray_ b = do - a <- rep (rangeSize b) (newTVar arrEleBottom) - return $ TArray (listArray b a) - unsafeRead (TArray a) i = readTVar $ unsafeAt a i - unsafeWrite (TArray a) i e = writeTVar (unsafeAt a i) e - getNumElements (TArray a) = return (numElements a) - --- | Like 'replicateM' but uses an accumulator to prevent stack overflows. --- Unlike 'replicateM' the returned list is in reversed order. --- This doesn't matter though since this function is only used to create --- arrays with identical elements. 
-rep :: Monad m => Int -> m a -> m [a] -rep n m = go n [] - where - go 0 xs = return xs - go i xs = do - x <- m - go (i-1) (x:xs) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TBQueue.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TBQueue.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TBQueue.hs 2018-09-22 10:47:05.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TBQueue.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,227 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# LANGUAGE DeriveDataTypeable #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TBQueue --- Copyright : (c) The University of Glasgow 2012 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- 'TBQueue' is a bounded version of 'TQueue'. The queue has a maximum --- capacity set when it is created. If the queue already contains the --- maximum number of elements, then 'writeTBQueue' blocks until an --- element is removed from the queue. --- --- The implementation is based on the traditional purely-functional --- queue representation that uses two lists to obtain amortised /O(1)/ --- enqueue and dequeue operations. --- --- @since 2.4 ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TBQueue ( - -- * TBQueue - TBQueue, - newTBQueue, - newTBQueueIO, - readTBQueue, - tryReadTBQueue, - flushTBQueue, - peekTBQueue, - tryPeekTBQueue, - writeTBQueue, - unGetTBQueue, - lengthTBQueue, - isEmptyTBQueue, - isFullTBQueue, - ) where - -import Data.Typeable (Typeable) -import GHC.Conc (STM, TVar, newTVar, newTVarIO, orElse, - readTVar, retry, writeTVar) -import Numeric.Natural (Natural) -import Prelude hiding (read) - --- | 'TBQueue' is an abstract type representing a bounded FIFO channel. --- --- @since 2.4 -data TBQueue a - = TBQueue {-# UNPACK #-} !(TVar Natural) -- CR: read capacity - {-# UNPACK #-} !(TVar [a]) -- R: elements waiting to be read - {-# UNPACK #-} !(TVar Natural) -- CW: write capacity - {-# UNPACK #-} !(TVar [a]) -- W: elements written (head is most recent) - !(Natural) -- CAP: initial capacity - deriving Typeable - -instance Eq (TBQueue a) where - TBQueue a _ _ _ _ == TBQueue b _ _ _ _ = a == b - --- Total channel capacity remaining is CR + CW. Reads only need to --- access CR, writes usually need to access only CW but sometimes need --- CR. So in the common case we avoid contention between CR and CW. --- --- - when removing an element from R: --- CR := CR + 1 --- --- - when adding an element to W: --- if CW is non-zero --- then CW := CW - 1 --- then if CR is non-zero --- then CW := CR - 1; CR := 0 --- else **FULL** - --- | Builds and returns a new instance of 'TBQueue'. -newTBQueue :: Natural -- ^ maximum number of elements the queue can hold - -> STM (TBQueue a) -newTBQueue size = do - read <- newTVar [] - write <- newTVar [] - rsize <- newTVar 0 - wsize <- newTVar size - return (TBQueue rsize read wsize write size) - --- |@IO@ version of 'newTBQueue'. 
This is useful for creating top-level --- 'TBQueue's using 'System.IO.Unsafe.unsafePerformIO', because using --- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't --- possible. -newTBQueueIO :: Natural -> IO (TBQueue a) -newTBQueueIO size = do - read <- newTVarIO [] - write <- newTVarIO [] - rsize <- newTVarIO 0 - wsize <- newTVarIO size - return (TBQueue rsize read wsize write size) - --- |Write a value to a 'TBQueue'; blocks if the queue is full. -writeTBQueue :: TBQueue a -> a -> STM () -writeTBQueue (TBQueue rsize _read wsize write _size) a = do - w <- readTVar wsize - if (w > 0) - then do writeTVar wsize $! w - 1 - else do - r <- readTVar rsize - if (r > 0) - then do writeTVar rsize 0 - writeTVar wsize $! r - 1 - else retry - listend <- readTVar write - writeTVar write (a:listend) - --- |Read the next value from the 'TBQueue'. -readTBQueue :: TBQueue a -> STM a -readTBQueue (TBQueue rsize read _wsize write _size) = do - xs <- readTVar read - r <- readTVar rsize - writeTVar rsize $! r + 1 - case xs of - (x:xs') -> do - writeTVar read xs' - return x - [] -> do - ys <- readTVar write - case ys of - [] -> retry - _ -> do - let (z:zs) = reverse ys -- NB. lazy: we want the transaction to be - -- short, otherwise it will conflict - writeTVar write [] - writeTVar read zs - return z - --- | A version of 'readTBQueue' which does not retry. Instead it --- returns @Nothing@ if no value is available. -tryReadTBQueue :: TBQueue a -> STM (Maybe a) -tryReadTBQueue c = fmap Just (readTBQueue c) `orElse` return Nothing - --- | Efficiently read the entire contents of a 'TBQueue' into a list. This --- function never retries. --- --- @since 2.4.5 -flushTBQueue :: TBQueue a -> STM [a] -flushTBQueue (TBQueue rsize read wsize write size) = do - xs <- readTVar read - ys <- readTVar write - if null xs && null ys - then return [] - else do - writeTVar read [] - writeTVar write [] - writeTVar rsize 0 - writeTVar wsize size - return (xs ++ reverse ys) - --- | Get the next value from the @TBQueue@ without removing it, --- retrying if the channel is empty. -peekTBQueue :: TBQueue a -> STM a -peekTBQueue c = do - x <- readTBQueue c - unGetTBQueue c x - return x - --- | A version of 'peekTBQueue' which does not retry. Instead it --- returns @Nothing@ if no value is available. -tryPeekTBQueue :: TBQueue a -> STM (Maybe a) -tryPeekTBQueue c = do - m <- tryReadTBQueue c - case m of - Nothing -> return Nothing - Just x -> do - unGetTBQueue c x - return m - --- |Put a data item back onto a channel, where it will be the next item read. --- Blocks if the queue is full. -unGetTBQueue :: TBQueue a -> a -> STM () -unGetTBQueue (TBQueue rsize read wsize _write _size) a = do - r <- readTVar rsize - if (r > 0) - then do writeTVar rsize $! r - 1 - else do - w <- readTVar wsize - if (w > 0) - then writeTVar wsize $! w - 1 - else retry - xs <- readTVar read - writeTVar read (a:xs) - --- |Return the length of a 'TBQueue'. --- --- @since 2.5.0.0 -lengthTBQueue :: TBQueue a -> STM Natural -lengthTBQueue (TBQueue rsize _read wsize _write size) = do - r <- readTVar rsize - w <- readTVar wsize - return $! size - r - w - --- |Returns 'True' if the supplied 'TBQueue' is empty. -isEmptyTBQueue :: TBQueue a -> STM Bool -isEmptyTBQueue (TBQueue _rsize read _wsize write _size) = do - xs <- readTVar read - case xs of - (_:_) -> return False - [] -> do ys <- readTVar write - case ys of - [] -> return True - _ -> return False - --- |Returns 'True' if the supplied 'TBQueue' is full. 
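-- [Editorial sketch; not part of the removed stm sources or of this patch.]
-- Putting the bounded-queue API above together: a producer that blocks in
-- 'writeTBQueue' whenever the queue already holds its full capacity, and a
-- consumer that drains it.  The capacity 4 and the name 'boundedDemo' are
-- invented for the example.
import Control.Concurrent (forkIO)
import Control.Concurrent.STM (atomically)
import Control.Concurrent.STM.TBQueue (newTBQueueIO, readTBQueue, writeTBQueue)
import Control.Monad (forM_, replicateM)

boundedDemo :: IO [Int]
boundedDemo = do
  q <- newTBQueueIO 4                            -- capacity fixed at creation
  _ <- forkIO $ forM_ [1 .. 10] $ \i ->
         atomically (writeTBQueue q i)           -- retries while the queue is full
  replicateM 10 (atomically (readTBQueue q))     -- frees capacity as it reads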
--- --- @since 2.4.3 -isFullTBQueue :: TBQueue a -> STM Bool -isFullTBQueue (TBQueue rsize _read wsize _write _size) = do - w <- readTVar wsize - if (w > 0) - then return False - else do - r <- readTVar rsize - if (r > 0) - then return False - else return True diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TChan.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TChan.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TChan.hs 2018-09-22 10:47:05.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TChan.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,209 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-name-shadowing #-} -{-# LANGUAGE CPP, DeriveDataTypeable #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TChan --- Copyright : (c) The University of Glasgow 2004 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- TChan: Transactional channels --- (GHC only) --- ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TChan ( -#ifdef __GLASGOW_HASKELL__ - -- * TChans - TChan, - - -- ** Construction - newTChan, - newTChanIO, - newBroadcastTChan, - newBroadcastTChanIO, - dupTChan, - cloneTChan, - - -- ** Reading and writing - readTChan, - tryReadTChan, - peekTChan, - tryPeekTChan, - writeTChan, - unGetTChan, - isEmptyTChan -#endif - ) where - -#ifdef __GLASGOW_HASKELL__ -import GHC.Conc - -import Data.Typeable (Typeable) - -#define _UPK_(x) {-# UNPACK #-} !(x) - --- | 'TChan' is an abstract type representing an unbounded FIFO channel. -data TChan a = TChan _UPK_(TVar (TVarList a)) - _UPK_(TVar (TVarList a)) - deriving (Eq, Typeable) - -type TVarList a = TVar (TList a) -data TList a = TNil | TCons a _UPK_(TVarList a) - --- |Build and return a new instance of 'TChan' -newTChan :: STM (TChan a) -newTChan = do - hole <- newTVar TNil - read <- newTVar hole - write <- newTVar hole - return (TChan read write) - --- |@IO@ version of 'newTChan'. This is useful for creating top-level --- 'TChan's using 'System.IO.Unsafe.unsafePerformIO', because using --- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't --- possible. -newTChanIO :: IO (TChan a) -newTChanIO = do - hole <- newTVarIO TNil - read <- newTVarIO hole - write <- newTVarIO hole - return (TChan read write) - --- | Create a write-only 'TChan'. More precisely, 'readTChan' will 'retry' --- even after items have been written to the channel. The only way to read --- a broadcast channel is to duplicate it with 'dupTChan'. --- --- Consider a server that broadcasts messages to clients: --- --- >serve :: TChan Message -> Client -> IO loop --- >serve broadcastChan client = do --- > myChan <- dupTChan broadcastChan --- > forever $ do --- > message <- readTChan myChan --- > send client message --- --- The problem with using 'newTChan' to create the broadcast channel is that if --- it is only written to and never read, items will pile up in memory. By --- using 'newBroadcastTChan' to create the broadcast channel, items can be --- garbage collected after clients have seen them. 
--- --- @since 2.4 -newBroadcastTChan :: STM (TChan a) -newBroadcastTChan = do - write_hole <- newTVar TNil - read <- newTVar (error "reading from a TChan created by newBroadcastTChan; use dupTChan first") - write <- newTVar write_hole - return (TChan read write) - --- | @IO@ version of 'newBroadcastTChan'. --- --- @since 2.4 -newBroadcastTChanIO :: IO (TChan a) -newBroadcastTChanIO = do - write_hole <- newTVarIO TNil - read <- newTVarIO (error "reading from a TChan created by newBroadcastTChanIO; use dupTChan first") - write <- newTVarIO write_hole - return (TChan read write) - --- |Write a value to a 'TChan'. -writeTChan :: TChan a -> a -> STM () -writeTChan (TChan _read write) a = do - listend <- readTVar write -- listend == TVar pointing to TNil - new_listend <- newTVar TNil - writeTVar listend (TCons a new_listend) - writeTVar write new_listend - --- |Read the next value from the 'TChan'. -readTChan :: TChan a -> STM a -readTChan (TChan read _write) = do - listhead <- readTVar read - head <- readTVar listhead - case head of - TNil -> retry - TCons a tail -> do - writeTVar read tail - return a - --- | A version of 'readTChan' which does not retry. Instead it --- returns @Nothing@ if no value is available. --- --- @since 2.3 -tryReadTChan :: TChan a -> STM (Maybe a) -tryReadTChan (TChan read _write) = do - listhead <- readTVar read - head <- readTVar listhead - case head of - TNil -> return Nothing - TCons a tl -> do - writeTVar read tl - return (Just a) - --- | Get the next value from the @TChan@ without removing it, --- retrying if the channel is empty. --- --- @since 2.3 -peekTChan :: TChan a -> STM a -peekTChan (TChan read _write) = do - listhead <- readTVar read - head <- readTVar listhead - case head of - TNil -> retry - TCons a _ -> return a - --- | A version of 'peekTChan' which does not retry. Instead it --- returns @Nothing@ if no value is available. --- --- @since 2.3 -tryPeekTChan :: TChan a -> STM (Maybe a) -tryPeekTChan (TChan read _write) = do - listhead <- readTVar read - head <- readTVar listhead - case head of - TNil -> return Nothing - TCons a _ -> return (Just a) - --- |Duplicate a 'TChan': the duplicate channel begins empty, but data written to --- either channel from then on will be available from both. Hence this creates --- a kind of broadcast channel, where data written by anyone is seen by --- everyone else. -dupTChan :: TChan a -> STM (TChan a) -dupTChan (TChan _read write) = do - hole <- readTVar write - new_read <- newTVar hole - return (TChan new_read write) - --- |Put a data item back onto a channel, where it will be the next item read. -unGetTChan :: TChan a -> a -> STM () -unGetTChan (TChan read _write) a = do - listhead <- readTVar read - newhead <- newTVar (TCons a listhead) - writeTVar read newhead - --- |Returns 'True' if the supplied 'TChan' is empty. -isEmptyTChan :: TChan a -> STM Bool -isEmptyTChan (TChan read _write) = do - listhead <- readTVar read - head <- readTVar listhead - case head of - TNil -> return True - TCons _ _ -> return False - --- |Clone a 'TChan': similar to dupTChan, but the cloned channel starts with the --- same content available as the original channel. 
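-- [Editorial sketch; not part of the removed stm sources or of this patch.]
-- The broadcast pattern described in the 'newBroadcastTChan' notes above:
-- every reader duplicates the channel first, so written items can be
-- garbage-collected once all duplicates have consumed them.  The names below
-- are illustrative only.
import Control.Concurrent.STM (atomically)
import Control.Concurrent.STM.TChan
  (dupTChan, newBroadcastTChanIO, readTChan, writeTChan)

broadcastDemo :: IO String
broadcastDemo = do
  chan   <- newBroadcastTChanIO            -- write-only until duplicated
  reader <- atomically (dupTChan chan)     -- each client gets its own read cursor
  atomically (writeTChan chan "hello")
  atomically (readTChan reader)            -- yields "hello"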
--- --- @since 2.4 -cloneTChan :: TChan a -> STM (TChan a) -cloneTChan (TChan read write) = do - readpos <- readTVar read - new_read <- newTVar readpos - return (TChan new_read write) -#endif diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TMVar.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TMVar.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TMVar.hs 2018-09-22 10:47:05.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TMVar.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,166 +0,0 @@ -{-# LANGUAGE CPP, DeriveDataTypeable, MagicHash, UnboxedTuples #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TMVar --- Copyright : (c) The University of Glasgow 2004 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- TMVar: Transactional MVars, for use in the STM monad --- (GHC only) --- ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TMVar ( -#ifdef __GLASGOW_HASKELL__ - -- * TMVars - TMVar, - newTMVar, - newEmptyTMVar, - newTMVarIO, - newEmptyTMVarIO, - takeTMVar, - putTMVar, - readTMVar, - tryReadTMVar, - swapTMVar, - tryTakeTMVar, - tryPutTMVar, - isEmptyTMVar, - mkWeakTMVar -#endif - ) where - -#ifdef __GLASGOW_HASKELL__ -import GHC.Base -import GHC.Conc -import GHC.Weak - -import Data.Typeable (Typeable) - -newtype TMVar a = TMVar (TVar (Maybe a)) deriving (Eq, Typeable) -{- ^ -A 'TMVar' is a synchronising variable, used -for communication between concurrent threads. It can be thought of -as a box, which may be empty or full. --} - --- |Create a 'TMVar' which contains the supplied value. -newTMVar :: a -> STM (TMVar a) -newTMVar a = do - t <- newTVar (Just a) - return (TMVar t) - --- |@IO@ version of 'newTMVar'. This is useful for creating top-level --- 'TMVar's using 'System.IO.Unsafe.unsafePerformIO', because using --- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't --- possible. -newTMVarIO :: a -> IO (TMVar a) -newTMVarIO a = do - t <- newTVarIO (Just a) - return (TMVar t) - --- |Create a 'TMVar' which is initially empty. -newEmptyTMVar :: STM (TMVar a) -newEmptyTMVar = do - t <- newTVar Nothing - return (TMVar t) - --- |@IO@ version of 'newEmptyTMVar'. This is useful for creating top-level --- 'TMVar's using 'System.IO.Unsafe.unsafePerformIO', because using --- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't --- possible. -newEmptyTMVarIO :: IO (TMVar a) -newEmptyTMVarIO = do - t <- newTVarIO Nothing - return (TMVar t) - --- |Return the contents of the 'TMVar'. If the 'TMVar' is currently --- empty, the transaction will 'retry'. After a 'takeTMVar', --- the 'TMVar' is left empty. -takeTMVar :: TMVar a -> STM a -takeTMVar (TMVar t) = do - m <- readTVar t - case m of - Nothing -> retry - Just a -> do writeTVar t Nothing; return a - --- | A version of 'takeTMVar' that does not 'retry'. The 'tryTakeTMVar' --- function returns 'Nothing' if the 'TMVar' was empty, or @'Just' a@ if --- the 'TMVar' was full with contents @a@. After 'tryTakeTMVar', the --- 'TMVar' is left empty. 
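-- [Editorial sketch; not part of the removed stm sources or of this patch.]
-- A 'TMVar' used as the one-place mailbox described above: 'takeTMVar' empties
-- the box and 'putTMVar' refills it, all within one transaction.  The name
-- 'bumpCounter' is invented for the example.
import Control.Concurrent.STM (atomically)
import Control.Concurrent.STM.TMVar (newTMVarIO, putTMVar, takeTMVar)

bumpCounter :: IO Int
bumpCounter = do
  box <- newTMVarIO (0 :: Int)
  atomically $ do
    x <- takeTMVar box          -- leaves the TMVar empty
    putTMVar box (x + 1)        -- refills it with the incremented value
    return x                    -- the whole step commits atomically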
-tryTakeTMVar :: TMVar a -> STM (Maybe a) -tryTakeTMVar (TMVar t) = do - m <- readTVar t - case m of - Nothing -> return Nothing - Just a -> do writeTVar t Nothing; return (Just a) - --- |Put a value into a 'TMVar'. If the 'TMVar' is currently full, --- 'putTMVar' will 'retry'. -putTMVar :: TMVar a -> a -> STM () -putTMVar (TMVar t) a = do - m <- readTVar t - case m of - Nothing -> do writeTVar t (Just a); return () - Just _ -> retry - --- | A version of 'putTMVar' that does not 'retry'. The 'tryPutTMVar' --- function attempts to put the value @a@ into the 'TMVar', returning --- 'True' if it was successful, or 'False' otherwise. -tryPutTMVar :: TMVar a -> a -> STM Bool -tryPutTMVar (TMVar t) a = do - m <- readTVar t - case m of - Nothing -> do writeTVar t (Just a); return True - Just _ -> return False - --- | This is a combination of 'takeTMVar' and 'putTMVar'; ie. it --- takes the value from the 'TMVar', puts it back, and also returns --- it. -readTMVar :: TMVar a -> STM a -readTMVar (TMVar t) = do - m <- readTVar t - case m of - Nothing -> retry - Just a -> return a - --- | A version of 'readTMVar' which does not retry. Instead it --- returns @Nothing@ if no value is available. --- --- @since 2.3 -tryReadTMVar :: TMVar a -> STM (Maybe a) -tryReadTMVar (TMVar t) = readTVar t - --- |Swap the contents of a 'TMVar' for a new value. -swapTMVar :: TMVar a -> a -> STM a -swapTMVar (TMVar t) new = do - m <- readTVar t - case m of - Nothing -> retry - Just old -> do writeTVar t (Just new); return old - --- |Check whether a given 'TMVar' is empty. -isEmptyTMVar :: TMVar a -> STM Bool -isEmptyTMVar (TMVar t) = do - m <- readTVar t - case m of - Nothing -> return True - Just _ -> return False - --- | Make a 'Weak' pointer to a 'TMVar', using the second argument as --- a finalizer to run when the 'TMVar' is garbage-collected. --- --- @since 2.4.4 -mkWeakTMVar :: TMVar a -> IO () -> IO (Weak (TMVar a)) -mkWeakTMVar tmv@(TMVar (TVar t#)) (IO finalizer) = IO $ \s -> - case mkWeak# t# tmv finalizer s of (# s1, w #) -> (# s1, Weak w #) -#endif diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TQueue.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TQueue.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TQueue.hs 2018-09-22 10:47:05.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TQueue.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,156 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-name-shadowing #-} -{-# LANGUAGE CPP, DeriveDataTypeable #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TQueue --- Copyright : (c) The University of Glasgow 2012 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- A 'TQueue' is like a 'TChan', with two important differences: --- --- * it has faster throughput than both 'TChan' and 'Chan' (although --- the costs are amortised, so the cost of individual operations --- can vary a lot). --- --- * it does /not/ provide equivalents of the 'dupTChan' and --- 'cloneTChan' operations. 
--- --- The implementation is based on the traditional purely-functional --- queue representation that uses two lists to obtain amortised /O(1)/ --- enqueue and dequeue operations. --- --- @since 2.4 ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TQueue ( - -- * TQueue - TQueue, - newTQueue, - newTQueueIO, - readTQueue, - tryReadTQueue, - flushTQueue, - peekTQueue, - tryPeekTQueue, - writeTQueue, - unGetTQueue, - isEmptyTQueue, - ) where - -import GHC.Conc -import Control.Monad (unless) -import Data.Typeable (Typeable) - --- | 'TQueue' is an abstract type representing an unbounded FIFO channel. --- --- @since 2.4 -data TQueue a = TQueue {-# UNPACK #-} !(TVar [a]) - {-# UNPACK #-} !(TVar [a]) - deriving Typeable - -instance Eq (TQueue a) where - TQueue a _ == TQueue b _ = a == b - --- |Build and returns a new instance of 'TQueue' -newTQueue :: STM (TQueue a) -newTQueue = do - read <- newTVar [] - write <- newTVar [] - return (TQueue read write) - --- |@IO@ version of 'newTQueue'. This is useful for creating top-level --- 'TQueue's using 'System.IO.Unsafe.unsafePerformIO', because using --- 'atomically' inside 'System.IO.Unsafe.unsafePerformIO' isn't --- possible. -newTQueueIO :: IO (TQueue a) -newTQueueIO = do - read <- newTVarIO [] - write <- newTVarIO [] - return (TQueue read write) - --- |Write a value to a 'TQueue'. -writeTQueue :: TQueue a -> a -> STM () -writeTQueue (TQueue _read write) a = do - listend <- readTVar write - writeTVar write (a:listend) - --- |Read the next value from the 'TQueue'. -readTQueue :: TQueue a -> STM a -readTQueue (TQueue read write) = do - xs <- readTVar read - case xs of - (x:xs') -> do - writeTVar read xs' - return x - [] -> do - ys <- readTVar write - case ys of - [] -> retry - _ -> do - let (z:zs) = reverse ys -- NB. lazy: we want the transaction to be - -- short, otherwise it will conflict - writeTVar write [] - writeTVar read zs - return z - --- | A version of 'readTQueue' which does not retry. Instead it --- returns @Nothing@ if no value is available. -tryReadTQueue :: TQueue a -> STM (Maybe a) -tryReadTQueue c = fmap Just (readTQueue c) `orElse` return Nothing - --- | Efficiently read the entire contents of a 'TQueue' into a list. This --- function never retries. --- --- @since 2.4.5 -flushTQueue :: TQueue a -> STM [a] -flushTQueue (TQueue read write) = do - xs <- readTVar read - ys <- readTVar write - unless (null xs) $ writeTVar read [] - unless (null ys) $ writeTVar write [] - return (xs ++ reverse ys) - --- | Get the next value from the @TQueue@ without removing it, --- retrying if the channel is empty. -peekTQueue :: TQueue a -> STM a -peekTQueue c = do - x <- readTQueue c - unGetTQueue c x - return x - --- | A version of 'peekTQueue' which does not retry. Instead it --- returns @Nothing@ if no value is available. -tryPeekTQueue :: TQueue a -> STM (Maybe a) -tryPeekTQueue c = do - m <- tryReadTQueue c - case m of - Nothing -> return Nothing - Just x -> do - unGetTQueue c x - return m - --- |Put a data item back onto a channel, where it will be the next item read. -unGetTQueue :: TQueue a -> a -> STM () -unGetTQueue (TQueue read _write) a = do - xs <- readTVar read - writeTVar read (a:xs) - --- |Returns 'True' if the supplied 'TQueue' is empty. 
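-- [Editorial sketch; not part of the removed stm sources or of this patch.]
-- 'TQueue' in a nutshell: unbounded writes, FIFO reads, and 'flushTQueue' to
-- drain everything in a single transaction.  The name 'queueDemo' is invented.
import Control.Concurrent.STM (atomically)
import Control.Concurrent.STM.TQueue (flushTQueue, newTQueueIO, writeTQueue)

queueDemo :: IO String
queueDemo = do
  q <- newTQueueIO
  atomically (mapM_ (writeTQueue q) "abc")   -- three single-character writes
  atomically (flushTQueue q)                 -- returns "abc" and empties the queue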
-isEmptyTQueue :: TQueue a -> STM Bool -isEmptyTQueue (TQueue read write) = do - xs <- readTVar read - case xs of - (_:_) -> return False - [] -> do ys <- readTVar write - case ys of - [] -> return True - _ -> return False diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TSem.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TSem.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TSem.hs 2018-09-22 10:47:05.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TSem.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TSem --- Copyright : (c) The University of Glasgow 2012 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- 'TSem': transactional semaphores. --- --- @since 2.4.2 ------------------------------------------------------------------------------ - -{-# LANGUAGE DeriveDataTypeable #-} -module Control.Concurrent.STM.TSem - ( TSem - , newTSem - - , waitTSem - - , signalTSem - , signalTSemN - ) where - -import Control.Concurrent.STM -import Control.Monad -import Data.Typeable -import Numeric.Natural - --- | 'TSem' is a transactional semaphore. It holds a certain number --- of units, and units may be acquired or released by 'waitTSem' and --- 'signalTSem' respectively. When the 'TSem' is empty, 'waitTSem' --- blocks. --- --- Note that 'TSem' has no concept of fairness, and there is no --- guarantee that threads blocked in `waitTSem` will be unblocked in --- the same order; in fact they will all be unblocked at the same time --- and will fight over the 'TSem'. Hence 'TSem' is not suitable if --- you expect there to be a high number of threads contending for the --- resource. However, like other STM abstractions, 'TSem' is --- composable. --- --- @since 2.4.2 -newtype TSem = TSem (TVar Integer) - deriving (Eq, Typeable) - --- | Construct new 'TSem' with an initial counter value. --- --- A positive initial counter value denotes availability of --- units 'waitTSem' can acquire. --- --- The initial counter value can be negative which denotes a resource --- \"debt\" that requires a respective amount of 'signalTSem' --- operations to counter-balance. --- --- @since 2.4.2 -newTSem :: Integer -> STM TSem -newTSem i = fmap TSem (newTVar $! i) - --- NOTE: we can't expose a good `TSem -> STM Int' operation as blocked --- 'waitTSem' aren't reliably reflected in a negative counter value. - --- | Wait on 'TSem' (aka __P__ operation). --- --- This operation acquires a unit from the semaphore (i.e. decreases --- the internal counter) and blocks (via 'retry') if no units are --- available (i.e. if the counter is /not/ positive). --- --- @since 2.4.2 -waitTSem :: TSem -> STM () -waitTSem (TSem t) = do - i <- readTVar t - when (i <= 0) retry - writeTVar t $! (i-1) - - --- Alternatively, the implementation could block (via 'retry') when --- the next increment would overflow, i.e. testing for 'maxBound' - --- | Signal a 'TSem' (aka __V__ operation). --- --- This operation adds\/releases a unit back to the semaphore --- (i.e. increments the internal counter). --- --- @since 2.4.2 -signalTSem :: TSem -> STM () -signalTSem (TSem t) = do - i <- readTVar t - writeTVar t $! 
i+1 - - --- | Multi-signal a 'TSem' --- --- This operation adds\/releases multiple units back to the semaphore --- (i.e. increments the internal counter). --- --- > signalTSem == signalTSemN 1 --- --- @since 2.4.5 -signalTSemN :: Natural -> TSem -> STM () -signalTSemN 0 _ = return () -signalTSemN 1 s = signalTSem s -signalTSemN n (TSem t) = do - i <- readTVar t - writeTVar t $! i+(toInteger n) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TVar.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TVar.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM/TVar.hs 2018-09-22 10:47:04.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM/TVar.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM.TVar --- Copyright : (c) The University of Glasgow 2004 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- TVar: Transactional variables --- ------------------------------------------------------------------------------ - -module Control.Concurrent.STM.TVar ( - -- * TVars - TVar, - newTVar, - newTVarIO, - readTVar, - readTVarIO, - writeTVar, - modifyTVar, - modifyTVar', - stateTVar, - swapTVar, -#ifdef __GLASGOW_HASKELL__ - registerDelay, -#endif - mkWeakTVar - ) where - -#ifdef __GLASGOW_HASKELL__ -import GHC.Base -import GHC.Conc -import GHC.Weak -#else -import Control.Sequential.STM -#endif - --- Like 'modifyIORef' but for 'TVar'. --- | Mutate the contents of a 'TVar'. /N.B./, this version is --- non-strict. --- --- @since 2.3 -modifyTVar :: TVar a -> (a -> a) -> STM () -modifyTVar var f = do - x <- readTVar var - writeTVar var (f x) -{-# INLINE modifyTVar #-} - - --- | Strict version of 'modifyTVar'. --- --- @since 2.3 -modifyTVar' :: TVar a -> (a -> a) -> STM () -modifyTVar' var f = do - x <- readTVar var - writeTVar var $! f x -{-# INLINE modifyTVar' #-} - - --- | Like 'modifyTVar'' but the function is a simple state transition that can --- return a side value which is passed on as the result of the 'STM'. --- --- @since 2.5.0 -stateTVar :: TVar s -> (s -> (a, s)) -> STM a -stateTVar var f = do - s <- readTVar var - let (a, s') = f s -- since we destructure this, we are strict in f - writeTVar var s' - return a -{-# INLINE stateTVar #-} - - --- Like 'swapTMVar' but for 'TVar'. --- | Swap the contents of a 'TVar' for a new value. 
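-- [Editorial sketch; not part of the removed stm sources or of this patch.]
-- The small 'TVar' helpers above in use: 'stateTVar' performs a read-and-update
-- step that also returns a result, while 'modifyTVar'' is the strict
-- fire-and-forget variant.  The names 'nextTicket' and 'ticketDemo' are invented.
import Control.Concurrent.STM (TVar, atomically, newTVarIO, stateTVar)

nextTicket :: TVar Int -> IO Int
nextTicket counter = atomically $
  stateTVar counter (\n -> (n + 1, n + 1))   -- stores and returns the bumped value

ticketDemo :: IO Int
ticketDemo = newTVarIO 0 >>= nextTicket      -- yields 1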
--- --- @since 2.3 -swapTVar :: TVar a -> a -> STM a -swapTVar var new = do - old <- readTVar var - writeTVar var new - return old -{-# INLINE swapTVar #-} - - --- | Make a 'Weak' pointer to a 'TVar', using the second argument as --- a finalizer to run when 'TVar' is garbage-collected --- --- @since 2.4.3 -mkWeakTVar :: TVar a -> IO () -> IO (Weak (TVar a)) -mkWeakTVar t@(TVar t#) (IO finalizer) = IO $ \s -> - case mkWeak# t# t finalizer s of (# s1, w #) -> (# s1, Weak w #) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Concurrent/STM.hs 2018-09-22 10:47:04.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Concurrent/STM.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -{-# LANGUAGE CPP #-} - -#if __GLASGOW_HASKELL__ >= 709 -{-# LANGUAGE Safe #-} -#elif __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Concurrent.STM --- Copyright : (c) The University of Glasgow 2004 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- Software Transactional Memory: a modular composable concurrency --- abstraction. See --- --- * /Composable memory transactions/, by Tim Harris, Simon Marlow, Simon --- Peyton Jones, and Maurice Herlihy, in --- /ACM Conference on Principles and Practice of Parallel Programming/ 2005. --- --- ------------------------------------------------------------------------------ - -module Control.Concurrent.STM ( - module Control.Monad.STM, - module Control.Concurrent.STM.TVar, -#ifdef __GLASGOW_HASKELL__ - module Control.Concurrent.STM.TMVar, - module Control.Concurrent.STM.TChan, - module Control.Concurrent.STM.TQueue, - module Control.Concurrent.STM.TBQueue, -#endif - module Control.Concurrent.STM.TArray - ) where - -import Control.Monad.STM -import Control.Concurrent.STM.TVar -#ifdef __GLASGOW_HASKELL__ -import Control.Concurrent.STM.TMVar -import Control.Concurrent.STM.TChan -#endif -import Control.Concurrent.STM.TArray -import Control.Concurrent.STM.TQueue -import Control.Concurrent.STM.TBQueue diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Monad/STM.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Monad/STM.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Monad/STM.hs 2018-09-22 10:47:05.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Monad/STM.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,138 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-orphans #-} -{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} - -#if __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Control.Monad.STM --- Copyright : (c) The University of Glasgow 2004 --- License : BSD-style (see the file libraries/base/LICENSE) --- --- Maintainer : libraries@haskell.org --- Stability : experimental --- Portability : non-portable (requires STM) --- --- Software Transactional Memory: a modular composable concurrency --- abstraction. 
See --- --- * /Composable memory transactions/, by Tim Harris, Simon Marlow, Simon --- Peyton Jones, and Maurice Herlihy, in --- /ACM Conference on Principles and Practice of Parallel Programming/ 2005. --- --- --- This module only defines the 'STM' monad; you probably want to --- import "Control.Concurrent.STM" (which exports "Control.Monad.STM"). --- --- Note that invariant checking (namely the @always@ and @alwaysSucceeds@ --- functions) has been removed. See ticket [#14324](https://ghc.haskell.org/trac/ghc/ticket/14324) and --- the [removal proposal](https://github.com/ghc-proposals/ghc-proposals/blob/master/proposals/0011-deprecate-stm-invariants.rst). --- Existing users are encouraged to encapsulate their STM operations in safe --- abstractions which can perform the invariant checking without help from the --- runtime system. - ------------------------------------------------------------------------------ - -module Control.Monad.STM ( - STM, - atomically, -#ifdef __GLASGOW_HASKELL__ - retry, - orElse, - check, -#endif - throwSTM, - catchSTM - ) where - -#ifdef __GLASGOW_HASKELL__ -#if ! (MIN_VERSION_base(4,3,0)) -import GHC.Conc hiding (catchSTM) -import Control.Monad ( MonadPlus(..) ) -import Control.Exception -#else -import GHC.Conc -#endif -import GHC.Exts -import Control.Monad.Fix -#else -import Control.Sequential.STM -#endif - -#ifdef __GLASGOW_HASKELL__ -#if ! (MIN_VERSION_base(4,3,0)) -import Control.Applicative -import Control.Monad (ap) -#endif -#endif - - -#ifdef __GLASGOW_HASKELL__ -#if ! (MIN_VERSION_base(4,3,0)) -instance MonadPlus STM where - mzero = retry - mplus = orElse - -instance Applicative STM where - pure = return - (<*>) = ap - -instance Alternative STM where - empty = retry - (<|>) = orElse -#endif - --- | Check that the boolean condition is true and, if not, 'retry'. --- --- In other words, @check b = unless b retry@. --- --- @since 2.1.1 -check :: Bool -> STM () -check b = if b then return () else retry -#endif - -#if ! (MIN_VERSION_base(4,3,0)) --- |Exception handling within STM actions. -catchSTM :: Exception e => STM a -> (e -> STM a) -> STM a -catchSTM (STM m) handler = STM $ catchSTM# m handler' - where - handler' e = case fromException e of - Just e' -> case handler e' of STM m' -> m' - Nothing -> raiseIO# e - --- | A variant of 'throw' that can only be used within the 'STM' monad. --- --- Throwing an exception in @STM@ aborts the transaction and propagates the --- exception. --- --- Although 'throwSTM' has a type that is an instance of the type of 'throw', the --- two functions are subtly different: --- --- > throw e `seq` x ===> throw e --- > throwSTM e `seq` x ===> x --- --- The first example will cause the exception @e@ to be raised, --- whereas the second one won\'t. In fact, 'throwSTM' will only cause --- an exception to be raised when it is used within the 'STM' monad. --- The 'throwSTM' variant should be used in preference to 'throw' to --- raise an exception within the 'STM' monad because it guarantees --- ordering with respect to other 'STM' operations, whereas 'throw' --- does not. 
-throwSTM :: Exception e => e -> STM a -throwSTM e = STM $ raiseIO# (toException e) -#endif - - -data STMret a = STMret (State# RealWorld) a - -liftSTM :: STM a -> State# RealWorld -> STMret a -liftSTM (STM m) = \s -> case m s of (# s', r #) -> STMret s' r - --- | @since 2.3 -instance MonadFix STM where - mfix k = STM $ \s -> - let ans = liftSTM (k r) s - STMret _ r = ans - in case ans of STMret s' x -> (# s', x #) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Sequential/STM.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Sequential/STM.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Control/Sequential/STM.hs 2018-09-22 10:47:05.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Control/Sequential/STM.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ --- Transactional memory for sequential implementations. --- Transactions do not run concurrently, but are atomic in the face --- of exceptions. - -{-# LANGUAGE CPP #-} - -#if __GLASGOW_HASKELL__ >= 709 -{-# LANGUAGE Safe #-} -#elif __GLASGOW_HASKELL__ >= 701 -{-# LANGUAGE Trustworthy #-} -#endif - --- #hide -module Control.Sequential.STM ( - STM, atomically, throwSTM, catchSTM, - TVar, newTVar, newTVarIO, readTVar, readTVarIO, writeTVar - ) where - -#if __GLASGOW_HASKELL__ < 705 -import Prelude hiding (catch) -#endif -#if __GLASGOW_HASKELL__ < 709 -import Control.Applicative (Applicative(pure, (<*>))) -#endif -import Control.Exception -import Data.IORef - --- The reference contains a rollback action to be executed on exceptions -newtype STM a = STM (IORef (IO ()) -> IO a) - -unSTM :: STM a -> IORef (IO ()) -> IO a -unSTM (STM f) = f - -instance Functor STM where - fmap f (STM m) = STM (fmap f . m) - -instance Applicative STM where - pure = STM . const . pure - STM mf <*> STM mx = STM $ \ r -> mf r <*> mx r - -instance Monad STM where - return = pure - STM m >>= k = STM $ \ r -> do - x <- m r - unSTM (k x) r - -atomically :: STM a -> IO a -atomically (STM m) = do - r <- newIORef (return ()) - m r `onException` do - rollback <- readIORef r - rollback - --- | @since 2.2.0 -throwSTM :: Exception e => e -> STM a -throwSTM = STM . const . 
throwIO - -catchSTM :: Exception e => STM a -> (e -> STM a) -> STM a -catchSTM (STM m) h = STM $ \ r -> do - old_rollback <- readIORef r - writeIORef r (return ()) - res <- try (m r) - rollback_m <- readIORef r - case res of - Left ex -> do - rollback_m - writeIORef r old_rollback - unSTM (h ex) r - Right a -> do - writeIORef r (rollback_m >> old_rollback) - return a - -newtype TVar a = TVar (IORef a) - deriving (Eq) - -newTVar :: a -> STM (TVar a) -newTVar a = STM (const (newTVarIO a)) - -newTVarIO :: a -> IO (TVar a) -newTVarIO a = do - ref <- newIORef a - return (TVar ref) - -readTVar :: TVar a -> STM a -readTVar (TVar ref) = STM (const (readIORef ref)) - --- | @since 2.1.2 -readTVarIO :: TVar a -> IO a -readTVarIO (TVar ref) = readIORef ref - -writeTVar :: TVar a -> a -> STM () -writeTVar (TVar ref) a = STM $ \ r -> do - oldval <- readIORef ref - modifyIORef r (writeIORef ref oldval >>) - writeIORef ref a diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/LICENSE cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/LICENSE --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/LICENSE 2018-09-22 10:47:05.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -The Glasgow Haskell Compiler License - -Copyright 2004, The University Court of the University of Glasgow. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -- Neither name of the University nor the names of its contributors may be -used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY COURT OF THE UNIVERSITY OF -GLASGOW AND THE CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -UNIVERSITY COURT OF THE UNIVERSITY OF GLASGOW OR THE CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. 
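-- [Editorial sketch; not part of the removed stm sources or of this patch.]
-- The property that the rollback log in Control.Sequential.STM above (and
-- GHC's native STM) provides: a transaction aborted by an exception leaves no
-- writes behind.  The name 'rollbackDemo' and the use of 'Overflow' as a
-- throwaway exception are invented for the example.
import Control.Concurrent.STM
  (atomically, newTVarIO, readTVarIO, throwSTM, writeTVar)
import Control.Exception (ArithException (Overflow), catch)

rollbackDemo :: IO Int
rollbackDemo = do
  v <- newTVarIO (0 :: Int)
  let swallow :: ArithException -> IO ()
      swallow _ = return ()
  atomically (writeTVar v 1 >> throwSTM Overflow) `catch` swallow
  readTVarIO v                 -- still 0: the aborted write is not visible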
diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Setup.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Setup.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/Setup.hs 2018-09-22 10:47:05.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -module Main (main) where - -import Distribution.Simple - -main :: IO () -main = defaultMain diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/stm.cabal cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/stm.cabal --- cabal-install-3.2-3.2+git20191216.2.e076113/src/stm-2.5.0.0/stm.cabal 2019-12-17 14:07:28.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/stm-2.5.0.0/stm.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -cabal-version: >=1.10 -name: stm -version: 2.5.0.0 -x-revision: 1 --- don't forget to update changelog.md file! - -license: BSD3 -license-file: LICENSE -maintainer: libraries@haskell.org -homepage: https://wiki.haskell.org/Software_transactional_memory -bug-reports: https://github.com/haskell/stm/issues -synopsis: Software Transactional Memory -category: Concurrency -build-type: Simple -tested-with: GHC==8.6.*, GHC==8.4.*, GHC==8.2.*, GHC==8.0.*, GHC==7.10.*, GHC==7.8.*, GHC==7.6.*, GHC==7.4.*, GHC==7.2.*, GHC==7.0.* -description: - Software Transactional Memory, or STM, is an abstraction for - concurrent communication. The main benefits of STM are - /composability/ and /modularity/. That is, using STM you can write - concurrent abstractions that can be easily composed with any other - abstraction built using STM, without exposing the details of how - your abstraction ensures safety. This is typically not the case - with other forms of concurrent communication, such as locks or - 'MVar's. 
- -extra-source-files: - changelog.md - -source-repository head - type: git - location: https://github.com/haskell/stm.git - -library - default-language: Haskell2010 - other-extensions: - CPP - DeriveDataTypeable - FlexibleInstances - MagicHash - MultiParamTypeClasses - UnboxedTuples - if impl(ghc >= 7.2) - other-extensions: Trustworthy - if impl(ghc >= 7.9) - other-extensions: Safe - - if !impl(ghc >= 7.10) - build-depends: nats (>= 0.1.3 && < 0.3) || (>= 1 && < 1.2) - - build-depends: - base >= 4.3 && < 4.14, - array >= 0.3 && < 0.6 - - exposed-modules: - Control.Concurrent.STM - Control.Concurrent.STM.TArray - Control.Concurrent.STM.TVar - Control.Concurrent.STM.TChan - Control.Concurrent.STM.TMVar - Control.Concurrent.STM.TQueue - Control.Concurrent.STM.TBQueue - Control.Concurrent.STM.TSem - Control.Monad.STM - other-modules: - Control.Sequential.STM - - ghc-options: -Wall diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/cbits/time_iconv.c cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/cbits/time_iconv.c --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/cbits/time_iconv.c 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/cbits/time_iconv.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -#include -#include -#include -#include - -int time_iconv(char *srcbuf, size_t srcbufsize) -{ - uint16_t *destbuf = NULL; - size_t destbufsize; - static uint16_t *origdestbuf; - static size_t origdestbufsize; - iconv_t ic = (iconv_t) -1; - int ret = 0; - - if (ic == (iconv_t) -1) { - ic = iconv_open("UTF-16LE", "UTF-8"); - if (ic == (iconv_t) -1) { - ret = -1; - goto done; - } - } - - destbufsize = srcbufsize * sizeof(uint16_t); - if (destbufsize > origdestbufsize) { - free(origdestbuf); - origdestbuf = destbuf = malloc(origdestbufsize = destbufsize); - } else { - destbuf = origdestbuf; - } - - iconv(ic, &srcbuf, &srcbufsize, (char**) &destbuf, &destbufsize); - - done: - return ret; -} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Builder.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Builder.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Builder.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Builder.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,75 +0,0 @@ --- | Testing the internal builder monoid --- --- Tested in this benchmark: --- --- * Concatenating many small strings using a builder --- -{-# LANGUAGE OverloadedStrings #-} -module Benchmarks.Builder - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, nf) -import Data.Binary.Builder as B -import Data.ByteString.Char8 () -import Data.Monoid (mconcat, mempty) -import qualified Blaze.ByteString.Builder as Blaze -import qualified Blaze.ByteString.Builder.Char.Utf8 as Blaze -import qualified Data.ByteString as SB -import qualified Data.ByteString.Lazy as LB -import qualified Data.Text as T -import qualified Data.Text.Lazy as LT -import qualified Data.Text.Lazy.Builder as LTB -import qualified Data.Text.Lazy.Builder.Int as Int -import Data.Int (Int64) - -benchmark :: Benchmark -benchmark = bgroup "Builder" - [ bgroup "Comparison" - [ bench "LazyText" $ nf - (LT.length . LTB.toLazyText . mconcat . 
map LTB.fromText) texts - , bench "Binary" $ nf - (LB.length . B.toLazyByteString . mconcat . map B.fromByteString) - byteStrings - , bench "Blaze" $ nf - (LB.length . Blaze.toLazyByteString . mconcat . map Blaze.fromString) - strings - ] - , bgroup "Int" - [ bgroup "Decimal" - [ bgroup "Positive" . - flip map numbers $ \n -> - (bench (show (length (show n))) $ nf (LTB.toLazyText . Int.decimal) n) - , bgroup "Negative" . - flip map numbers $ \m -> - let n = negate m in - (bench (show (length (show n))) $ nf (LTB.toLazyText . Int.decimal) n) - , bench "Empty" $ nf LTB.toLazyText mempty - , bgroup "Show" . - flip map numbers $ \n -> - (bench (show (length (show n))) $ nf show n) - ] - ] - ] - where - numbers :: [Int64] - numbers = [ - 6, 14, 500, 9688, 10654, 620735, 5608880, 37010612, - 731223504, 5061580596, 24596952933, 711732309084, 2845910093839, - 54601756118340, 735159434806159, 3619097625502435, 95777227510267124, - 414944309510675693, 8986407456998704019 - ] - -texts :: [T.Text] -texts = take 200000 $ cycle ["foo", "λx", "由の"] -{-# NOINLINE texts #-} - --- Note that the non-ascii characters will be chopped -byteStrings :: [SB.ByteString] -byteStrings = take 200000 $ cycle ["foo", "λx", "由の"] -{-# NOINLINE byteStrings #-} - --- Note that the non-ascii characters will be chopped -strings :: [String] -strings = take 200000 $ cycle ["foo", "λx", "由の"] -{-# NOINLINE strings #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Concat.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Concat.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Concat.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Concat.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module Benchmarks.Concat (benchmark) where - -import Control.Monad.Trans.Writer -import Criterion (Benchmark, bgroup, bench, whnf) -import Data.Text as T - -benchmark :: Benchmark -benchmark = bgroup "Concat" - [ bench "append" $ whnf (append4 "Text 1" "Text 2" "Text 3") "Text 4" - , bench "concat" $ whnf (concat4 "Text 1" "Text 2" "Text 3") "Text 4" - , bench "write" $ whnf (write4 "Text 1" "Text 2" "Text 3") "Text 4" - ] - -append4, concat4, write4 :: Text -> Text -> Text -> Text -> Text - -{-# NOINLINE append4 #-} -append4 x1 x2 x3 x4 = x1 `append` x2 `append` x3 `append` x4 - -{-# NOINLINE concat4 #-} -concat4 x1 x2 x3 x4 = T.concat [x1, x2, x3, x4] - -{-# NOINLINE write4 #-} -write4 x1 x2 x3 x4 = execWriter $ tell x1 >> tell x2 >> tell x3 >> tell x4 diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/DecodeUtf8.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/DecodeUtf8.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/DecodeUtf8.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/DecodeUtf8.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -{-# LANGUAGE ForeignFunctionInterface #-} - --- | Test decoding of UTF-8 --- --- Tested in this benchmark: --- --- * Decoding bytes using UTF-8 --- --- In some tests: --- --- * Taking the length of the result --- --- * Taking the init of the result --- --- The latter are used for testing stream fusion. 
--- -module Benchmarks.DecodeUtf8 - ( initEnv - , benchmark - ) where - -import Foreign.C.Types -import Data.ByteString.Internal (ByteString(..)) -import Data.ByteString.Lazy.Internal (ByteString(..)) -import Foreign.Ptr (Ptr, plusPtr) -import Foreign.ForeignPtr (withForeignPtr) -import Data.Word (Word8) -import qualified Criterion as C -import Criterion (Benchmark, bgroup, nf, whnfIO) -import qualified Codec.Binary.UTF8.Generic as U8 -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Encoding as TL - -type Env = (B.ByteString, BL.ByteString) - -initEnv :: FilePath -> IO Env -initEnv fp = do - bs <- B.readFile fp - lbs <- BL.readFile fp - return (bs, lbs) - -benchmark :: String -> Env -> Benchmark -benchmark kind ~(bs, lbs) = - let bench name = C.bench (name ++ "+" ++ kind) - decodeStream (Chunk b0 bs0) = case T.streamDecodeUtf8 b0 of - T.Some t0 _ f0 -> t0 : go f0 bs0 - where go f (Chunk b bs1) = case f b of - T.Some t1 _ f1 -> t1 : go f1 bs1 - go _ _ = [] - decodeStream _ = [] - in bgroup "DecodeUtf8" - [ bench "Strict" $ nf T.decodeUtf8 bs - , bench "Stream" $ nf decodeStream lbs - , bench "IConv" $ whnfIO $ iconv bs - , bench "StrictLength" $ nf (T.length . T.decodeUtf8) bs - , bench "StrictInitLength" $ nf (T.length . T.init . T.decodeUtf8) bs - , bench "Lazy" $ nf TL.decodeUtf8 lbs - , bench "LazyLength" $ nf (TL.length . TL.decodeUtf8) lbs - , bench "LazyInitLength" $ nf (TL.length . TL.init . TL.decodeUtf8) lbs - , bench "StrictStringUtf8" $ nf U8.toString bs - , bench "StrictStringUtf8Length" $ nf (length . U8.toString) bs - , bench "LazyStringUtf8" $ nf U8.toString lbs - , bench "LazyStringUtf8Length" $ nf (length . U8.toString) lbs - ] - -iconv :: B.ByteString -> IO CInt -iconv (PS fp off len) = withForeignPtr fp $ \ptr -> - time_iconv (ptr `plusPtr` off) (fromIntegral len) - -foreign import ccall unsafe time_iconv :: Ptr Word8 -> CSize -> IO CInt diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/EncodeUtf8.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/EncodeUtf8.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/EncodeUtf8.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/EncodeUtf8.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ --- | UTF-8 encode a text --- --- Tested in this benchmark: --- --- * Replicating a string a number of times --- --- * UTF-8 encoding it --- -module Benchmarks.EncodeUtf8 - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnf) -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Encoding as TL - -benchmark :: String -> Benchmark -benchmark string = - bgroup "EncodeUtf8" - [ bench "Text" $ whnf (B.length . T.encodeUtf8) text - , bench "LazyText" $ whnf (BL.length . 
TL.encodeUtf8) lazyText - ] - where - -- The string in different formats - text = T.replicate k $ T.pack string - lazyText = TL.replicate (fromIntegral k) $ TL.pack string - - -- Amount - k = 100000 diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Equality.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Equality.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Equality.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Equality.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ --- | Compare a string with a copy of itself that is identical except --- for the last character. --- --- Tested in this benchmark: --- --- * Comparison of strings (Eq instance) --- -module Benchmarks.Equality - ( initEnv - , benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnf) -import qualified Data.ByteString.Char8 as B -import qualified Data.ByteString.Lazy.Char8 as BL -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Encoding as TL - -type Env = (T.Text, TL.Text, B.ByteString, BL.ByteString, BL.ByteString, String) - -initEnv :: FilePath -> IO Env -initEnv fp = do - b <- B.readFile fp - bl1 <- BL.readFile fp - -- A lazy bytestring is a list of chunks. When we do not explicitly create two - -- different lazy bytestrings at a different address, the bytestring library - -- will compare the chunk addresses instead of the chunk contents. This is why - -- we read the lazy bytestring twice here. - bl2 <- BL.readFile fp - l <- readFile fp - return (T.decodeUtf8 b, TL.decodeUtf8 bl1, b, bl1, bl2, l) - -benchmark :: Env -> Benchmark -benchmark ~(t, tl, b, bl1, bl2, l) = - bgroup "Equality" - [ bench "Text" $ whnf (== T.init t `T.snoc` '\xfffd') t - , bench "LazyText" $ whnf (== TL.init tl `TL.snoc` '\xfffd') tl - , bench "ByteString" $ whnf (== B.init b `B.snoc` '\xfffd') b - , bench "LazyByteString" $ whnf (== BL.init bl2 `BL.snoc` '\xfffd') bl1 - , bench "String" $ whnf (== init l ++ "\xfffd") l - ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/FileRead.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/FileRead.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/FileRead.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/FileRead.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ --- | Benchmarks simple file reading --- --- Tested in this benchmark: --- --- * Reading a file from the disk --- -module Benchmarks.FileRead - ( benchmark - ) where - -import Control.Applicative ((<$>)) -import Criterion (Benchmark, bgroup, bench, whnfIO) -import qualified Data.ByteString as SB -import qualified Data.ByteString.Lazy as LB -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as LT -import qualified Data.Text.Lazy.Encoding as LT -import qualified Data.Text.Lazy.IO as LT - -benchmark :: FilePath -> Benchmark -benchmark p = bgroup "FileRead" - [ bench "String" $ whnfIO $ length <$> readFile p - , bench "ByteString" $ whnfIO $ SB.length <$> SB.readFile p - , bench 
"LazyByteString" $ whnfIO $ LB.length <$> LB.readFile p - , bench "Text" $ whnfIO $ T.length <$> T.readFile p - , bench "LazyText" $ whnfIO $ LT.length <$> LT.readFile p - , bench "TextByteString" $ whnfIO $ - (T.length . T.decodeUtf8) <$> SB.readFile p - , bench "LazyTextByteString" $ whnfIO $ - (LT.length . LT.decodeUtf8) <$> LB.readFile p - ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/FoldLines.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/FoldLines.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/FoldLines.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/FoldLines.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ --- | Read a file line-by-line using handles, and perform a fold over the lines. --- The fold is used here to calculate the number of lines in the file. --- --- Tested in this benchmark: --- --- * Buffered, line-based IO --- -{-# LANGUAGE BangPatterns #-} -module Benchmarks.FoldLines - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnfIO) -import System.IO -import qualified Data.ByteString as B -import qualified Data.Text as T -import qualified Data.Text.IO as T - -benchmark :: FilePath -> Benchmark -benchmark fp = bgroup "ReadLines" - [ bench "Text" $ withHandle $ foldLinesT (\n _ -> n + 1) (0 :: Int) - , bench "ByteString" $ withHandle $ foldLinesB (\n _ -> n + 1) (0 :: Int) - ] - where - withHandle f = whnfIO $ do - h <- openFile fp ReadMode - hSetBuffering h (BlockBuffering (Just 16384)) - x <- f h - hClose h - return x - --- | Text line fold --- -foldLinesT :: (a -> T.Text -> a) -> a -> Handle -> IO a -foldLinesT f z0 h = go z0 - where - go !z = do - eof <- hIsEOF h - if eof - then return z - else do - l <- T.hGetLine h - let z' = f z l in go z' -{-# INLINE foldLinesT #-} - --- | ByteString line fold --- -foldLinesB :: (a -> B.ByteString -> a) -> a -> Handle -> IO a -foldLinesB f z0 h = go z0 - where - go !z = do - eof <- hIsEOF h - if eof - then return z - else do - l <- B.hGetLine h - let z' = f z l in go z' -{-# INLINE foldLinesB #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Mul.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Mul.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Mul.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Mul.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,146 +0,0 @@ -module Benchmarks.Mul - ( initEnv - , benchmark - ) where - -import Control.Exception (evaluate) -import Criterion.Main -import Data.Int (Int32, Int64) -import Data.Text.Internal (mul32, mul64) -import qualified Data.Vector.Unboxed as U - -oldMul :: Int64 -> Int64 -> Int64 -oldMul m n - | n == 0 = 0 - | m <= maxBound `quot` n = m * n - | otherwise = error "overflow" - -type Env = (U.Vector (Int32,Int32), U.Vector (Int64,Int64)) - -initEnv :: IO Env -initEnv = do - x <- evaluate testVector32 - y <- evaluate testVector64 - return (x, y) - -benchmark :: Env -> Benchmark -benchmark ~(tv32, tv64) = bgroup "Mul" - [ bench "oldMul" $ whnf (U.map (uncurry oldMul)) tv64 - , bench "mul64" $ whnf (U.map (uncurry mul64)) tv64 - , bench "*64" $ whnf (U.map (uncurry (*))) tv64 - , bench "mul32" $ 
whnf (U.map (uncurry mul32)) tv32 - , bench "*32" $ whnf (U.map (uncurry (*))) tv32 - ] - -testVector64 :: U.Vector (Int64,Int64) -testVector64 = U.fromList [ - (0,1248868987182846646),(169004623633872,24458),(482549039517835,7614), - (372,8157063115504364),(27,107095594861148252),(3,63249878517962420), - (4363,255694473572912),(86678474,1732634806),(1572453024,1800489338), - (9384523143,77053781),(49024709555,75095046),(7,43457620410239131), - (8,8201563008844571),(387719037,1520696708),(189869238220197,1423), - (46788016849611,23063),(503077742109974359,0),(104,1502010908706487), - (30478140346,207525518),(80961140129236192,14),(4283,368012829143675), - (1028719181728108146,6),(318904,5874863049591),(56724427166898,110794), - (234539368,31369110449),(2,251729663598178612),(103291548194451219,5), - (76013,5345328755566),(1769631,2980846129318),(40898,60598477385754), - (0,98931348893227155),(573555872156917492,3),(318821187115,4476566), - (11152874213584,243582),(40274276,16636653248),(127,4249988676030597), - (103543712111871836,5),(71,16954462148248238),(3963027173504,216570), - (13000,503523808916753),(17038308,20018685905),(0,510350226577891549), - (175898,3875698895405),(425299191292676,5651),(17223451323664536,50), - (61755131,14247665326),(0,1018195131697569303),(36433751497238985,20), - (3473607861601050,1837),(1392342328,1733971838),(225770297367,3249655), - (14,127545244155254102),(1751488975299136,2634),(3949208,504190668767), - (153329,831454434345),(1066212122928663658,2),(351224,2663633539556), - (344565,53388869217),(35825609350446863,54),(276011553660081475,10), - (1969754174790470349,3),(35,68088438338633),(506710,3247689556438), - (11099382291,327739909),(105787303549,32824363),(210366111,14759049409), - (688893241579,3102676),(8490,70047474429581),(152085,29923000251880), - (5046974599257095,400),(4183167795,263434071),(10089728,502781960687), - (44831977765,4725378),(91,8978094664238578),(30990165721,44053350), - (1772377,149651820860),(243420621763408572,4),(32,5790357453815138), - (27980806337993771,5),(47696295759774,20848),(1745874142313778,1098), - (46869334770121,1203),(886995283,1564424789),(40679396544,76002479), - (1,672849481568486995),(337656187205,3157069),(816980552858963,6003), - (2271434085804831543,1),(0,1934521023868747186),(6266220038281,15825), - (4160,107115946987394),(524,246808621791561),(0,1952519482439636339), - (128,2865935904539691),(1044,3211982069426297),(16000511542473,88922), - (1253596745404082,2226),(27041,56836278958002),(23201,49247489754471), - (175906590497,21252392),(185163584757182295,24),(34742225226802197,150), - (2363228,250824838408),(216327527109550,45),(24,81574076994520675), - (28559899906542,15356),(10890139774837133,511),(2293,707179303654492), - (2749366833,40703233),(0,4498229704622845986),(439,4962056468281937), - (662,1453820621089921),(16336770612459631,220),(24282989393,74239137), - (2724564648490195,3),(743672760,124992589),(4528103,704330948891), - (6050483122491561,250),(13322953,13594265152),(181794,22268101450214), - (25957941712,75384092),(43352,7322262295009),(32838,52609059549923), - (33003585202001564,2),(103019,68430142267402),(129918230800,8742978), - (0,2114347379589080688),(2548,905723041545274),(222745067962838382,0), - (1671683850790425181,1),(455,4836932776795684),(794227702827214,6620), - (212534135175874,1365),(96432431858,29784975),(466626763743380,3484), - (29793949,53041519613),(8359,309952753409844),(3908960585331901,26), - (45185288970365760,114),(10131829775,68110174),(58039242399640479,83), - 
(628092278238719399,6),(1,196469106875361889),(302336625,16347502444), - (148,3748088684181047),(1,1649096568849015456),(1019866864,2349753026), - (8211344830,569363306),(65647579546873,34753),(2340190,1692053129069), - (64263301,30758930355),(48681618072372209,110),(7074794736,47640197), - (249634721521,7991792),(1162917363807215,232),(7446433349,420634045), - (63398619383,60709817),(51359004508011,14200),(131788797028647,7072), - (52079887791430043,7),(7,136277667582599838),(28582879735696,50327), - (1404582800566278,833),(469164435,15017166943),(99567079957578263,49), - (1015285971,3625801566),(321504843,4104079293),(5196954,464515406632), - (114246832260876,7468),(8149664437,487119673),(12265299,378168974869), - (37711995764,30766513),(3971137243,710996152),(483120070302,603162), - (103009942,61645547145),(8476344625340,6987),(547948761229739,1446), - (42234,18624767306301),(13486714173011,58948),(4,198309153268019840), - (9913176974,325539248),(28246225540203,116822),(2882463945582154,18), - (959,25504987505398),(3,1504372236378217710),(13505229956793,374987), - (751661959,457611342),(27375926,36219151769),(482168869,5301952074), - (1,1577425863241520640),(714116235611821,1164),(904492524250310488,0), - (5983514941763398,68),(10759472423,23540686),(72539568471529,34919), - (4,176090672310337473),(938702842110356453,1),(673652445,3335287382), - (3111998893666122,917),(1568013,3168419765469)] - -testVector32 :: U.Vector (Int32,Int32) -testVector32 = U.fromList [ - (39242,410),(0,100077553),(2206,9538),(509400240,1),(38048,6368), - (1789,651480),(2399,157032),(701,170017),(5241456,14),(11212,70449), - (1,227804876),(749687254,1),(74559,2954),(1158,147957),(410604456,1), - (170851,1561),(92643422,1),(6192,180509),(7,24202210),(3440,241481), - (5753677,5),(294327,1622),(252,4454673),(127684121,11),(28315800,30), - (340370905,0),(1,667887987),(592782090,1),(49023,27641),(750,290387), - (72886,3847),(0,301047933),(3050276,473),(1,788366142),(59457,15813), - (637726933,1),(1135,344317),(853616,264),(696816,493),(7038,12046), - (125219574,4),(803694088,1),(107081726,1),(39294,21699),(16361,38191), - (132561123,12),(1760,23499),(847543,484),(175687349,1),(2963,252678), - (6248,224553),(27596,4606),(5422922,121),(1542,485890),(131,583035), - (59096,4925),(3637115,132),(0,947225435),(86854,6794),(2984745,339), - (760129569,1),(1,68260595),(380835652,2),(430575,2579),(54514,7211), - (15550606,3),(9,27367402),(3007053,207),(7060988,60),(28560,27130), - (1355,21087),(10880,53059),(14563646,4),(461886361,1),(2,169260724), - (241454126,2),(406797,1),(61631630,16),(44473,5943),(63869104,12), - (950300,1528),(2113,62333),(120817,9358),(100261456,1),(426764723,1), - (119,12723684),(3,53358711),(4448071,18),(1,230278091),(238,232102), - (8,57316440),(42437979,10),(6769,19555),(48590,22006),(11500585,79), - (2808,97638),(42,26952545),(11,32104194),(23954638,1),(785427272,0), - (513,81379),(31333960,37),(897772,1009),(4,25679692),(103027993,12), - (104972702,11),(546,443401),(7,65137092),(88574269,3),(872139069,0), - (2,97417121),(378802603,0),(141071401,4),(22613,10575),(2191743,118), - (470,116119),(7062,38166),(231056,1847),(43901963,9),(2400,70640), - (63553,1555),(34,11249573),(815174,1820),(997894011,0),(98881794,2), - (5448,43132),(27956,9),(904926,1357),(112608626,3),(124,613021), - (282086,1966),(99,10656881),(113799,1501),(433318,2085),(442,948171), - (165380,1043),(28,14372905),(14880,50462),(2386,219918),(229,1797565), - (1174961,298),(3925,41833),(3903515,299),(15690452,111),(360860521,3), - 
(7440846,81),(2541026,507),(0,492448477),(6869,82469),(245,8322939), - (3503496,253),(123495298,0),(150963,2299),(33,4408482),(1,200911107), - (305,252121),(13,123369189),(215846,8181),(2440,65387),(776764401,1), - (1241172,434),(8,15493155),(81953961,6),(17884993,5),(26,6893822), - (0,502035190),(1,582451018),(2,514870139),(227,3625619),(49,12720258), - (1456769,207),(94797661,10),(234407,893),(26843,5783),(15688,24547), - (4091,86268),(4339448,151),(21360,6294),(397046497,2),(1227,205936), - (9966,21959),(160046791,1),(0,159992224),(27,24974797),(19177,29334), - (4136148,42),(21179785,53),(61256583,31),(385,344176),(7,11934915), - (1,18992566),(3488065,5),(768021,224),(36288474,7),(8624,117561), - (8,20341439),(5903,261475),(561,1007618),(1738,392327),(633049,1708)] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/BigTable.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/BigTable.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/BigTable.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/BigTable.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ --- | Create a large HTML table and dump it to a handle --- --- Tested in this benchmark: --- --- * Creating a large HTML document using a builder --- --- * Writing to a handle --- -{-# LANGUAGE OverloadedStrings #-} -module Benchmarks.Programs.BigTable - ( benchmark - ) where - -import Criterion (Benchmark, bench, whnfIO) -import Data.Monoid (mappend, mconcat) -import Data.Text.Lazy.Builder (Builder, fromText, toLazyText) -import Data.Text.Lazy.IO (hPutStr) -import System.IO (Handle) -import qualified Data.Text as T - -benchmark :: Handle -> Benchmark -benchmark sink = bench "BigTable" $ whnfIO $ do - hPutStr sink "Content-Type: text/html\n\n" - hPutStr sink . toLazyText . makeTable =<< rows - hPutStr sink "
" - where - -- We provide the number of rows in IO so the builder value isn't shared - -- between the benchmark samples. - rows :: IO Int - rows = return 20000 - {-# NOINLINE rows #-} - -makeTable :: Int -> Builder -makeTable n = mconcat $ replicate n $ mconcat $ map makeCol [1 .. 50] - -makeCol :: Int -> Builder -makeCol 1 = fromText "1" -makeCol 50 = fromText "50" -makeCol i = fromText "" `mappend` (fromInt i `mappend` fromText "") - -fromInt :: Int -> Builder -fromInt = fromText . T.pack . show diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Cut.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Cut.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Cut.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Cut.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,98 +0,0 @@ --- | Cut into a file, selecting certain columns (e.g. columns 10 to 40) --- --- Tested in this benchmark: --- --- * Reading the file --- --- * Splitting into lines --- --- * Taking a number of characters from the lines --- --- * Joining the lines --- --- * Writing back to a handle --- -module Benchmarks.Programs.Cut - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnfIO) -import System.IO (Handle, hPutStr) -import qualified Data.ByteString as B -import qualified Data.ByteString.Char8 as BC -import qualified Data.ByteString.Lazy as BL -import qualified Data.ByteString.Lazy.Char8 as BLC -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Encoding as TL -import qualified Data.Text.Lazy.IO as TL - -benchmark :: FilePath -> Handle -> Int -> Int -> Benchmark -benchmark p sink from to = bgroup "Cut" - [ bench' "String" string - , bench' "ByteString" byteString - , bench' "LazyByteString" lazyByteString - , bench' "Text" text - , bench' "LazyText" lazyText - , bench' "TextByteString" textByteString - , bench' "LazyTextByteString" lazyTextByteString - ] - where - bench' n s = bench n $ whnfIO (s p sink from to) - -string :: FilePath -> Handle -> Int -> Int -> IO () -string fp sink from to = do - s <- readFile fp - hPutStr sink $ cut s - where - cut = unlines . map (take (to - from) . drop from) . lines - -byteString :: FilePath -> Handle -> Int -> Int -> IO () -byteString fp sink from to = do - bs <- B.readFile fp - B.hPutStr sink $ cut bs - where - cut = BC.unlines . map (B.take (to - from) . B.drop from) . BC.lines - -lazyByteString :: FilePath -> Handle -> Int -> Int -> IO () -lazyByteString fp sink from to = do - bs <- BL.readFile fp - BL.hPutStr sink $ cut bs - where - cut = BLC.unlines . map (BL.take (to' - from') . BL.drop from') . BLC.lines - from' = fromIntegral from - to' = fromIntegral to - -text :: FilePath -> Handle -> Int -> Int -> IO () -text fp sink from to = do - t <- T.readFile fp - T.hPutStr sink $ cut t - where - cut = T.unlines . map (T.take (to - from) . T.drop from) . T.lines - -lazyText :: FilePath -> Handle -> Int -> Int -> IO () -lazyText fp sink from to = do - t <- TL.readFile fp - TL.hPutStr sink $ cut t - where - cut = TL.unlines . map (TL.take (to' - from') . TL.drop from') . 
TL.lines - from' = fromIntegral from - to' = fromIntegral to - -textByteString :: FilePath -> Handle -> Int -> Int -> IO () -textByteString fp sink from to = do - t <- T.decodeUtf8 `fmap` B.readFile fp - B.hPutStr sink $ T.encodeUtf8 $ cut t - where - cut = T.unlines . map (T.take (to - from) . T.drop from) . T.lines - -lazyTextByteString :: FilePath -> Handle -> Int -> Int -> IO () -lazyTextByteString fp sink from to = do - t <- TL.decodeUtf8 `fmap` BL.readFile fp - BL.hPutStr sink $ TL.encodeUtf8 $ cut t - where - cut = TL.unlines . map (TL.take (to' - from') . TL.drop from') . TL.lines - from' = fromIntegral from - to' = fromIntegral to diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Fold.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Fold.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Fold.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Fold.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ --- | Benchmark which formats paragraph, like the @sort@ unix utility. --- --- Tested in this benchmark: --- --- * Reading the file --- --- * Splitting into paragraphs --- --- * Reformatting the paragraphs to a certain line width --- --- * Concatenating the results using the text builder --- --- * Writing back to a handle --- -{-# LANGUAGE OverloadedStrings #-} -module Benchmarks.Programs.Fold - ( benchmark - ) where - -import Data.List (foldl') -import Data.List (intersperse) -import Data.Monoid (mempty, mappend, mconcat) -import System.IO (Handle) -import Criterion (Benchmark, bench, whnfIO) -import qualified Data.Text as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy.Builder as TLB -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.IO as TL - -benchmark :: FilePath -> Handle -> Benchmark -benchmark i o = - bench "Fold" $ whnfIO $ T.readFile i >>= TL.hPutStr o . fold 80 - --- | We represent a paragraph by a word list --- -type Paragraph = [T.Text] - --- | Fold a text --- -fold :: Int -> T.Text -> TL.Text -fold maxWidth = TLB.toLazyText . mconcat . - intersperse "\n\n" . map (foldParagraph maxWidth) . paragraphs - --- | Fold a paragraph --- -foldParagraph :: Int -> Paragraph -> TLB.Builder -foldParagraph _ [] = mempty -foldParagraph max' (w : ws) = fst $ foldl' go (TLB.fromText w, T.length w) ws - where - go (builder, width) word - | width + len + 1 <= max' = - (builder `mappend` " " `mappend` word', width + len + 1) - | otherwise = - (builder `mappend` "\n" `mappend` word', len) - where - word' = TLB.fromText word - len = T.length word - --- | Divide a text into paragraphs --- -paragraphs :: T.Text -> [Paragraph] -paragraphs = splitParagraphs . map T.words . 
T.lines - where - splitParagraphs ls = case break null ls of - ([], []) -> [] - (p, []) -> [concat p] - (p, lr) -> concat p : splitParagraphs (dropWhile null lr) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Sort.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Sort.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Sort.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Sort.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ --- | This benchmark sorts the lines of a file, like the @sort@ unix utility. --- --- Tested in this benchmark: --- --- * Reading the file --- --- * Splitting into lines --- --- * Sorting the lines --- --- * Joining the lines --- --- * Writing back to a handle --- -{-# LANGUAGE OverloadedStrings #-} -module Benchmarks.Programs.Sort - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnfIO) -import Data.Monoid (mconcat) -import System.IO (Handle, hPutStr) -import qualified Data.ByteString as B -import qualified Data.ByteString.Char8 as BC -import qualified Data.ByteString.Lazy as BL -import qualified Data.ByteString.Lazy.Char8 as BLC -import qualified Data.List as L -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Builder as TLB -import qualified Data.Text.Lazy.Encoding as TL -import qualified Data.Text.Lazy.IO as TL - -benchmark :: FilePath -> Handle -> Benchmark -benchmark i o = bgroup "Sort" - [ bench "String" $ whnfIO $ readFile i >>= hPutStr o . string - , bench "ByteString" $ whnfIO $ B.readFile i >>= B.hPutStr o . byteString - , bench "LazyByteString" $ whnfIO $ - BL.readFile i >>= BL.hPutStr o . lazyByteString - , bench "Text" $ whnfIO $ T.readFile i >>= T.hPutStr o . text - , bench "LazyText" $ whnfIO $ TL.readFile i >>= TL.hPutStr o . lazyText - , bench "TextByteString" $ whnfIO $ B.readFile i >>= - B.hPutStr o . T.encodeUtf8 . text . T.decodeUtf8 - , bench "LazyTextByteString" $ whnfIO $ BL.readFile i >>= - BL.hPutStr o . TL.encodeUtf8 . lazyText . TL.decodeUtf8 - , bench "TextBuilder" $ whnfIO $ B.readFile i >>= - BL.hPutStr o . TL.encodeUtf8 . textBuilder . T.decodeUtf8 - ] - -string :: String -> String -string = unlines . L.sort . lines - -byteString :: B.ByteString -> B.ByteString -byteString = BC.unlines . L.sort . BC.lines - -lazyByteString :: BL.ByteString -> BL.ByteString -lazyByteString = BLC.unlines . L.sort . BLC.lines - -text :: T.Text -> T.Text -text = T.unlines . L.sort . T.lines - -lazyText :: TL.Text -> TL.Text -lazyText = TL.unlines . L.sort . TL.lines - --- | Text variant using a builder monoid for the final concatenation --- -textBuilder :: T.Text -> TL.Text -textBuilder = TLB.toLazyText . mconcat . L.intersperse (TLB.singleton '\n') . - map TLB.fromText . L.sort . 
T.lines diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/StripTags.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/StripTags.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/StripTags.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/StripTags.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ --- | Program to replace HTML tags by whitespace --- --- This program was originally contributed by Petr Prokhorenkov. --- --- Tested in this benchmark: --- --- * Reading the file --- --- * Replacing text between HTML tags (<>) with whitespace --- --- * Writing back to a handle --- -{-# OPTIONS_GHC -fspec-constr-count=5 #-} -module Benchmarks.Programs.StripTags - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnfIO) -import Data.List (mapAccumL) -import System.IO (Handle, hPutStr) -import qualified Data.ByteString as B -import qualified Data.ByteString.Char8 as BC -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T - -benchmark :: FilePath -> Handle -> Benchmark -benchmark i o = bgroup "StripTags" - [ bench "String" $ whnfIO $ readFile i >>= hPutStr o . string - , bench "ByteString" $ whnfIO $ B.readFile i >>= B.hPutStr o . byteString - , bench "Text" $ whnfIO $ T.readFile i >>= T.hPutStr o . text - , bench "TextByteString" $ whnfIO $ - B.readFile i >>= B.hPutStr o . T.encodeUtf8 . text . T.decodeUtf8 - ] - -string :: String -> String -string = snd . mapAccumL step 0 - -text :: T.Text -> T.Text -text = snd . T.mapAccumL step 0 - -byteString :: B.ByteString -> B.ByteString -byteString = snd . BC.mapAccumL step 0 - -step :: Int -> Char -> (Int, Char) -step d c - | d > 0 || d' > 0 = (d', ' ') - | otherwise = (d', c) - where - d' = d + depth c - depth '>' = 1 - depth '<' = -1 - depth _ = 0 diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Throughput.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Throughput.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Throughput.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Programs/Throughput.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ --- | This benchmark simply reads and writes a file using the various string --- libraries. The point of it is that we can make better estimations on how --- much time the other benchmarks spend doing IO. --- --- Note that we expect ByteStrings to be a whole lot faster, since they do not --- do any actual encoding/decoding here, while String and Text do have UTF-8 --- encoding/decoding. 
--- --- Tested in this benchmark: --- --- * Reading the file --- --- * Replacing text between HTML tags (<>) with whitespace --- --- * Writing back to a handle --- -module Benchmarks.Programs.Throughput - ( benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnfIO) -import System.IO (Handle, hPutStr) -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy.Encoding as TL -import qualified Data.Text.Lazy.IO as TL - -benchmark :: FilePath -> Handle -> Benchmark -benchmark fp sink = bgroup "Throughput" - [ bench "String" $ whnfIO $ readFile fp >>= hPutStr sink - , bench "ByteString" $ whnfIO $ B.readFile fp >>= B.hPutStr sink - , bench "LazyByteString" $ whnfIO $ BL.readFile fp >>= BL.hPutStr sink - , bench "Text" $ whnfIO $ T.readFile fp >>= T.hPutStr sink - , bench "LazyText" $ whnfIO $ TL.readFile fp >>= TL.hPutStr sink - , bench "TextByteString" $ whnfIO $ - B.readFile fp >>= B.hPutStr sink . T.encodeUtf8 . T.decodeUtf8 - , bench "LazyTextByteString" $ whnfIO $ - BL.readFile fp >>= BL.hPutStr sink . TL.encodeUtf8 . TL.decodeUtf8 - ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Pure.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Pure.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Pure.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Pure.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,518 +0,0 @@ --- | Benchmarks various pure functions from the Text library --- --- Tested in this benchmark: --- --- * Most pure functions defined the string types --- -{-# LANGUAGE BangPatterns, CPP, GADTs, MagicHash #-} -{-# LANGUAGE DeriveAnyClass, DeriveGeneric, RecordWildCards #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} -module Benchmarks.Pure - ( initEnv - , benchmark - ) where - -import Control.DeepSeq (NFData (..)) -import Control.Exception (evaluate) -import Criterion (Benchmark, bgroup, bench, nf) -import Data.Char (toLower, toUpper) -import Data.Monoid (mappend, mempty) -import GHC.Base (Char (..), Int (..), chr#, ord#, (+#)) -import GHC.Generics (Generic) -import GHC.Int (Int64) -import qualified Data.ByteString.Char8 as BS -import qualified Data.ByteString.Lazy.Char8 as BL -import qualified Data.ByteString.UTF8 as UTF8 -import qualified Data.List as L -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Builder as TB -import qualified Data.Text.Lazy.Encoding as TL - -data Env = Env - { bsa :: !BS.ByteString - , ta :: !T.Text - , tb :: !T.Text - , tla :: !TL.Text - , tlb :: !TL.Text - , bsb :: !BS.ByteString - , bla :: !BL.ByteString - , blb :: !BL.ByteString - , sa :: !String - , sb :: !String - , bsa_len :: !Int - , ta_len :: !Int - , bla_len :: !Int64 - , tla_len :: !Int64 - , sa_len :: !Int - , bsl :: [BS.ByteString] - , bll :: [BL.ByteString] - , tl :: [T.Text] - , tll :: [TL.Text] - , sl :: [String] - } deriving (Generic, NFData) - - -initEnv :: FilePath -> IO Env -initEnv fp = do - -- Evaluate stuff before actually running the benchmark, we don't want to - -- count it here. 
- - -- ByteString A - bsa <- BS.readFile fp - - -- Text A/B, LazyText A/B - ta <- evaluate $ T.decodeUtf8 bsa - tb <- evaluate $ T.toUpper ta - tla <- evaluate $ TL.fromChunks (T.chunksOf 16376 ta) - tlb <- evaluate $ TL.fromChunks (T.chunksOf 16376 tb) - - -- ByteString B, LazyByteString A/B - bsb <- evaluate $ T.encodeUtf8 tb - bla <- evaluate $ BL.fromChunks (chunksOf 16376 bsa) - blb <- evaluate $ BL.fromChunks (chunksOf 16376 bsb) - - -- String A/B - sa <- evaluate $ UTF8.toString bsa - sb <- evaluate $ T.unpack tb - - -- Lengths - bsa_len <- evaluate $ BS.length bsa - ta_len <- evaluate $ T.length ta - bla_len <- evaluate $ BL.length bla - tla_len <- evaluate $ TL.length tla - sa_len <- evaluate $ L.length sa - - -- Lines - bsl <- evaluate $ BS.lines bsa - bll <- evaluate $ BL.lines bla - tl <- evaluate $ T.lines ta - tll <- evaluate $ TL.lines tla - sl <- evaluate $ L.lines sa - - return Env{..} - -benchmark :: String -> Env -> Benchmark -benchmark kind ~Env{..} = - bgroup "Pure" - [ bgroup "append" - [ benchT $ nf (T.append tb) ta - , benchTL $ nf (TL.append tlb) tla - , benchBS $ nf (BS.append bsb) bsa - , benchBSL $ nf (BL.append blb) bla - , benchS $ nf ((++) sb) sa - ] - , bgroup "concat" - [ benchT $ nf T.concat tl - , benchTL $ nf TL.concat tll - , benchBS $ nf BS.concat bsl - , benchBSL $ nf BL.concat bll - , benchS $ nf L.concat sl - ] - , bgroup "cons" - [ benchT $ nf (T.cons c) ta - , benchTL $ nf (TL.cons c) tla - , benchBS $ nf (BS.cons c) bsa - , benchBSL $ nf (BL.cons c) bla - , benchS $ nf (c:) sa - ] - , bgroup "concatMap" - [ benchT $ nf (T.concatMap (T.replicate 3 . T.singleton)) ta - , benchTL $ nf (TL.concatMap (TL.replicate 3 . TL.singleton)) tla - , benchBS $ nf (BS.concatMap (BS.replicate 3)) bsa - , benchBSL $ nf (BL.concatMap (BL.replicate 3)) bla - , benchS $ nf (L.concatMap (L.replicate 3 . (:[]))) sa - ] - , bgroup "decode" - [ benchT $ nf T.decodeUtf8 bsa - , benchTL $ nf TL.decodeUtf8 bla - , benchBS $ nf BS.unpack bsa - , benchBSL $ nf BL.unpack bla - , benchS $ nf UTF8.toString bsa - ] - , bgroup "decode'" - [ benchT $ nf T.decodeUtf8' bsa - , benchTL $ nf TL.decodeUtf8' bla - ] - , bgroup "drop" - [ benchT $ nf (T.drop (ta_len `div` 3)) ta - , benchTL $ nf (TL.drop (tla_len `div` 3)) tla - , benchBS $ nf (BS.drop (bsa_len `div` 3)) bsa - , benchBSL $ nf (BL.drop (bla_len `div` 3)) bla - , benchS $ nf (L.drop (sa_len `div` 3)) sa - ] - , bgroup "encode" - [ benchT $ nf T.encodeUtf8 ta - , benchTL $ nf TL.encodeUtf8 tla - , benchBS $ nf BS.pack sa - , benchBSL $ nf BL.pack sa - , benchS $ nf UTF8.fromString sa - ] - , bgroup "filter" - [ benchT $ nf (T.filter p0) ta - , benchTL $ nf (TL.filter p0) tla - , benchBS $ nf (BS.filter p0) bsa - , benchBSL $ nf (BL.filter p0) bla - , benchS $ nf (L.filter p0) sa - ] - , bgroup "filter.filter" - [ benchT $ nf (T.filter p1 . T.filter p0) ta - , benchTL $ nf (TL.filter p1 . TL.filter p0) tla - , benchBS $ nf (BS.filter p1 . BS.filter p0) bsa - , benchBSL $ nf (BL.filter p1 . BL.filter p0) bla - , benchS $ nf (L.filter p1 . L.filter p0) sa - ] - , bgroup "foldl'" - [ benchT $ nf (T.foldl' len 0) ta - , benchTL $ nf (TL.foldl' len 0) tla - , benchBS $ nf (BS.foldl' len 0) bsa - , benchBSL $ nf (BL.foldl' len 0) bla - , benchS $ nf (L.foldl' len 0) sa - ] - , bgroup "foldr" - [ benchT $ nf (L.length . T.foldr (:) []) ta - , benchTL $ nf (L.length . TL.foldr (:) []) tla - , benchBS $ nf (L.length . BS.foldr (:) []) bsa - , benchBSL $ nf (L.length . BL.foldr (:) []) bla - , benchS $ nf (L.length . 
L.foldr (:) []) sa - ] - , bgroup "head" - [ benchT $ nf T.head ta - , benchTL $ nf TL.head tla - , benchBS $ nf BS.head bsa - , benchBSL $ nf BL.head bla - , benchS $ nf L.head sa - ] - , bgroup "init" - [ benchT $ nf T.init ta - , benchTL $ nf TL.init tla - , benchBS $ nf BS.init bsa - , benchBSL $ nf BL.init bla - , benchS $ nf L.init sa - ] - , bgroup "intercalate" - [ benchT $ nf (T.intercalate tsw) tl - , benchTL $ nf (TL.intercalate tlw) tll - , benchBS $ nf (BS.intercalate bsw) bsl - , benchBSL $ nf (BL.intercalate blw) bll - , benchS $ nf (L.intercalate lw) sl - ] - , bgroup "intersperse" - [ benchT $ nf (T.intersperse c) ta - , benchTL $ nf (TL.intersperse c) tla - , benchBS $ nf (BS.intersperse c) bsa - , benchBSL $ nf (BL.intersperse c) bla - , benchS $ nf (L.intersperse c) sa - ] - , bgroup "isInfixOf" - [ benchT $ nf (T.isInfixOf tsw) ta - , benchTL $ nf (TL.isInfixOf tlw) tla - , benchBS $ nf (BS.isInfixOf bsw) bsa - -- no isInfixOf for lazy bytestrings - , benchS $ nf (L.isInfixOf lw) sa - ] - , bgroup "last" - [ benchT $ nf T.last ta - , benchTL $ nf TL.last tla - , benchBS $ nf BS.last bsa - , benchBSL $ nf BL.last bla - , benchS $ nf L.last sa - ] - , bgroup "map" - [ benchT $ nf (T.map f) ta - , benchTL $ nf (TL.map f) tla - , benchBS $ nf (BS.map f) bsa - , benchBSL $ nf (BL.map f) bla - , benchS $ nf (L.map f) sa - ] - , bgroup "mapAccumL" - [ benchT $ nf (T.mapAccumL g 0) ta - , benchTL $ nf (TL.mapAccumL g 0) tla - , benchBS $ nf (BS.mapAccumL g 0) bsa - , benchBSL $ nf (BL.mapAccumL g 0) bla - , benchS $ nf (L.mapAccumL g 0) sa - ] - , bgroup "mapAccumR" - [ benchT $ nf (T.mapAccumR g 0) ta - , benchTL $ nf (TL.mapAccumR g 0) tla - , benchBS $ nf (BS.mapAccumR g 0) bsa - , benchBSL $ nf (BL.mapAccumR g 0) bla - , benchS $ nf (L.mapAccumR g 0) sa - ] - , bgroup "map.map" - [ benchT $ nf (T.map f . T.map f) ta - , benchTL $ nf (TL.map f . TL.map f) tla - , benchBS $ nf (BS.map f . BS.map f) bsa - , benchBSL $ nf (BL.map f . BL.map f) bla - , benchS $ nf (L.map f . 
L.map f) sa - ] - , bgroup "replicate char" - [ benchT $ nf (T.replicate bsa_len) (T.singleton c) - , benchTL $ nf (TL.replicate (fromIntegral bsa_len)) (TL.singleton c) - , benchBS $ nf (BS.replicate bsa_len) c - , benchBSL $ nf (BL.replicate (fromIntegral bsa_len)) c - , benchS $ nf (L.replicate bsa_len) c - ] - , bgroup "replicate string" - [ benchT $ nf (T.replicate (bsa_len `div` T.length tsw)) tsw - , benchTL $ nf (TL.replicate (fromIntegral bsa_len `div` TL.length tlw)) tlw - , benchS $ nf (replicat (bsa_len `div` T.length tsw)) lw - ] - , bgroup "reverse" - [ benchT $ nf T.reverse ta - , benchTL $ nf TL.reverse tla - , benchBS $ nf BS.reverse bsa - , benchBSL $ nf BL.reverse bla - , benchS $ nf L.reverse sa - ] - , bgroup "take" - [ benchT $ nf (T.take (ta_len `div` 3)) ta - , benchTL $ nf (TL.take (tla_len `div` 3)) tla - , benchBS $ nf (BS.take (bsa_len `div` 3)) bsa - , benchBSL $ nf (BL.take (bla_len `div` 3)) bla - , benchS $ nf (L.take (sa_len `div` 3)) sa - ] - , bgroup "tail" - [ benchT $ nf T.tail ta - , benchTL $ nf TL.tail tla - , benchBS $ nf BS.tail bsa - , benchBSL $ nf BL.tail bla - , benchS $ nf L.tail sa - ] - , bgroup "toLower" - [ benchT $ nf T.toLower ta - , benchTL $ nf TL.toLower tla - , benchBS $ nf (BS.map toLower) bsa - , benchBSL $ nf (BL.map toLower) bla - , benchS $ nf (L.map toLower) sa - ] - , bgroup "toUpper" - [ benchT $ nf T.toUpper ta - , benchTL $ nf TL.toUpper tla - , benchBS $ nf (BS.map toUpper) bsa - , benchBSL $ nf (BL.map toUpper) bla - , benchS $ nf (L.map toUpper) sa - ] - , bgroup "uncons" - [ benchT $ nf T.uncons ta - , benchTL $ nf TL.uncons tla - , benchBS $ nf BS.uncons bsa - , benchBSL $ nf BL.uncons bla - , benchS $ nf L.uncons sa - ] - , bgroup "words" - [ benchT $ nf T.words ta - , benchTL $ nf TL.words tla - , benchBS $ nf BS.words bsa - , benchBSL $ nf BL.words bla - , benchS $ nf L.words sa - ] - , bgroup "zipWith" - [ benchT $ nf (T.zipWith min tb) ta - , benchTL $ nf (TL.zipWith min tlb) tla - , benchBS $ nf (BS.zipWith min bsb) bsa - , benchBSL $ nf (BL.zipWith min blb) bla - , benchS $ nf (L.zipWith min sb) sa - ] - , bgroup "length" - [ bgroup "cons" - [ benchT $ nf (T.length . T.cons c) ta - , benchTL $ nf (TL.length . TL.cons c) tla - , benchBS $ nf (BS.length . BS.cons c) bsa - , benchBSL $ nf (BL.length . BL.cons c) bla - , benchS $ nf (L.length . (:) c) sa - ] - , bgroup "decode" - [ benchT $ nf (T.length . T.decodeUtf8) bsa - , benchTL $ nf (TL.length . TL.decodeUtf8) bla - , benchBS $ nf (L.length . BS.unpack) bsa - , benchBSL $ nf (L.length . BL.unpack) bla - , bench "StringUTF8" $ nf (L.length . UTF8.toString) bsa - ] - , bgroup "drop" - [ benchT $ nf (T.length . T.drop (ta_len `div` 3)) ta - , benchTL $ nf (TL.length . TL.drop (tla_len `div` 3)) tla - , benchBS $ nf (BS.length . BS.drop (bsa_len `div` 3)) bsa - , benchBSL $ nf (BL.length . BL.drop (bla_len `div` 3)) bla - , benchS $ nf (L.length . L.drop (sa_len `div` 3)) sa - ] - , bgroup "filter" - [ benchT $ nf (T.length . T.filter p0) ta - , benchTL $ nf (TL.length . TL.filter p0) tla - , benchBS $ nf (BS.length . BS.filter p0) bsa - , benchBSL $ nf (BL.length . BL.filter p0) bla - , benchS $ nf (L.length . L.filter p0) sa - ] - , bgroup "filter.filter" - [ benchT $ nf (T.length . T.filter p1 . T.filter p0) ta - , benchTL $ nf (TL.length . TL.filter p1 . TL.filter p0) tla - , benchBS $ nf (BS.length . BS.filter p1 . BS.filter p0) bsa - , benchBSL $ nf (BL.length . BL.filter p1 . BL.filter p0) bla - , benchS $ nf (L.length . L.filter p1 . 
L.filter p0) sa - ] - , bgroup "init" - [ benchT $ nf (T.length . T.init) ta - , benchTL $ nf (TL.length . TL.init) tla - , benchBS $ nf (BS.length . BS.init) bsa - , benchBSL $ nf (BL.length . BL.init) bla - , benchS $ nf (L.length . L.init) sa - ] - , bgroup "intercalate" - [ benchT $ nf (T.length . T.intercalate tsw) tl - , benchTL $ nf (TL.length . TL.intercalate tlw) tll - , benchBS $ nf (BS.length . BS.intercalate bsw) bsl - , benchBSL $ nf (BL.length . BL.intercalate blw) bll - , benchS $ nf (L.length . L.intercalate lw) sl - ] - , bgroup "intersperse" - [ benchT $ nf (T.length . T.intersperse c) ta - , benchTL $ nf (TL.length . TL.intersperse c) tla - , benchBS $ nf (BS.length . BS.intersperse c) bsa - , benchBSL $ nf (BL.length . BL.intersperse c) bla - , benchS $ nf (L.length . L.intersperse c) sa - ] - , bgroup "map" - [ benchT $ nf (T.length . T.map f) ta - , benchTL $ nf (TL.length . TL.map f) tla - , benchBS $ nf (BS.length . BS.map f) bsa - , benchBSL $ nf (BL.length . BL.map f) bla - , benchS $ nf (L.length . L.map f) sa - ] - , bgroup "map.map" - [ benchT $ nf (T.length . T.map f . T.map f) ta - , benchTL $ nf (TL.length . TL.map f . TL.map f) tla - , benchBS $ nf (BS.length . BS.map f . BS.map f) bsa - , benchS $ nf (L.length . L.map f . L.map f) sa - ] - , bgroup "replicate char" - [ benchT $ nf (T.length . T.replicate bsa_len) (T.singleton c) - , benchTL $ nf (TL.length . TL.replicate (fromIntegral bsa_len)) (TL.singleton c) - , benchBS $ nf (BS.length . BS.replicate bsa_len) c - , benchBSL $ nf (BL.length . BL.replicate (fromIntegral bsa_len)) c - , benchS $ nf (L.length . L.replicate bsa_len) c - ] - , bgroup "replicate string" - [ benchT $ nf (T.length . T.replicate (bsa_len `div` T.length tsw)) tsw - , benchTL $ nf (TL.length . TL.replicate (fromIntegral bsa_len `div` TL.length tlw)) tlw - , benchS $ nf (L.length . replicat (bsa_len `div` T.length tsw)) lw - ] - , bgroup "take" - [ benchT $ nf (T.length . T.take (ta_len `div` 3)) ta - , benchTL $ nf (TL.length . TL.take (tla_len `div` 3)) tla - , benchBS $ nf (BS.length . BS.take (bsa_len `div` 3)) bsa - , benchBSL $ nf (BL.length . BL.take (bla_len `div` 3)) bla - , benchS $ nf (L.length . L.take (sa_len `div` 3)) sa - ] - , bgroup "tail" - [ benchT $ nf (T.length . T.tail) ta - , benchTL $ nf (TL.length . TL.tail) tla - , benchBS $ nf (BS.length . BS.tail) bsa - , benchBSL $ nf (BL.length . BL.tail) bla - , benchS $ nf (L.length . L.tail) sa - ] - , bgroup "toLower" - [ benchT $ nf (T.length . T.toLower) ta - , benchTL $ nf (TL.length . TL.toLower) tla - , benchBS $ nf (BS.length . BS.map toLower) bsa - , benchBSL $ nf (BL.length . BL.map toLower) bla - , benchS $ nf (L.length . L.map toLower) sa - ] - , bgroup "toUpper" - [ benchT $ nf (T.length . T.toUpper) ta - , benchTL $ nf (TL.length . TL.toUpper) tla - , benchBS $ nf (BS.length . BS.map toUpper) bsa - , benchBSL $ nf (BL.length . BL.map toUpper) bla - , benchS $ nf (L.length . L.map toUpper) sa - ] - , bgroup "words" - [ benchT $ nf (L.length . T.words) ta - , benchTL $ nf (L.length . TL.words) tla - , benchBS $ nf (L.length . BS.words) bsa - , benchBSL $ nf (L.length . BL.words) bla - , benchS $ nf (L.length . L.words) sa - ] - , bgroup "zipWith" - [ benchT $ nf (T.length . T.zipWith min tb) ta - , benchTL $ nf (TL.length . TL.zipWith min tlb) tla - , benchBS $ nf (L.length . BS.zipWith min bsb) bsa - , benchBSL $ nf (L.length . BL.zipWith min blb) bla - , benchS $ nf (L.length . 
L.zipWith min sb) sa - ] - ] - , bgroup "Builder" - [ bench "mappend char" $ nf (TL.length . TB.toLazyText . mappendNChar 'a') 10000 - , bench "mappend 8 char" $ nf (TL.length . TB.toLazyText . mappend8Char) 'a' - , bench "mappend text" $ nf (TL.length . TB.toLazyText . mappendNText short) 10000 - ] - ] - where - benchS = bench ("String+" ++ kind) - benchT = bench ("Text+" ++ kind) - benchTL = bench ("LazyText+" ++ kind) - benchBS = bench ("ByteString+" ++ kind) - benchBSL = bench ("LazyByteString+" ++ kind) - - c = 'й' - p0 = (== c) - p1 = (/= 'д') - lw = "право" - bsw = UTF8.fromString lw - blw = BL.fromChunks [bsw] - tsw = T.pack lw - tlw = TL.fromChunks [tsw] - f (C# c#) = C# (chr# (ord# c# +# 1#)) - g (I# i#) (C# c#) = (I# (i# +# 1#), C# (chr# (ord# c# +# i#))) - len l _ = l + (1::Int) - replicat n = concat . L.replicate n - short = T.pack "short" - -#if !MIN_VERSION_bytestring(0,10,0) -instance NFData BS.ByteString - -instance NFData BL.ByteString where - rnf BL.Empty = () - rnf (BL.Chunk _ ts) = rnf ts -#endif - -data B where - B :: NFData a => a -> B - -instance NFData B where - rnf (B b) = rnf b - --- | Split a bytestring in chunks --- -chunksOf :: Int -> BS.ByteString -> [BS.ByteString] -chunksOf k = go - where - go t = case BS.splitAt k t of - (a,b) | BS.null a -> [] - | otherwise -> a : go b - --- | Append a character n times --- -mappendNChar :: Char -> Int -> TB.Builder -mappendNChar c n = go 0 - where - go i - | i < n = TB.singleton c `mappend` go (i+1) - | otherwise = mempty - --- | Gives more opportunity for inlining and elimination of unnecesary --- bounds checks. --- -mappend8Char :: Char -> TB.Builder -mappend8Char c = TB.singleton c `mappend` TB.singleton c `mappend` - TB.singleton c `mappend` TB.singleton c `mappend` - TB.singleton c `mappend` TB.singleton c `mappend` - TB.singleton c `mappend` TB.singleton c - --- | Append a text N times --- -mappendNText :: T.Text -> Int -> TB.Builder -mappendNText t n = go 0 - where - go i - | i < n = TB.fromText t `mappend` go (i+1) - | otherwise = mempty diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/ReadNumbers.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/ReadNumbers.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/ReadNumbers.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/ReadNumbers.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ --- | Read numbers from a file with a just a number on each line, find the --- minimum of those numbers. The file contains different kinds of numbers: --- --- * Decimals --- --- * Hexadecimals --- --- * Floating point numbers --- --- * Floating point numbers in scientific notation --- --- The different benchmarks will only take into account the values they can --- parse. 
--- --- Tested in this benchmark: --- --- * Lexing/parsing of different numerical types --- -module Benchmarks.ReadNumbers - ( initEnv - , benchmark - ) where - -import Criterion (Benchmark, bgroup, bench, whnf) -import Data.List (foldl') -import Numeric (readDec, readFloat, readHex) -import qualified Data.ByteString.Char8 as B -import qualified Data.ByteString.Lazy.Char8 as BL -import qualified Data.ByteString.Lex.Fractional as B -import qualified Data.Text as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.IO as TL -import qualified Data.Text.Lazy.Read as TL -import qualified Data.Text.Read as T - -type Env = ([String], [T.Text], [TL.Text], [B.ByteString], [BL.ByteString]) - -initEnv :: FilePath -> IO Env -initEnv fp = do - -- Read all files into lines: string, text, lazy text, bytestring, lazy - -- bytestring - s <- lines `fmap` readFile fp - t <- T.lines `fmap` T.readFile fp - tl <- TL.lines `fmap` TL.readFile fp - b <- B.lines `fmap` B.readFile fp - bl <- BL.lines `fmap` BL.readFile fp - return (s, t, tl, b, bl) - -benchmark :: Env -> Benchmark -benchmark ~(s, t, tl, b, bl) = - bgroup "ReadNumbers" - [ bench "DecimalString" $ whnf (int . string readDec) s - , bench "HexadecimalString" $ whnf (int . string readHex) s - , bench "DoubleString" $ whnf (double . string readFloat) s - - , bench "DecimalText" $ whnf (int . text (T.signed T.decimal)) t - , bench "HexadecimalText" $ whnf (int . text (T.signed T.hexadecimal)) t - , bench "DoubleText" $ whnf (double . text T.double) t - , bench "RationalText" $ whnf (double . text T.rational) t - - , bench "DecimalLazyText" $ - whnf (int . text (TL.signed TL.decimal)) tl - , bench "HexadecimalLazyText" $ - whnf (int . text (TL.signed TL.hexadecimal)) tl - , bench "DoubleLazyText" $ - whnf (double . text TL.double) tl - , bench "RationalLazyText" $ - whnf (double . text TL.rational) tl - - , bench "DecimalByteString" $ whnf (int . byteString B.readInt) b - , bench "DoubleByteString" $ whnf (double . byteString B.readDecimal) b - - , bench "DecimalLazyByteString" $ - whnf (int . 
byteString BL.readInt) bl - ] - where - -- Used for fixing types - int :: Int -> Int - int = id - double :: Double -> Double - double = id - -string :: (Ord a, Num a) => (t -> [(a, t)]) -> [t] -> a -string reader = foldl' go 1000000 - where - go z t = case reader t of [(n, _)] -> min n z - _ -> z - -text :: (Ord a, Num a) => (t -> Either String (a,t)) -> [t] -> a -text reader = foldl' go 1000000 - where - go z t = case reader t of Left _ -> z - Right (n, _) -> min n z - -byteString :: (Ord a, Num a) => (t -> Maybe (a,t)) -> [t] -> a -byteString reader = foldl' go 1000000 - where - go z t = case reader t of Nothing -> z - Just (n, _) -> min n z diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Replace.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Replace.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Replace.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Replace.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -{-# LANGUAGE BangPatterns #-} --- | Replace a string by another string --- --- Tested in this benchmark: --- --- * Search and replace of a pattern in a text --- -module Benchmarks.Replace - ( benchmark - , initEnv - ) where - -import Criterion (Benchmark, bgroup, bench, nf) -import qualified Data.ByteString.Char8 as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.ByteString.Lazy.Search as BL -import qualified Data.ByteString.Search as B -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Encoding as TL -import qualified Data.Text.Lazy.IO as TL - -type Env = (T.Text, B.ByteString, TL.Text, BL.ByteString) - -initEnv :: FilePath -> IO Env -initEnv fp = do - tl <- TL.readFile fp - bl <- BL.readFile fp - let !t = TL.toStrict tl - !b = T.encodeUtf8 t - return (t, b, tl, bl) - -benchmark :: String -> String -> Env -> Benchmark -benchmark pat sub ~(t, b, tl, bl) = - bgroup "Replace" [ - bench "Text" $ nf (T.length . T.replace tpat tsub) t - , bench "ByteString" $ nf (BL.length . B.replace bpat bsub) b - , bench "LazyText" $ nf (TL.length . TL.replace tlpat tlsub) tl - , bench "LazyByteString" $ nf (BL.length . 
BL.replace blpat blsub) bl - ] - where - tpat = T.pack pat - tsub = T.pack sub - tlpat = TL.pack pat - tlsub = TL.pack sub - bpat = T.encodeUtf8 tpat - bsub = T.encodeUtf8 tsub - blpat = B.concat $ BL.toChunks $ TL.encodeUtf8 tlpat - blsub = B.concat $ BL.toChunks $ TL.encodeUtf8 tlsub diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Search.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Search.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Search.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Search.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ --- | Search for a pattern in a file, find the number of occurences --- --- Tested in this benchmark: --- --- * Searching all occurences of a pattern using library routines --- -module Benchmarks.Search - ( initEnv - , benchmark - ) where - -import Criterion (Benchmark, bench, bgroup, whnf) -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.ByteString.Lazy.Search as BL -import qualified Data.ByteString.Search as B -import qualified Data.Text as T -import qualified Data.Text.Encoding as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.IO as TL - -type Env = (B.ByteString, BL.ByteString, T.Text, TL.Text) - -initEnv :: FilePath -> IO Env -initEnv fp = do - b <- B.readFile fp - bl <- BL.readFile fp - t <- T.readFile fp - tl <- TL.readFile fp - return (b, bl, t, tl) - -benchmark :: T.Text -> Env -> Benchmark -benchmark needleT ~(b, bl, t, tl) = - bgroup "FileIndices" - [ bench "ByteString" $ whnf (byteString needleB) b - , bench "LazyByteString" $ whnf (lazyByteString needleB) bl - , bench "Text" $ whnf (text needleT) t - , bench "LazyText" $ whnf (lazyText needleTL) tl - ] - where - needleB = T.encodeUtf8 needleT - needleTL = TL.fromChunks [needleT] - -byteString :: B.ByteString -> B.ByteString -> Int -byteString needle = length . B.indices needle - -lazyByteString :: B.ByteString -> BL.ByteString -> Int -lazyByteString needle = length . BL.indices needle - -text :: T.Text -> T.Text -> Int -text = T.count - -lazyText :: TL.Text -> TL.Text -> Int -lazyText needle = fromIntegral . 
TL.count needle diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Stream.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Stream.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Stream.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/Stream.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,118 +0,0 @@ --- | This module contains a number of benchmarks for the different streaming --- functions --- --- Tested in this benchmark: --- --- * Most streaming functions --- -{-# LANGUAGE BangPatterns #-} -{-# LANGUAGE DeriveAnyClass, DeriveGeneric, RecordWildCards #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} -module Benchmarks.Stream - ( initEnv - , benchmark - ) where - -import Control.DeepSeq (NFData (..)) -import Criterion (Benchmark, bgroup, bench, nf) -import qualified Data.Text as T -import qualified Data.ByteString as B -import qualified Data.Text.Lazy as TL -import qualified Data.ByteString.Lazy as BL -import Data.Text.Internal.Fusion.Types (Step (..), Stream (..)) -import qualified Data.Text.Encoding as T -import qualified Data.Text.Encoding.Error as E -import qualified Data.Text.Internal.Encoding.Fusion as T -import qualified Data.Text.Internal.Encoding.Fusion.Common as F -import qualified Data.Text.Internal.Fusion as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy.Encoding as TL -import qualified Data.Text.Internal.Lazy.Encoding.Fusion as TL -import qualified Data.Text.Internal.Lazy.Fusion as TL -import qualified Data.Text.Lazy.IO as TL -import GHC.Generics (Generic) - -instance NFData a => NFData (Stream a) where - -- Currently, this implementation does not force evaluation of the size hint - rnf (Stream next s0 _) = go s0 - where - go !s = case next s of - Done -> () - Skip s' -> go s' - Yield x s' -> rnf x `seq` go s' - -data Env = Env - { t :: !T.Text - , utf8 :: !B.ByteString - , utf16le :: !B.ByteString - , utf16be :: !B.ByteString - , utf32le :: !B.ByteString - , utf32be :: !B.ByteString - , tl :: !TL.Text - , utf8L :: !BL.ByteString - , utf16leL :: !BL.ByteString - , utf16beL :: !BL.ByteString - , utf32leL :: !BL.ByteString - , utf32beL :: !BL.ByteString - , s :: T.Stream Char - } deriving (Generic, NFData) - -initEnv :: FilePath -> IO Env -initEnv fp = do - -- Different formats - t <- T.readFile fp - let !utf8 = T.encodeUtf8 t - !utf16le = T.encodeUtf16LE t - !utf16be = T.encodeUtf16BE t - !utf32le = T.encodeUtf32LE t - !utf32be = T.encodeUtf32BE t - - -- Once again for the lazy variants - tl <- TL.readFile fp - let !utf8L = TL.encodeUtf8 tl - !utf16leL = TL.encodeUtf16LE tl - !utf16beL = TL.encodeUtf16BE tl - !utf32leL = TL.encodeUtf32LE tl - !utf32beL = TL.encodeUtf32BE tl - - -- For the functions which operate on streams - let !s = T.stream t - return Env{..} - -benchmark :: Env -> Benchmark -benchmark ~Env{..} = - bgroup "Stream" - -- Fusion - [ bgroup "stream" $ - [ bench "Text" $ nf T.stream t - , bench "LazyText" $ nf TL.stream tl - ] - - -- Encoding.Fusion - , bgroup "streamUtf8" - [ bench "Text" $ nf (T.streamUtf8 E.lenientDecode) utf8 - , bench "LazyText" $ nf (TL.streamUtf8 E.lenientDecode) utf8L - ] - , bgroup "streamUtf16LE" - [ bench "Text" $ nf (T.streamUtf16LE E.lenientDecode) utf16le - , bench "LazyText" $ nf (TL.streamUtf16LE E.lenientDecode) utf16leL - ] - , bgroup "streamUtf16BE" - [ bench "Text" $ nf (T.streamUtf16BE 
E.lenientDecode) utf16be - , bench "LazyText" $ nf (TL.streamUtf16BE E.lenientDecode) utf16beL - ] - , bgroup "streamUtf32LE" - [ bench "Text" $ nf (T.streamUtf32LE E.lenientDecode) utf32le - , bench "LazyText" $ nf (TL.streamUtf32LE E.lenientDecode) utf32leL - ] - , bgroup "streamUtf32BE" - [ bench "Text" $ nf (T.streamUtf32BE E.lenientDecode) utf32be - , bench "LazyText" $ nf (TL.streamUtf32BE E.lenientDecode) utf32beL - ] - - -- Encoding.Fusion.Common - , bench "restreamUtf16LE" $ nf F.restreamUtf16LE s - , bench "restreamUtf16BE" $ nf F.restreamUtf16BE s - , bench "restreamUtf32LE" $ nf F.restreamUtf32LE s - , bench "restreamUtf32BE" $ nf F.restreamUtf32BE s - ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/WordFrequencies.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/WordFrequencies.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/WordFrequencies.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks/WordFrequencies.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ --- | A word frequency count using the different string types --- --- Tested in this benchmark: --- --- * Splitting into words --- --- * Converting to lowercase --- --- * Comparing: Eq/Ord instances --- -module Benchmarks.WordFrequencies - ( initEnv - , benchmark - ) where - -import Criterion (Benchmark, bench, bgroup, whnf) -import Data.Char (toLower) -import Data.List (foldl') -import Data.Map (Map) -import qualified Data.ByteString.Char8 as B -import qualified Data.Map as M -import qualified Data.Text as T -import qualified Data.Text.IO as T - -type Env = (String, B.ByteString, T.Text) - -initEnv :: FilePath -> IO Env -initEnv fp = do - s <- readFile fp - b <- B.readFile fp - t <- T.readFile fp - return (s, b, t) - -benchmark :: Env -> Benchmark -benchmark ~(s, b, t) = - bgroup "WordFrequencies" - [ bench "String" $ whnf (frequencies . words . map toLower) s - , bench "ByteString" $ whnf (frequencies . B.words . B.map toLower) b - , bench "Text" $ whnf (frequencies . T.words . 
T.toLower) t - ] - -frequencies :: Ord a => [a] -> Map a Int -frequencies = foldl' (\m k -> M.insertWith (+) k 1 m) M.empty diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Benchmarks.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Benchmarks.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,73 +0,0 @@ --- | Main module to run the micro benchmarks --- -{-# LANGUAGE OverloadedStrings #-} -module Main - ( main - ) where - -import Criterion.Main (defaultMain, bgroup, env) -import System.FilePath (()) -import System.IO (IOMode (WriteMode), openFile, hSetEncoding, utf8) - -import qualified Benchmarks.Builder as Builder -import qualified Benchmarks.Concat as Concat -import qualified Benchmarks.DecodeUtf8 as DecodeUtf8 -import qualified Benchmarks.EncodeUtf8 as EncodeUtf8 -import qualified Benchmarks.Equality as Equality -import qualified Benchmarks.FileRead as FileRead -import qualified Benchmarks.FoldLines as FoldLines -import qualified Benchmarks.Mul as Mul -import qualified Benchmarks.Pure as Pure -import qualified Benchmarks.ReadNumbers as ReadNumbers -import qualified Benchmarks.Replace as Replace -import qualified Benchmarks.Search as Search -import qualified Benchmarks.Stream as Stream -import qualified Benchmarks.WordFrequencies as WordFrequencies - -import qualified Benchmarks.Programs.BigTable as Programs.BigTable -import qualified Benchmarks.Programs.Cut as Programs.Cut -import qualified Benchmarks.Programs.Fold as Programs.Fold -import qualified Benchmarks.Programs.Sort as Programs.Sort -import qualified Benchmarks.Programs.StripTags as Programs.StripTags -import qualified Benchmarks.Programs.Throughput as Programs.Throughput - -main :: IO () -main = do - sink <- openFile "/dev/null" WriteMode - hSetEncoding sink utf8 - defaultMain - [ Builder.benchmark - , Concat.benchmark - , env (DecodeUtf8.initEnv (tf "libya-chinese.html")) (DecodeUtf8.benchmark "html") - , env (DecodeUtf8.initEnv (tf "yiwiki.xml")) (DecodeUtf8.benchmark "xml") - , env (DecodeUtf8.initEnv (tf "ascii.txt")) (DecodeUtf8.benchmark "ascii") - , env (DecodeUtf8.initEnv (tf "russian.txt")) (DecodeUtf8.benchmark "russian") - , env (DecodeUtf8.initEnv (tf "japanese.txt")) (DecodeUtf8.benchmark "japanese") - , EncodeUtf8.benchmark "επανάληψη 竺法蘭共譯" - , env (Equality.initEnv (tf "japanese.txt")) Equality.benchmark - , FileRead.benchmark (tf "russian.txt") - , FoldLines.benchmark (tf "russian.txt") - , env Mul.initEnv Mul.benchmark - , env (Pure.initEnv (tf "tiny.txt")) (Pure.benchmark "tiny") - , env (Pure.initEnv (tf "ascii-small.txt")) (Pure.benchmark "ascii-small") - , env (Pure.initEnv (tf "ascii.txt")) (Pure.benchmark "ascii") - , env (Pure.initEnv (tf "english.txt")) (Pure.benchmark "english") - , env (Pure.initEnv (tf "russian-small.txt")) (Pure.benchmark "russian") - , env (Pure.initEnv (tf "japanese.txt")) (Pure.benchmark "japanese") - , env (ReadNumbers.initEnv (tf "numbers.txt")) ReadNumbers.benchmark - , env (Replace.initEnv (tf "russian.txt")) (Replace.benchmark "принимая" "своем") - , env (Search.initEnv (tf "russian.txt")) (Search.benchmark "принимая") - , env (Stream.initEnv (tf "russian.txt")) Stream.benchmark - , env (WordFrequencies.initEnv (tf "russian.txt")) WordFrequencies.benchmark - , bgroup "Programs" 
- [ Programs.BigTable.benchmark sink - , Programs.Cut.benchmark (tf "russian.txt") sink 20 40 - , Programs.Fold.benchmark (tf "russian.txt") sink - , Programs.Sort.benchmark (tf "russian.txt") sink - , Programs.StripTags.benchmark (tf "yiwiki.xml") sink - , Programs.Throughput.benchmark (tf "russian.txt") sink - ] - ] - where - -- Location of a test file - tf = ("../tests/text-test-data" ) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Multilang.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Multilang.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Multilang.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Multilang.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -{-# LANGUAGE BangPatterns, OverloadedStrings, RankNTypes #-} - -module Main ( - main - ) where - -import Control.Monad (forM_) -import qualified Data.ByteString as B -import qualified Data.Text as Text -import Data.Text.Encoding (decodeUtf8) -import Data.Text (Text) -import System.IO (hFlush, stdout) -import Timer (timer) - -type BM = Text -> () - -bm :: forall a. (Text -> a) -> BM -bm f t = f t `seq` () - -benchmarks :: [(String, Text.Text -> ())] -benchmarks = [ - ("find_first", bm $ Text.isInfixOf "en:Benin") - , ("find_index", bm $ Text.findIndex (=='c')) - ] - -main :: IO () -main = do - !contents <- decodeUtf8 `fmap` B.readFile "../tests/text-test-data/yiwiki.xml" - forM_ benchmarks $ \(name, bmark) -> do - putStr $ name ++ " " - hFlush stdout - putStrLn =<< (timer 100 contents bmark) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Timer.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Timer.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/haskell/Timer.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/haskell/Timer.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -{-# LANGUAGE BangPatterns #-} - -module Timer (timer) where - -import Control.Exception (evaluate) -import Data.Time.Clock.POSIX (getPOSIXTime) -import GHC.Float (FFFormat(..), formatRealFloat) - -ickyRound :: Int -> Double -> String -ickyRound k = formatRealFloat FFFixed (Just k) - -timer :: Int -> a -> (a -> b) -> IO String -timer count a0 f = do - let loop !k !fastest - | k <= 0 = return fastest - | otherwise = do - start <- getPOSIXTime - let inner a i - | i <= 0 = return () - | otherwise = evaluate (f a) >> inner a (i-1) - inner a0 count - end <- getPOSIXTime - let elapsed = end - start - loop (k-1) (min fastest (elapsed / fromIntegral count)) - t <- loop (3::Int) 1e300 - let log10 x = log x / log 10 - ft = realToFrac t - prec = round (log10 (fromIntegral count) - log10 ft) - return $! 
ickyRound prec ft -{-# NOINLINE timer #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/python/cut.py cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/python/cut.py --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/python/cut.py 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/python/cut.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -import utils, sys, codecs - -def cut(filename, l, r): - content = open(filename, encoding='utf-8') - for line in content: - print(line[l:r]) - -for f in sys.argv[1:]: - t = utils.benchmark(lambda: cut(f, 20, 40)) - sys.stderr.write('{0}: {1}\n'.format(f, t)) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/python/multilang.py cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/python/multilang.py --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/python/multilang.py 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/python/multilang.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -#!/usr/bin/env python - -import math -import sys -import time - -def find_first(): - cf = contents.find - return timer(lambda: cf("en:Benin")) - -def timer(f, count=100): - a = 1e300 - def g(): - return - for i in xrange(3): - start = time.time() - for j in xrange(count): - g() - a = min(a, (time.time() - start) / count) - - b = 1e300 - for i in xrange(3): - start = time.time() - for j in xrange(count): - f() - b = min(b, (time.time() - start) / count) - - return round(b - a, int(round(math.log(count, 10) - math.log(b - a, 10)))) - -contents = open('../../tests/text-test-data/yiwiki.xml', 'r').read() -contents = contents.decode('utf-8') - -benchmarks = ( - find_first, - ) - -to_run = sys.argv[1:] -bms = [] -if to_run: - for r in to_run: - for b in benchmarks: - if b.__name__.startswith(r): - bms.append(b) -else: - bms = benchmarks - -for b in bms: - sys.stdout.write(b.__name__ + ' ') - sys.stdout.flush() - print b() diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/python/sort.py cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/python/sort.py --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/python/sort.py 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/python/sort.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -#!/usr/bin/env python - -import utils, sys, codecs - -def sort(filename): - content = open(filename, encoding='utf-8').read() - lines = content.splitlines() - lines.sort() - print('\n'.join(lines)) - -for f in sys.argv[1:]: - t = utils.benchmark(lambda: sort(f)) - sys.stderr.write('{0}: {1}\n'.format(f, t)) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/python/strip_tags.py cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/python/strip_tags.py --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/python/strip_tags.py 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/python/strip_tags.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -#!/usr/bin/env python - -import utils, sys - -def strip_tags(filename): - string = open(filename, encoding='utf-8').read() - 
- d = 0 - out = [] - - for c in string: - if c == '<': d += 1 - - if d > 0: - out += ' ' - else: - out += c - - if c == '>': d -= 1 - - print(''.join(out)) - -for f in sys.argv[1:]: - t = utils.benchmark(lambda: strip_tags(f)) - sys.stderr.write('{0}: {1}\n'.format(f, t)) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/python/utils.py cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/python/utils.py --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/python/utils.py 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/python/utils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -#!/usr/bin/env python - -import sys, time - -def benchmark_once(f): - start = time.time() - f() - end = time.time() - return end - start - -def benchmark(f): - runs = 100 - total = 0.0 - for i in range(runs): - result = benchmark_once(f) - sys.stderr.write('Run {0}: {1}\n'.format(i, result)) - total += result - return total / runs diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/ruby/cut.rb cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/ruby/cut.rb --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/ruby/cut.rb 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/ruby/cut.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -#!/usr/bin/env ruby - -require './utils.rb' - -def cut(filename, l, r) - File.open(filename, 'r:utf-8') do |file| - file.each_line do |line| - puts line[l, r - l] - end - end -end - -ARGV.each do |f| - t = benchmark { cut(f, 20, 40) } - STDERR.puts "#{f}: #{t}" -end diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/ruby/fold.rb cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/ruby/fold.rb --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/ruby/fold.rb 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/ruby/fold.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -#!/usr/bin/env ruby - -require './utils.rb' - -def fold(filename, max_width) - File.open(filename, 'r:utf-8') do |file| - # Words in this paragraph - paragraph = [] - - file.each_line do |line| - # If we encounter an empty line, we reformat and dump the current - # paragraph - if line.strip.empty? - puts fold_paragraph(paragraph, max_width) - puts - paragraph = [] - # Otherwise, we append the words found in the line to the paragraph - else - paragraph.concat line.split - end - end - - # Last paragraph - puts fold_paragraph(paragraph, max_width) unless paragraph.empty? 
- end -end - -# Fold a single paragraph to the desired width -def fold_paragraph(paragraph, max_width) - # Gradually build our output - str, *rest = paragraph - width = str.length - - rest.each do |word| - if width + word.length + 1 <= max_width - str << ' ' << word - width += word.length + 1 - else - str << "\n" << word - width = word.length - end - end - - str -end - -ARGV.each do |f| - t = benchmark { fold(f, 80) } - STDERR.puts "#{f}: #{t}" -end diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/ruby/sort.rb cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/ruby/sort.rb --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/ruby/sort.rb 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/ruby/sort.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,15 +0,0 @@ -#!/usr/bin/env ruby - -require './utils.rb' - -def sort(filename) - File.open(filename, 'r:utf-8') do |file| - content = file.read - puts content.lines.sort.join - end -end - -ARGV.each do |f| - t = benchmark { sort(f) } - STDERR.puts "#{f}: #{t}" -end diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/ruby/strip_tags.rb cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/ruby/strip_tags.rb --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/ruby/strip_tags.rb 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/ruby/strip_tags.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -#!/usr/bin/env ruby - -require './utils.rb' - -def strip_tags(filename) - File.open(filename, 'r:utf-8') do |file| - str = file.read - - d = 0 - - str.each_char do |c| - d += 1 if c == '<' - putc(if d > 0 then ' ' else c end) - d -= 1 if c == '>' - end - end -end - -ARGV.each do |f| - t = benchmark { strip_tags(f) } - STDERR.puts "#{f}: #{t}" -end diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/ruby/utils.rb cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/ruby/utils.rb --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/ruby/utils.rb 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/ruby/utils.rb 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -require 'benchmark' - -def benchmark(&block) - runs = 100 - total = 0 - - runs.times do |i| - result = Benchmark.measure(&block).total - $stderr.puts "Run #{i}: #{result}" - total += result - end - - total / runs -end diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/Setup.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/Setup.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/Setup.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/Setup.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -import Distribution.Simple -main = defaultMain diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/text-benchmarks.cabal cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/text-benchmarks.cabal --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/benchmarks/text-benchmarks.cabal 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/benchmarks/text-benchmarks.cabal 
1970-01-01 00:00:00.000000000 +0000 @@ -1,148 +0,0 @@ -cabal-version: 1.12 -name: text-benchmarks -version: 0.0.0.0 -synopsis: Benchmarks for the text package -description: Benchmarks for the text package -homepage: https://bitbucket.org/bos/text -license: BSD2 -license-file: ../LICENSE -author: Jasper Van der Jeugt , - Bryan O'Sullivan , - Tom Harper , - Duncan Coutts -maintainer: jaspervdj@gmail.com -category: Text -build-type: Simple - -flag bytestring-builder - description: Depend on the bytestring-builder package for backwards compatibility. - default: False - manual: False - -flag llvm - description: use LLVM - default: False - manual: True - -executable text-benchmarks - ghc-options: -Wall -O2 -rtsopts - if flag(llvm) - ghc-options: -fllvm - cpp-options: -DINTEGER_GMP - build-depends: array, - base == 4.*, - binary, - blaze-builder, - bytestring-lexing >= 0.5.0, - containers, - criterion >= 0.10.0.0, - deepseq, - directory, - filepath, - ghc-prim, - integer-gmp, - stringsearch, - template-haskell, - transformers, - utf8-string, - vector - - if flag(bytestring-builder) - build-depends: bytestring >= 0.9 && < 0.10.4, - bytestring-builder >= 0.10.4 - else - build-depends: bytestring >= 0.10.4 - - -- modules for benchmark proper - c-sources: cbits/time_iconv.c - hs-source-dirs: haskell - main-is: Benchmarks.hs - other-modules: - Benchmarks.Builder - Benchmarks.Concat - Benchmarks.DecodeUtf8 - Benchmarks.EncodeUtf8 - Benchmarks.Equality - Benchmarks.FileRead - Benchmarks.FoldLines - Benchmarks.Mul - Benchmarks.Programs.BigTable - Benchmarks.Programs.Cut - Benchmarks.Programs.Fold - Benchmarks.Programs.Sort - Benchmarks.Programs.StripTags - Benchmarks.Programs.Throughput - Benchmarks.Pure - Benchmarks.ReadNumbers - Benchmarks.Replace - Benchmarks.Search - Benchmarks.Stream - Benchmarks.WordFrequencies - - -- Source code for IUT (implementation under test) - -- "borrowed" from parent folder - include-dirs: ../include - c-sources: ../cbits/cbits.c - hs-source-dirs: .. 
- other-modules: - Data.Text - Data.Text.Array - Data.Text.Encoding - Data.Text.Encoding.Error - Data.Text.Foreign - Data.Text.IO - Data.Text.Internal - Data.Text.Internal.Builder - Data.Text.Internal.Builder.Functions - Data.Text.Internal.Builder.Int.Digits - Data.Text.Internal.Builder.RealFloat.Functions - Data.Text.Internal.Encoding.Fusion - Data.Text.Internal.Encoding.Fusion.Common - Data.Text.Internal.Encoding.Utf16 - Data.Text.Internal.Encoding.Utf32 - Data.Text.Internal.Encoding.Utf8 - Data.Text.Internal.Functions - Data.Text.Internal.Fusion - Data.Text.Internal.Fusion.CaseMapping - Data.Text.Internal.Fusion.Common - Data.Text.Internal.Fusion.Size - Data.Text.Internal.Fusion.Types - Data.Text.Internal.IO - Data.Text.Internal.Lazy - Data.Text.Internal.Lazy.Encoding.Fusion - Data.Text.Internal.Lazy.Fusion - Data.Text.Internal.Lazy.Search - Data.Text.Internal.Private - Data.Text.Internal.Read - Data.Text.Internal.Search - Data.Text.Internal.Unsafe - Data.Text.Internal.Unsafe.Char - Data.Text.Internal.Unsafe.Shift - Data.Text.Lazy - Data.Text.Lazy.Builder - Data.Text.Lazy.Builder.Int - Data.Text.Lazy.Builder.RealFloat - Data.Text.Lazy.Encoding - Data.Text.Lazy.IO - Data.Text.Lazy.Internal - Data.Text.Lazy.Read - Data.Text.Read - Data.Text.Unsafe - Data.Text.Show - - default-language: Haskell2010 - default-extensions: NondecreasingIndentation - - -executable text-multilang - hs-source-dirs: haskell - main-is: Multilang.hs - other-modules: Timer - ghc-options: -Wall -O2 - build-depends: base == 4.*, - bytestring, - text, - time - - default-language: Haskell2010 - default-extensions: NondecreasingIndentation diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/cbits/cbits.c cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/cbits/cbits.c --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/cbits/cbits.c 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/cbits/cbits.c 1970-01-01 00:00:00.000000000 +0000 @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2011 Bryan O'Sullivan . - * - * Portions copyright (c) 2008-2010 Björn Höhrmann . - * - * See http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ for details. - */ - -#include -#include -#include -#include "text_cbits.h" - -void _hs_text_memcpy(void *dest, size_t doff, const void *src, size_t soff, - size_t n) -{ - memcpy(dest + (doff<<1), src + (soff<<1), n<<1); -} - -int _hs_text_memcmp(const void *a, size_t aoff, const void *b, size_t boff, - size_t n) -{ - return memcmp(a + (aoff<<1), b + (boff<<1), n<<1); -} - -#define UTF8_ACCEPT 0 -#define UTF8_REJECT 12 - -static const uint8_t utf8d[] = { - /* - * The first part of the table maps bytes to character classes that - * to reduce the size of the transition table and create bitmasks. - */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, - 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, - 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, - 10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8, - - /* - * The second part is a transition table that maps a combination of - * a state of the automaton and a character class to a state. 
- */ - 0,12,24,36,60,96,84,12,12,12,48,72, 12,12,12,12,12,12,12,12,12,12,12,12, - 12, 0,12,12,12,12,12, 0,12, 0,12,12, 12,24,12,12,12,12,12,24,12,24,12,12, - 12,12,12,12,12,12,12,24,12,12,12,12, 12,24,12,12,12,12,12,12,12,24,12,12, - 12,12,12,12,12,12,12,36,12,36,12,12, 12,36,12,12,12,12,12,36,12,36,12,12, - 12,36,12,12,12,12,12,12,12,12,12,12, -}; - -static inline uint32_t -decode(uint32_t *state, uint32_t* codep, uint32_t byte) { - uint32_t type = utf8d[byte]; - - *codep = (*state != UTF8_ACCEPT) ? - (byte & 0x3fu) | (*codep << 6) : - (0xff >> type) & (byte); - - return *state = utf8d[256 + *state + type]; -} - -/* - * The ISO 8859-1 (aka latin-1) code points correspond exactly to the first 256 unicode - * code-points, therefore we can trivially convert from a latin-1 encoded bytestring to - * an UTF16 array - */ -void -_hs_text_decode_latin1(uint16_t *dest, const uint8_t *src, - const uint8_t *srcend) -{ - const uint8_t *p = src; - -#if defined(__i386__) || defined(__x86_64__) - /* This optimization works on a little-endian systems by using - (aligned) 32-bit loads instead of 8-bit loads - */ - - /* consume unaligned prefix */ - while (p != srcend && (uintptr_t)p & 0x3) - *dest++ = *p++; - - /* iterate over 32-bit aligned loads */ - while (p < srcend - 3) { - const uint32_t w = *((const uint32_t *)p); - - *dest++ = w & 0xff; - *dest++ = (w >> 8) & 0xff; - *dest++ = (w >> 16) & 0xff; - *dest++ = (w >> 24) & 0xff; - - p += 4; - } -#endif - - /* handle unaligned suffix */ - while (p != srcend) - *dest++ = *p++; -} - -/* - * A best-effort decoder. Runs until it hits either end of input or - * the start of an invalid byte sequence. - * - * At exit, we update *destoff with the next offset to write to, *src - * with the next source location past the last one successfully - * decoded, and return the next source location to read from. - * - * Moreover, we expose the internal decoder state (state0 and - * codepoint0), allowing one to restart the decoder after it - * terminates (say, due to a partial codepoint). - * - * In particular, there are a few possible outcomes, - * - * 1) We decoded the buffer entirely: - * In this case we return srcend - * state0 == UTF8_ACCEPT - * - * 2) We met an invalid encoding - * In this case we return the address of the first invalid byte - * state0 == UTF8_REJECT - * - * 3) We reached the end of the buffer while decoding a codepoint - * In this case we return a pointer to the first byte of the partial codepoint - * state0 != UTF8_ACCEPT, UTF8_REJECT - * - */ -#if defined(__GNUC__) || defined(__clang__) -static inline uint8_t const * -_hs_text_decode_utf8_int(uint16_t *const dest, size_t *destoff, - const uint8_t **src, const uint8_t *srcend, - uint32_t *codepoint0, uint32_t *state0) - __attribute((always_inline)); -#endif - -static inline uint8_t const * -_hs_text_decode_utf8_int(uint16_t *const dest, size_t *destoff, - const uint8_t **src, const uint8_t *srcend, - uint32_t *codepoint0, uint32_t *state0) -{ - uint16_t *d = dest + *destoff; - const uint8_t *s = *src, *last = *src; - uint32_t state = *state0; - uint32_t codepoint = *codepoint0; - - while (s < srcend) { -#if defined(__i386__) || defined(__x86_64__) - /* - * This code will only work on a little-endian system that - * supports unaligned loads. - * - * It gives a substantial speed win on data that is purely or - * partly ASCII (e.g. HTML), at only a slight cost on purely - * non-ASCII text. 
- */ - - if (state == UTF8_ACCEPT) { - while (s < srcend - 4) { - codepoint = *((uint32_t *) s); - if ((codepoint & 0x80808080) != 0) - break; - s += 4; - - /* - * Tried 32-bit stores here, but the extra bit-twiddling - * slowed the code down. - */ - - *d++ = (uint16_t) (codepoint & 0xff); - *d++ = (uint16_t) ((codepoint >> 8) & 0xff); - *d++ = (uint16_t) ((codepoint >> 16) & 0xff); - *d++ = (uint16_t) ((codepoint >> 24) & 0xff); - } - last = s; - } -#endif - - if (decode(&state, &codepoint, *s++) != UTF8_ACCEPT) { - if (state != UTF8_REJECT) - continue; - break; - } - - if (codepoint <= 0xffff) - *d++ = (uint16_t) codepoint; - else { - *d++ = (uint16_t) (0xD7C0 + (codepoint >> 10)); - *d++ = (uint16_t) (0xDC00 + (codepoint & 0x3FF)); - } - last = s; - } - - *destoff = d - dest; - *codepoint0 = codepoint; - *state0 = state; - *src = last; - - return s; -} - -uint8_t const * -_hs_text_decode_utf8_state(uint16_t *const dest, size_t *destoff, - const uint8_t **src, - const uint8_t *srcend, - uint32_t *codepoint0, uint32_t *state0) -{ - _hs_text_decode_utf8_int(dest, destoff, src, srcend, codepoint0, state0); - - return *src; -} - -/* - * Helper to decode buffer and discard final decoder state - */ -const uint8_t * -_hs_text_decode_utf8(uint16_t *const dest, size_t *destoff, - const uint8_t *src, const uint8_t *const srcend) -{ - uint32_t codepoint; - uint32_t state = UTF8_ACCEPT; - _hs_text_decode_utf8_int(dest, destoff, &src, srcend, - &codepoint, &state); - return src; -} - -void -_hs_text_encode_utf8(uint8_t **destp, const uint16_t *src, size_t srcoff, - size_t srclen) -{ - const uint16_t *srcend; - uint8_t *dest = *destp; - - src += srcoff; - srcend = src + srclen; - - ascii: -#if defined(__x86_64__) - while (srcend - src >= 4) { - uint64_t w = *((uint64_t *) src); - - if (w & 0xFF80FF80FF80FF80ULL) { - if (!(w & 0x000000000000FF80ULL)) { - *dest++ = w & 0xFFFF; - src++; - if (!(w & 0x00000000FF800000ULL)) { - *dest++ = (w >> 16) & 0xFFFF; - src++; - if (!(w & 0x0000FF8000000000ULL)) { - *dest++ = (w >> 32) & 0xFFFF; - src++; - } - } - } - break; - } - *dest++ = w & 0xFFFF; - *dest++ = (w >> 16) & 0xFFFF; - *dest++ = (w >> 32) & 0xFFFF; - *dest++ = w >> 48; - src += 4; - } -#endif - -#if defined(__i386__) - while (srcend - src >= 2) { - uint32_t w = *((uint32_t *) src); - - if (w & 0xFF80FF80) - break; - *dest++ = w & 0xFFFF; - *dest++ = w >> 16; - src += 2; - } -#endif - - while (src < srcend) { - uint16_t w = *src++; - - if (w <= 0x7F) { - *dest++ = w; - /* An ASCII byte is likely to begin a run of ASCII bytes. - Falling back into the fast path really helps performance. 
*/ - goto ascii; - } - else if (w <= 0x7FF) { - *dest++ = (w >> 6) | 0xC0; - *dest++ = (w & 0x3f) | 0x80; - } - else if (w < 0xD800 || w > 0xDBFF) { - *dest++ = (w >> 12) | 0xE0; - *dest++ = ((w >> 6) & 0x3F) | 0x80; - *dest++ = (w & 0x3F) | 0x80; - } else { - uint32_t c = ((((uint32_t) w) - 0xD800) << 10) + - (((uint32_t) *src++) - 0xDC00) + 0x10000; - *dest++ = (c >> 18) | 0xF0; - *dest++ = ((c >> 12) & 0x3F) | 0x80; - *dest++ = ((c >> 6) & 0x3F) | 0x80; - *dest++ = (c & 0x3F) | 0x80; - } - } - - *destp = dest; -} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/changelog.md cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/changelog.md --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/changelog.md 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/changelog.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,226 +0,0 @@ -### 1.2.4.0 - -* Add TH `Lift` instances for `Data.Text.Text` and `Data.Text.Lazy.Text` (gh-232) - -* Update Haddock documentation to better reflect fusion eligibility; improve fusion - rules for `takeWhileEnd` and `length` (gh-241, ghc-202) - -* Optimise `Data.Text.replicate` from `O(n)` to `O(log n)` (gh-209) - -* Support `base-4.13.0.0` - -### 1.2.3.1 - -* Make `decodeUtf8With` fail explicitly for unsupported non-BMP - replacement characters instead silent undefined behaviour (gh-213) - -* Fix termination condition for file reads via `Data.Text.IO` - operations (gh-223) - -* A serious correctness issue affecting uses of `take` and `drop` with - negative counts has been fixed (gh-227) - -* A bug in the case-mapping functions resulting in unreasonably large - allocations with large arguments has been fixed (gh-221) - -### 1.2.3.0 - -* Spec compliance: `toCaseFold` now follows the Unicode 9.0 spec - (updated from 8.0). - -* Bug fix: the lazy `takeWhileEnd` function violated the - [lazy text invariant](https://github.com/bos/text/blob/1.2.3.0/Data/Text/Internal/Lazy.hs#L51) - (gh-184). - -* Bug fix: Fixed usage of size hints causing incorrect behavior (gh-197). - -* New function: `unsnoc` (gh-173). - -* Reduce memory overhead in `encodeUTF8` (gh-194). - -* Improve UTF-8 decoder error-recovery (gh-182). - -* Minor documentation improvements (`@since` annotations, more - examples, clarifications). - -#### 1.2.2.2 - -* The `toTitle` function now correctly handles letters that - immediately follow punctuation. Before, `"there's"` would turn into - `"There'S"`. Now, it becomes `"There's"`. - -* The implementation of unstreaming is faster, resulting in operations - such as `map` and `intersperse` speeding up by up to 30%, with - smaller code generated. - -* The optimised length comparison function is now more likely to be - used after some rewrite rule tweaking. - -* Bug fix: an off-by-one bug in `takeEnd` is fixed. - -* Bug fix: a logic error in `takeWord16` is fixed. - -#### 1.2.2.1 - -* The switch to `integer-pure` in 1.2.2.0 was apparently mistaken. - The build flag has been renamed accordingly. Your army of diligent - maintainers apologizes for the churn. - -* Spec compliance: `toCaseFold` now follows the Unicode 8.0 spec - (updated from 7.0) - -* An STG lint error has been fixed - -### 1.2.2.0 - -* The `integer-simple` package, upon which this package optionally - depended, has been replaced with `integer-pure`. The build flag has - been renamed accordingly. 
- -* Bug fix: For the `Binary` instance, If UTF-8 decoding fails during a - `get`, the error is propagated via `fail` instead of an uncatchable - crash. - -* New function: `takeWhileEnd` - -* New instances for the `Text` types: - * if `base` >= 4.7: `PrintfArg` - * if `base` >= 4.9: `Semigroup` - -#### 1.2.1.3 - -* Bug fix: As it turns out, moving the literal rewrite rules to simplifier - phase 2 does not prevent competition with the `unpack` rule, which is - also active in this phase. Unfortunately this was hidden due to a silly - test environment mistake. Moving literal rules back to phase 1 finally - fixes GHC Trac #10528 correctly. - -#### 1.2.1.2 - -* Bug fix: Run literal rewrite rules in simplifier phase 2. - The behavior of the simplifier changed in GHC 7.10.2, - causing these rules to fail to fire, leading to poor code generation - and long compilation times. See - [GHC Trac #10528](https://ghc.haskell.org/trac/ghc/ticket/10528). - -#### 1.2.1.1 - -* Expose unpackCString#, which you should never use. - -### 1.2.1.0 - -* Added Binary instances for both Text types. (If you have previously - been using the text-binary package to get a Binary instance, it is - now obsolete.) - -#### 1.2.0.6 - -* Fixed a space leak in UTF-8 decoding - -#### 1.2.0.5 - -* Feature parity: repeat, cycle, iterate are now implemented for lazy - Text, and the Data instance is more complete - -* Build speed: an inliner space explosion has been fixed with toCaseFold - -* Bug fix: encoding Int to a Builder would infinite-loop if the - integer-simple package was used - -* Deprecation: OnEncodeError and EncodeError are deprecated, as they - are never used - -* Internals: some types that are used internally in fusion-related - functions have moved around, been renamed, or been deleted (we don't - bump the major version if .Internal modules change) - -* Spec compliance: toCaseFold now follows the Unicode 7.0 spec - (updated from 6.3) - -#### 1.2.0.4 - -* Fixed an incompatibility with base < 4.5 - -#### 1.2.0.3 - -* Update formatRealFloat to correspond to the definition in versions - of base newer than 4.5 (https://github.com/bos/text/issues/105) - -#### 1.2.0.2 - -* Bumped lower bound on deepseq to 1.4 for compatibility with the - upcoming GHC 7.10 - -#### 1.2.0.1 - -* Fixed a buffer overflow in rendering of large Integers - (https://github.com/bos/text/issues/99) - -## 1.2.0.0 - -* Fixed an integer overflow in the replace function - (https://github.com/bos/text/issues/81) - -* Fixed a hang in lazy decodeUtf8With - (https://github.com/bos/text/issues/87) - -* Reduced codegen bloat caused by use of empty and single-character - literals - -* Added an instance of IsList for GHC 7.8 and above - -### 1.1.1.0 - -* The Data.Data instance now allows gunfold to work, via a virtual - pack constructor - -* dropEnd, takeEnd: new functions - -* Comparing the length of a Text against a number can now - short-circuit in more cases - -#### 1.1.0.1 - -* streamDecodeUtf8: fixed gh-70, did not return all unconsumed bytes - in single-byte chunks - -## 1.1.0.0 - -* encodeUtf8: Performance is improved by up to 4x. - -* encodeUtf8Builder, encodeUtf8BuilderEscaped: new functions, - available only if bytestring >= 0.10.4.0 is installed, that allow - very fast and flexible encoding of a Text value to a bytestring - Builder. - - As an example of the performance gain to be had, the - encodeUtf8BuilderEscaped function helps to double the speed of JSON - encoding in the latest version of aeson! 
(Note: if all you need is a - plain ByteString, encodeUtf8 is still the faster way to go.) - -* All of the internal module hierarchy is now publicly exposed. If a - module is in the .Internal hierarchy, or is documented as internal, - use at your own risk - there are no API stability guarantees for - internal modules! - -#### 1.0.0.1 - -* decodeUtf8: Fixed a regression that caused us to incorrectly - identify truncated UTF-8 as valid (gh-61) - -# 1.0.0.0 - -* Added support for Unicode 6.3.0 to case conversion functions - -* New function toTitle converts words in a string to title case - -* New functions peekCStringLen and withCStringLen simplify - interoperability with C functionns - -* Added support for decoding UTF-8 in stream-friendly fashion - -* Fixed a bug in mapAccumL - -* Added trusted Haskell support - -* Removed support for GHC 6.10 (released in 2008) and older diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Array.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Array.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Array.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Array.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,249 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, MagicHash, Rank2Types, - RecordWildCards, UnboxedTuples, UnliftedFFITypes #-} -{-# OPTIONS_GHC -fno-warn-unused-matches #-} --- | --- Module : Data.Text.Array --- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : portable --- --- Packed, unboxed, heap-resident arrays. Suitable for performance --- critical use, both in terms of large data quantities and high --- speed. --- --- This module is intended to be imported @qualified@, to avoid name --- clashes with "Prelude" functions, e.g. --- --- > import qualified Data.Text.Array as A --- --- The names in this module resemble those in the 'Data.Array' family --- of modules, but are shorter due to the assumption of qualified --- naming. -module Data.Text.Array - ( - -- * Types - Array(Array, aBA) - , MArray(MArray, maBA) - - -- * Functions - , copyM - , copyI - , empty - , equal -#if defined(ASSERTS) - , length -#endif - , run - , run2 - , toList - , unsafeFreeze - , unsafeIndex - , new - , unsafeWrite - ) where - -#if defined(ASSERTS) --- This fugly hack is brought by GHC's apparent reluctance to deal --- with MagicHash and UnboxedTuples when inferring types. Eek! -# define CHECK_BOUNDS(_func_,_len_,_k_) \ -if (_k_) < 0 || (_k_) >= (_len_) then error ("Data.Text.Array." 
++ (_func_) ++ ": bounds error, offset " ++ show (_k_) ++ ", length " ++ show (_len_)) else -#else -# define CHECK_BOUNDS(_func_,_len_,_k_) -#endif - -#include "MachDeps.h" - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -#if MIN_VERSION_base(4,4,0) -import Control.Monad.ST.Unsafe (unsafeIOToST) -#else -import Control.Monad.ST (unsafeIOToST) -#endif -import Data.Bits ((.&.), xor) -import Data.Text.Internal.Unsafe (inlinePerformIO) -import Data.Text.Internal.Unsafe.Shift (shiftL, shiftR) -#if MIN_VERSION_base(4,5,0) -import Foreign.C.Types (CInt(CInt), CSize(CSize)) -#else -import Foreign.C.Types (CInt, CSize) -#endif -import GHC.Base (ByteArray#, MutableByteArray#, Int(..), - indexWord16Array#, newByteArray#, - unsafeFreezeByteArray#, writeWord16Array#) -import GHC.ST (ST(..), runST) -import GHC.Word (Word16(..)) -import Prelude hiding (length, read) - --- | Immutable array type. --- --- The 'Array' constructor is exposed since @text-1.1.1.3@ -data Array = Array { - aBA :: ByteArray# -#if defined(ASSERTS) - , aLen :: {-# UNPACK #-} !Int -- length (in units of Word16, not bytes) -#endif - } - --- | Mutable array type, for use in the ST monad. --- --- The 'MArray' constructor is exposed since @text-1.1.1.3@ -data MArray s = MArray { - maBA :: MutableByteArray# s -#if defined(ASSERTS) - , maLen :: {-# UNPACK #-} !Int -- length (in units of Word16, not bytes) -#endif - } - -#if defined(ASSERTS) --- | Operations supported by all arrays. -class IArray a where - -- | Return the length of an array. - length :: a -> Int - -instance IArray Array where - length = aLen - {-# INLINE length #-} - -instance IArray (MArray s) where - length = maLen - {-# INLINE length #-} -#endif - --- | Create an uninitialized mutable array. -new :: forall s. Int -> ST s (MArray s) -new n - | n < 0 || n .&. highBit /= 0 = array_size_error - | otherwise = ST $ \s1# -> - case newByteArray# len# s1# of - (# s2#, marr# #) -> (# s2#, MArray marr# -#if defined(ASSERTS) - n -#endif - #) - where !(I# len#) = bytesInArray n - highBit = maxBound `xor` (maxBound `shiftR` 1) -{-# INLINE new #-} - -array_size_error :: a -array_size_error = error "Data.Text.Array.new: size overflow" - --- | Freeze a mutable array. Do not mutate the 'MArray' afterwards! -unsafeFreeze :: MArray s -> ST s Array -unsafeFreeze MArray{..} = ST $ \s1# -> - case unsafeFreezeByteArray# maBA s1# of - (# s2#, ba# #) -> (# s2#, Array ba# -#if defined(ASSERTS) - maLen -#endif - #) -{-# INLINE unsafeFreeze #-} - --- | Indicate how many bytes would be used for an array of the given --- size. -bytesInArray :: Int -> Int -bytesInArray n = n `shiftL` 1 -{-# INLINE bytesInArray #-} - --- | Unchecked read of an immutable array. May return garbage or --- crash on an out-of-bounds access. -unsafeIndex :: Array -> Int -> Word16 -unsafeIndex Array{..} i@(I# i#) = - CHECK_BOUNDS("unsafeIndex",aLen,i) - case indexWord16Array# aBA i# of r# -> (W16# r#) -{-# INLINE unsafeIndex #-} - --- | Unchecked write of a mutable array. May return garbage or crash --- on an out-of-bounds access. -unsafeWrite :: MArray s -> Int -> Word16 -> ST s () -unsafeWrite MArray{..} i@(I# i#) (W16# e#) = ST $ \s1# -> - CHECK_BOUNDS("unsafeWrite",maLen,i) - case writeWord16Array# maBA i# e# s1# of - s2# -> (# s2#, () #) -{-# INLINE unsafeWrite #-} - --- | Convert an immutable array to a list. -toList :: Array -> Int -> Int -> [Word16] -toList ary off len = loop 0 - where loop i | i < len = unsafeIndex ary (off+i) : loop (i+1) - | otherwise = [] - --- | An empty immutable array. 
-empty :: Array -empty = runST (new 0 >>= unsafeFreeze) - --- | Run an action in the ST monad and return an immutable array of --- its result. -run :: (forall s. ST s (MArray s)) -> Array -run k = runST (k >>= unsafeFreeze) - --- | Run an action in the ST monad and return an immutable array of --- its result paired with whatever else the action returns. -run2 :: (forall s. ST s (MArray s, a)) -> (Array, a) -run2 k = runST (do - (marr,b) <- k - arr <- unsafeFreeze marr - return (arr,b)) -{-# INLINE run2 #-} - --- | Copy some elements of a mutable array. -copyM :: MArray s -- ^ Destination - -> Int -- ^ Destination offset - -> MArray s -- ^ Source - -> Int -- ^ Source offset - -> Int -- ^ Count - -> ST s () -copyM dest didx src sidx count - | count <= 0 = return () - | otherwise = -#if defined(ASSERTS) - assert (sidx + count <= length src) . - assert (didx + count <= length dest) . -#endif - unsafeIOToST $ memcpyM (maBA dest) (fromIntegral didx) - (maBA src) (fromIntegral sidx) - (fromIntegral count) -{-# INLINE copyM #-} - --- | Copy some elements of an immutable array. -copyI :: MArray s -- ^ Destination - -> Int -- ^ Destination offset - -> Array -- ^ Source - -> Int -- ^ Source offset - -> Int -- ^ First offset in destination /not/ to - -- copy (i.e. /not/ length) - -> ST s () -copyI dest i0 src j0 top - | i0 >= top = return () - | otherwise = unsafeIOToST $ - memcpyI (maBA dest) (fromIntegral i0) - (aBA src) (fromIntegral j0) - (fromIntegral (top-i0)) -{-# INLINE copyI #-} - --- | Compare portions of two arrays for equality. No bounds checking --- is performed. -equal :: Array -- ^ First - -> Int -- ^ Offset into first - -> Array -- ^ Second - -> Int -- ^ Offset into second - -> Int -- ^ Count - -> Bool -equal arrA offA arrB offB count = inlinePerformIO $ do - i <- memcmp (aBA arrA) (fromIntegral offA) - (aBA arrB) (fromIntegral offB) (fromIntegral count) - return $! i == 0 -{-# INLINE equal #-} - -foreign import ccall unsafe "_hs_text_memcpy" memcpyI - :: MutableByteArray# s -> CSize -> ByteArray# -> CSize -> CSize -> IO () - -foreign import ccall unsafe "_hs_text_memcmp" memcmp - :: ByteArray# -> CSize -> ByteArray# -> CSize -> CSize -> IO CInt - -foreign import ccall unsafe "_hs_text_memcpy" memcpyM - :: MutableByteArray# s -> CSize -> MutableByteArray# s -> CSize -> CSize - -> IO () diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Encoding/Error.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Encoding/Error.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Encoding/Error.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Encoding/Error.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,124 +0,0 @@ -{-# LANGUAGE CPP, DeriveDataTypeable #-} -#if __GLASGOW_HASKELL__ >= 704 -{-# LANGUAGE Safe #-} -#elif __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- | --- Module : Data.Text.Encoding.Error --- Copyright : (c) Bryan O'Sullivan 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : GHC --- --- Types and functions for dealing with encoding and decoding errors --- in Unicode text. --- --- The standard functions for encoding and decoding text are strict, --- which is to say that they throw exceptions on invalid input. This --- is often unhelpful on real world input, so alternative functions --- exist that accept custom handlers for dealing with invalid inputs. 
--- These 'OnError' handlers are normal Haskell functions. You can use --- one of the presupplied functions in this module, or you can write a --- custom handler of your own. - -module Data.Text.Encoding.Error - ( - -- * Error handling types - UnicodeException(..) - , OnError - , OnDecodeError - , OnEncodeError - -- * Useful error handling functions - , lenientDecode - , strictDecode - , strictEncode - , ignore - , replace - ) where - -import Control.DeepSeq (NFData (..)) -import Control.Exception (Exception, throw) -import Data.Typeable (Typeable) -import Data.Word (Word8) -import Numeric (showHex) - --- | Function type for handling a coding error. It is supplied with --- two inputs: --- --- * A 'String' that describes the error. --- --- * The input value that caused the error. If the error arose --- because the end of input was reached or could not be identified --- precisely, this value will be 'Nothing'. --- --- If the handler returns a value wrapped with 'Just', that value will --- be used in the output as the replacement for the invalid input. If --- it returns 'Nothing', no value will be used in the output. --- --- Should the handler need to abort processing, it should use 'error' --- or 'throw' an exception (preferably a 'UnicodeException'). It may --- use the description provided to construct a more helpful error --- report. -type OnError a b = String -> Maybe a -> Maybe b - --- | A handler for a decoding error. -type OnDecodeError = OnError Word8 Char - --- | A handler for an encoding error. -{-# DEPRECATED OnEncodeError "This exception is never used in practice, and will be removed." #-} -type OnEncodeError = OnError Char Word8 - --- | An exception type for representing Unicode encoding errors. -data UnicodeException = - DecodeError String (Maybe Word8) - -- ^ Could not decode a byte sequence because it was invalid under - -- the given encoding, or ran out of input in mid-decode. - | EncodeError String (Maybe Char) - -- ^ Tried to encode a character that could not be represented - -- under the given encoding, or ran out of input in mid-encode. - deriving (Eq, Typeable) - -{-# DEPRECATED EncodeError "This constructor is never used, and will be removed." #-} - -showUnicodeException :: UnicodeException -> String -showUnicodeException (DecodeError desc (Just w)) - = "Cannot decode byte '\\x" ++ showHex w ("': " ++ desc) -showUnicodeException (DecodeError desc Nothing) - = "Cannot decode input: " ++ desc -showUnicodeException (EncodeError desc (Just c)) - = "Cannot encode character '\\x" ++ showHex (fromEnum c) ("': " ++ desc) -showUnicodeException (EncodeError desc Nothing) - = "Cannot encode input: " ++ desc - -instance Show UnicodeException where - show = showUnicodeException - -instance Exception UnicodeException - -instance NFData UnicodeException where - rnf (DecodeError desc w) = rnf desc `seq` rnf w `seq` () - rnf (EncodeError desc c) = rnf desc `seq` rnf c `seq` () - --- | Throw a 'UnicodeException' if decoding fails. -strictDecode :: OnDecodeError -strictDecode desc c = throw (DecodeError desc c) - --- | Replace an invalid input byte with the Unicode replacement --- character U+FFFD. -lenientDecode :: OnDecodeError -lenientDecode _ _ = Just '\xfffd' - --- | Throw a 'UnicodeException' if encoding fails. -{-# DEPRECATED strictEncode "This function always throws an exception, and will be removed." #-} -strictEncode :: OnEncodeError -strictEncode desc c = throw (EncodeError desc c) - --- | Ignore an invalid input, substituting nothing in the output. 
-ignore :: OnError a b -ignore _ _ = Nothing - --- | Replace an invalid input with a valid output. -replace :: b -> OnError a b -replace c _ _ = Just c diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Encoding.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Encoding.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Encoding.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Encoding.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,535 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, GeneralizedNewtypeDeriving, MagicHash, - UnliftedFFITypes #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- | --- Module : Data.Text.Encoding --- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts, --- (c) 2008, 2009 Tom Harper --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : portable --- --- Functions for converting 'Text' values to and from 'ByteString', --- using several standard encodings. --- --- To gain access to a much larger family of encodings, use the --- . - -module Data.Text.Encoding - ( - -- * Decoding ByteStrings to Text - -- $strict - decodeASCII - , decodeLatin1 - , decodeUtf8 - , decodeUtf16LE - , decodeUtf16BE - , decodeUtf32LE - , decodeUtf32BE - - -- ** Catchable failure - , decodeUtf8' - - -- ** Controllable error handling - , decodeUtf8With - , decodeUtf16LEWith - , decodeUtf16BEWith - , decodeUtf32LEWith - , decodeUtf32BEWith - - -- ** Stream oriented decoding - -- $stream - , streamDecodeUtf8 - , streamDecodeUtf8With - , Decoding(..) - - -- * Encoding Text to ByteStrings - , encodeUtf8 - , encodeUtf16LE - , encodeUtf16BE - , encodeUtf32LE - , encodeUtf32BE - - -- * Encoding Text using ByteString Builders - , encodeUtf8Builder - , encodeUtf8BuilderEscaped - ) where - -#if MIN_VERSION_base(4,4,0) -import Control.Monad.ST.Unsafe (unsafeIOToST, unsafeSTToIO) -#else -import Control.Monad.ST (unsafeIOToST, unsafeSTToIO) -#endif - -import Control.Exception (evaluate, try, throwIO, ErrorCall(ErrorCall)) -import Control.Monad.ST (runST) -import Data.Bits ((.&.)) -import Data.ByteString as B -import Data.ByteString.Internal as B hiding (c2w) -import Data.Text.Encoding.Error (OnDecodeError, UnicodeException, strictDecode) -import Data.Text.Internal (Text(..), safe, text) -import Data.Text.Internal.Private (runText) -import Data.Text.Internal.Unsafe.Char (ord, unsafeWrite) -import Data.Text.Internal.Unsafe.Shift (shiftR) -import Data.Text.Show () -import Data.Text.Unsafe (unsafeDupablePerformIO) -import Data.Word (Word8, Word32) -#if MIN_VERSION_base(4,5,0) -import Foreign.C.Types (CSize(CSize)) -#else -import Foreign.C.Types (CSize) -#endif -import Foreign.ForeignPtr (withForeignPtr) -import Foreign.Marshal.Utils (with) -import Foreign.Ptr (Ptr, minusPtr, nullPtr, plusPtr) -import Foreign.Storable (Storable, peek, poke) -import GHC.Base (ByteArray#, MutableByteArray#) -import qualified Data.ByteString.Builder as B -import qualified Data.ByteString.Builder.Internal as B hiding (empty, append) -import qualified Data.ByteString.Builder.Prim as BP -import qualified Data.ByteString.Builder.Prim.Internal as BP -import qualified Data.Text.Array as A -import qualified Data.Text.Internal.Encoding.Fusion as E -import qualified Data.Text.Internal.Encoding.Utf16 as U16 -import qualified Data.Text.Internal.Fusion as F - -#include "text_cbits.h" - --- $strict --- --- All of the 
single-parameter functions for decoding bytestrings --- encoded in one of the Unicode Transformation Formats (UTF) operate --- in a /strict/ mode: each will throw an exception if given invalid --- input. --- --- Each function has a variant, whose name is suffixed with -'With', --- that gives greater control over the handling of decoding errors. --- For instance, 'decodeUtf8' will throw an exception, but --- 'decodeUtf8With' allows the programmer to determine what to do on a --- decoding error. - --- | /Deprecated/. Decode a 'ByteString' containing 7-bit ASCII --- encoded text. -decodeASCII :: ByteString -> Text -decodeASCII = decodeUtf8 -{-# DEPRECATED decodeASCII "Use decodeUtf8 instead" #-} - --- | Decode a 'ByteString' containing Latin-1 (aka ISO-8859-1) encoded text. --- --- 'decodeLatin1' is semantically equivalent to --- @Data.Text.pack . Data.ByteString.Char8.unpack@ -decodeLatin1 :: ByteString -> Text -decodeLatin1 (PS fp off len) = text a 0 len - where - a = A.run (A.new len >>= unsafeIOToST . go) - go dest = withForeignPtr fp $ \ptr -> do - c_decode_latin1 (A.maBA dest) (ptr `plusPtr` off) (ptr `plusPtr` (off+len)) - return dest - --- | Decode a 'ByteString' containing UTF-8 encoded text. --- --- __NOTE__: The replacement character returned by 'OnDecodeError' --- MUST be within the BMP plane; surrogate code points will --- automatically be remapped to the replacement char @U+FFFD@ --- (/since 0.11.3.0/), whereas code points beyond the BMP will throw an --- 'error' (/since 1.2.3.1/); For earlier versions of @text@ using --- those unsupported code points would result in undefined behavior. -decodeUtf8With :: OnDecodeError -> ByteString -> Text -decodeUtf8With onErr (PS fp off len) = runText $ \done -> do - let go dest = withForeignPtr fp $ \ptr -> - with (0::CSize) $ \destOffPtr -> do - let end = ptr `plusPtr` (off + len) - loop curPtr = do - curPtr' <- c_decode_utf8 (A.maBA dest) destOffPtr curPtr end - if curPtr' == end - then do - n <- peek destOffPtr - unsafeSTToIO (done dest (fromIntegral n)) - else do - x <- peek curPtr' - case onErr desc (Just x) of - Nothing -> loop $ curPtr' `plusPtr` 1 - Just c - | c > '\xFFFF' -> throwUnsupportedReplChar - | otherwise -> do - destOff <- peek destOffPtr - w <- unsafeSTToIO $ - unsafeWrite dest (fromIntegral destOff) - (safe c) - poke destOffPtr (destOff + fromIntegral w) - loop $ curPtr' `plusPtr` 1 - loop (ptr `plusPtr` off) - (unsafeIOToST . go) =<< A.new len - where - desc = "Data.Text.Internal.Encoding.decodeUtf8: Invalid UTF-8 stream" - - throwUnsupportedReplChar = throwIO $ - ErrorCall "decodeUtf8With: non-BMP replacement characters not supported" - -- TODO: The code currently assumes that the transcoded UTF-16 - -- stream is at most twice as long (in bytes) as the input UTF-8 - -- stream. To justify this assumption one has to assume that the - -- error handler replacement character also satisfies this - -- invariant, by emitting at most one UTF16 code unit. - -- - -- One easy way to support the full range of code-points for - -- replacement characters in the error handler is to simply change - -- the (over-)allocation to `A.new (2*len)` and then shrink back the - -- `ByteArray#` to the real size (recent GHCs have a cheap - -- `ByteArray#` resize-primop for that which allow the GC to reclaim - -- the overallocation). However, this would require 4 times as much - -- (temporary) storage as the original UTF-8 required. 
- -- - -- Another strategy would be to optimistically assume that - -- replacement characters are within the BMP, and if the case of a - -- non-BMP replacement occurs reallocate the target buffer (or throw - -- an exception, and fallback to a pessimistic codepath, like e.g. - -- `decodeUtf8With onErr bs = F.unstream (E.streamUtf8 onErr bs)`) - -- - -- Alternatively, `OnDecodeError` could become a datastructure which - -- statically encodes the replacement-character range, - -- e.g. something isomorphic to - -- - -- Either (... -> Maybe Word16) (... -> Maybe Char) - -- - -- And allow to statically switch between the BMP/non-BMP - -- replacement-character codepaths. There's multiple ways to address - -- this with different tradeoffs; but ideally we should optimise for - -- the optimistic/error-free case. -{- INLINE[0] decodeUtf8With #-} - --- $stream --- --- The 'streamDecodeUtf8' and 'streamDecodeUtf8With' functions accept --- a 'ByteString' that represents a possibly incomplete input (e.g. a --- packet from a network stream) that may not end on a UTF-8 boundary. --- --- 1. The maximal prefix of 'Text' that could be decoded from the --- given input. --- --- 2. The suffix of the 'ByteString' that could not be decoded due to --- insufficient input. --- --- 3. A function that accepts another 'ByteString'. That string will --- be assumed to directly follow the string that was passed as --- input to the original function, and it will in turn be decoded. --- --- To help understand the use of these functions, consider the Unicode --- string @\"hi ☃\"@. If encoded as UTF-8, this becomes @\"hi --- \\xe2\\x98\\x83\"@; the final @\'☃\'@ is encoded as 3 bytes. --- --- Now suppose that we receive this encoded string as 3 packets that --- are split up on untidy boundaries: @[\"hi \\xe2\", \"\\x98\", --- \"\\x83\"]@. We cannot decode the entire Unicode string until we --- have received all three packets, but we would like to make progress --- as we receive each one. --- --- @ --- ghci> let s0\@('Some' _ _ f0) = 'streamDecodeUtf8' \"hi \\xe2\" --- ghci> s0 --- 'Some' \"hi \" \"\\xe2\" _ --- @ --- --- We use the continuation @f0@ to decode our second packet. --- --- @ --- ghci> let s1\@('Some' _ _ f1) = f0 \"\\x98\" --- ghci> s1 --- 'Some' \"\" \"\\xe2\\x98\" --- @ --- --- We could not give @f0@ enough input to decode anything, so it --- returned an empty string. Once we feed our second continuation @f1@ --- the last byte of input, it will make progress. --- --- @ --- ghci> let s2\@('Some' _ _ f2) = f1 \"\\x83\" --- ghci> s2 --- 'Some' \"\\x2603\" \"\" _ --- @ --- --- If given invalid input, an exception will be thrown by the function --- or continuation where it is encountered. - --- | A stream oriented decoding result. --- --- @since 1.0.0.0 -data Decoding = Some Text ByteString (ByteString -> Decoding) - -instance Show Decoding where - showsPrec d (Some t bs _) = showParen (d > prec) $ - showString "Some " . showsPrec prec' t . - showChar ' ' . showsPrec prec' bs . - showString " _" - where prec = 10; prec' = prec + 1 - -newtype CodePoint = CodePoint Word32 deriving (Eq, Show, Num, Storable) -newtype DecoderState = DecoderState Word32 deriving (Eq, Show, Num, Storable) - --- | Decode, in a stream oriented way, a 'ByteString' containing UTF-8 --- encoded text that is known to be valid. --- --- If the input contains any invalid UTF-8 data, an exception will be --- thrown (either by this function or a continuation) that cannot be --- caught in pure code. 
For more control over the handling of invalid --- data, use 'streamDecodeUtf8With'. --- --- @since 1.0.0.0 -streamDecodeUtf8 :: ByteString -> Decoding -streamDecodeUtf8 = streamDecodeUtf8With strictDecode - --- | Decode, in a stream oriented way, a 'ByteString' containing UTF-8 --- encoded text. --- --- @since 1.0.0.0 -streamDecodeUtf8With :: OnDecodeError -> ByteString -> Decoding -streamDecodeUtf8With onErr = decodeChunk B.empty 0 0 - where - -- We create a slightly larger than necessary buffer to accommodate a - -- potential surrogate pair started in the last buffer - decodeChunk :: ByteString -> CodePoint -> DecoderState -> ByteString - -> Decoding - decodeChunk undecoded0 codepoint0 state0 bs@(PS fp off len) = - runST $ (unsafeIOToST . decodeChunkToBuffer) =<< A.new (len+1) - where - decodeChunkToBuffer :: A.MArray s -> IO Decoding - decodeChunkToBuffer dest = withForeignPtr fp $ \ptr -> - with (0::CSize) $ \destOffPtr -> - with codepoint0 $ \codepointPtr -> - with state0 $ \statePtr -> - with nullPtr $ \curPtrPtr -> - let end = ptr `plusPtr` (off + len) - loop curPtr = do - poke curPtrPtr curPtr - curPtr' <- c_decode_utf8_with_state (A.maBA dest) destOffPtr - curPtrPtr end codepointPtr statePtr - state <- peek statePtr - case state of - UTF8_REJECT -> do - -- We encountered an encoding error - x <- peek curPtr' - poke statePtr 0 - case onErr desc (Just x) of - Nothing -> loop $ curPtr' `plusPtr` 1 - Just c -> do - destOff <- peek destOffPtr - w <- unsafeSTToIO $ - unsafeWrite dest (fromIntegral destOff) (safe c) - poke destOffPtr (destOff + fromIntegral w) - loop $ curPtr' `plusPtr` 1 - - _ -> do - -- We encountered the end of the buffer while decoding - n <- peek destOffPtr - codepoint <- peek codepointPtr - chunkText <- unsafeSTToIO $ do - arr <- A.unsafeFreeze dest - return $! text arr 0 (fromIntegral n) - lastPtr <- peek curPtrPtr - let left = lastPtr `minusPtr` curPtr - !undecoded = case state of - UTF8_ACCEPT -> B.empty - _ -> B.append undecoded0 (B.drop left bs) - return $ Some chunkText undecoded - (decodeChunk undecoded codepoint state) - in loop (ptr `plusPtr` off) - desc = "Data.Text.Internal.Encoding.streamDecodeUtf8With: Invalid UTF-8 stream" - --- | Decode a 'ByteString' containing UTF-8 encoded text that is known --- to be valid. --- --- If the input contains any invalid UTF-8 data, an exception will be --- thrown that cannot be caught in pure code. For more control over --- the handling of invalid data, use 'decodeUtf8'' or --- 'decodeUtf8With'. -decodeUtf8 :: ByteString -> Text -decodeUtf8 = decodeUtf8With strictDecode -{-# INLINE[0] decodeUtf8 #-} -{-# RULES "STREAM stream/decodeUtf8 fusion" [1] - forall bs. F.stream (decodeUtf8 bs) = E.streamUtf8 strictDecode bs #-} - --- | Decode a 'ByteString' containing UTF-8 encoded text. --- --- If the input contains any invalid UTF-8 data, the relevant --- exception will be returned, otherwise the decoded text. -decodeUtf8' :: ByteString -> Either UnicodeException Text -decodeUtf8' = unsafeDupablePerformIO . try . evaluate . decodeUtf8With strictDecode -{-# INLINE decodeUtf8' #-} - --- | Encode text to a ByteString 'B.Builder' using UTF-8 encoding. --- --- @since 1.1.0.0 -encodeUtf8Builder :: Text -> B.Builder -encodeUtf8Builder = encodeUtf8BuilderEscaped (BP.liftFixedToBounded BP.word8) - --- | Encode text using UTF-8 encoding and escape the ASCII characters using --- a 'BP.BoundedPrim'. --- --- Use this function is to implement efficient encoders for text-based formats --- like JSON or HTML. 
--- --- @since 1.1.0.0 -{-# INLINE encodeUtf8BuilderEscaped #-} --- TODO: Extend documentation with references to source code in @blaze-html@ --- or @aeson@ that uses this function. -encodeUtf8BuilderEscaped :: BP.BoundedPrim Word8 -> Text -> B.Builder -encodeUtf8BuilderEscaped be = - -- manual eta-expansion to ensure inlining works as expected - \txt -> B.builder (mkBuildstep txt) - where - bound = max 4 $ BP.sizeBound be - - mkBuildstep (Text arr off len) !k = - outerLoop off - where - iend = off + len - - outerLoop !i0 !br@(B.BufferRange op0 ope) - | i0 >= iend = k br - | outRemaining > 0 = goPartial (i0 + min outRemaining inpRemaining) - -- TODO: Use a loop with an integrated bound's check if outRemaining - -- is smaller than 8, as this will save on divisions. - | otherwise = return $ B.bufferFull bound op0 (outerLoop i0) - where - outRemaining = (ope `minusPtr` op0) `div` bound - inpRemaining = iend - i0 - - goPartial !iendTmp = go i0 op0 - where - go !i !op - | i < iendTmp = case A.unsafeIndex arr i of - w | w <= 0x7F -> do - BP.runB be (fromIntegral w) op >>= go (i + 1) - | w <= 0x7FF -> do - poke8 0 $ (w `shiftR` 6) + 0xC0 - poke8 1 $ (w .&. 0x3f) + 0x80 - go (i + 1) (op `plusPtr` 2) - | 0xD800 <= w && w <= 0xDBFF -> do - let c = ord $ U16.chr2 w (A.unsafeIndex arr (i+1)) - poke8 0 $ (c `shiftR` 18) + 0xF0 - poke8 1 $ ((c `shiftR` 12) .&. 0x3F) + 0x80 - poke8 2 $ ((c `shiftR` 6) .&. 0x3F) + 0x80 - poke8 3 $ (c .&. 0x3F) + 0x80 - go (i + 2) (op `plusPtr` 4) - | otherwise -> do - poke8 0 $ (w `shiftR` 12) + 0xE0 - poke8 1 $ ((w `shiftR` 6) .&. 0x3F) + 0x80 - poke8 2 $ (w .&. 0x3F) + 0x80 - go (i + 1) (op `plusPtr` 3) - | otherwise = - outerLoop i (B.BufferRange op ope) - where - poke8 j v = poke (op `plusPtr` j) (fromIntegral v :: Word8) - --- | Encode text using UTF-8 encoding. -encodeUtf8 :: Text -> ByteString -encodeUtf8 (Text arr off len) - | len == 0 = B.empty - | otherwise = unsafeDupablePerformIO $ do - fp <- mallocByteString (len*3) -- see https://github.com/haskell/text/issues/194 for why len*3 is enough - withForeignPtr fp $ \ptr -> - with ptr $ \destPtr -> do - c_encode_utf8 destPtr (A.aBA arr) (fromIntegral off) (fromIntegral len) - newDest <- peek destPtr - let utf8len = newDest `minusPtr` ptr - if utf8len >= len `shiftR` 1 - then return (PS fp 0 utf8len) - else do - fp' <- mallocByteString utf8len - withForeignPtr fp' $ \ptr' -> do - memcpy ptr' ptr (fromIntegral utf8len) - return (PS fp' 0 utf8len) - --- | Decode text from little endian UTF-16 encoding. -decodeUtf16LEWith :: OnDecodeError -> ByteString -> Text -decodeUtf16LEWith onErr bs = F.unstream (E.streamUtf16LE onErr bs) -{-# INLINE decodeUtf16LEWith #-} - --- | Decode text from little endian UTF-16 encoding. --- --- If the input contains any invalid little endian UTF-16 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf16LEWith'. -decodeUtf16LE :: ByteString -> Text -decodeUtf16LE = decodeUtf16LEWith strictDecode -{-# INLINE decodeUtf16LE #-} - --- | Decode text from big endian UTF-16 encoding. -decodeUtf16BEWith :: OnDecodeError -> ByteString -> Text -decodeUtf16BEWith onErr bs = F.unstream (E.streamUtf16BE onErr bs) -{-# INLINE decodeUtf16BEWith #-} - --- | Decode text from big endian UTF-16 encoding. --- --- If the input contains any invalid big endian UTF-16 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf16BEWith'. 
-decodeUtf16BE :: ByteString -> Text -decodeUtf16BE = decodeUtf16BEWith strictDecode -{-# INLINE decodeUtf16BE #-} - --- | Encode text using little endian UTF-16 encoding. -encodeUtf16LE :: Text -> ByteString -encodeUtf16LE txt = E.unstream (E.restreamUtf16LE (F.stream txt)) -{-# INLINE encodeUtf16LE #-} - --- | Encode text using big endian UTF-16 encoding. -encodeUtf16BE :: Text -> ByteString -encodeUtf16BE txt = E.unstream (E.restreamUtf16BE (F.stream txt)) -{-# INLINE encodeUtf16BE #-} - --- | Decode text from little endian UTF-32 encoding. -decodeUtf32LEWith :: OnDecodeError -> ByteString -> Text -decodeUtf32LEWith onErr bs = F.unstream (E.streamUtf32LE onErr bs) -{-# INLINE decodeUtf32LEWith #-} - --- | Decode text from little endian UTF-32 encoding. --- --- If the input contains any invalid little endian UTF-32 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf32LEWith'. -decodeUtf32LE :: ByteString -> Text -decodeUtf32LE = decodeUtf32LEWith strictDecode -{-# INLINE decodeUtf32LE #-} - --- | Decode text from big endian UTF-32 encoding. -decodeUtf32BEWith :: OnDecodeError -> ByteString -> Text -decodeUtf32BEWith onErr bs = F.unstream (E.streamUtf32BE onErr bs) -{-# INLINE decodeUtf32BEWith #-} - --- | Decode text from big endian UTF-32 encoding. --- --- If the input contains any invalid big endian UTF-32 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf32BEWith'. -decodeUtf32BE :: ByteString -> Text -decodeUtf32BE = decodeUtf32BEWith strictDecode -{-# INLINE decodeUtf32BE #-} - --- | Encode text using little endian UTF-32 encoding. -encodeUtf32LE :: Text -> ByteString -encodeUtf32LE txt = E.unstream (E.restreamUtf32LE (F.stream txt)) -{-# INLINE encodeUtf32LE #-} - --- | Encode text using big endian UTF-32 encoding. -encodeUtf32BE :: Text -> ByteString -encodeUtf32BE txt = E.unstream (E.restreamUtf32BE (F.stream txt)) -{-# INLINE encodeUtf32BE #-} - -foreign import ccall unsafe "_hs_text_decode_utf8" c_decode_utf8 - :: MutableByteArray# s -> Ptr CSize - -> Ptr Word8 -> Ptr Word8 -> IO (Ptr Word8) - -foreign import ccall unsafe "_hs_text_decode_utf8_state" c_decode_utf8_with_state - :: MutableByteArray# s -> Ptr CSize - -> Ptr (Ptr Word8) -> Ptr Word8 - -> Ptr CodePoint -> Ptr DecoderState -> IO (Ptr Word8) - -foreign import ccall unsafe "_hs_text_decode_latin1" c_decode_latin1 - :: MutableByteArray# s -> Ptr Word8 -> Ptr Word8 -> IO () - -foreign import ccall unsafe "_hs_text_encode_utf8" c_encode_utf8 - :: Ptr (Ptr Word8) -> ByteArray# -> CSize -> CSize -> IO () diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Foreign.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Foreign.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Foreign.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Foreign.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,176 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, GeneralizedNewtypeDeriving #-} --- | --- Module : Data.Text.Foreign --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : GHC --- --- Support for using 'Text' data with native code via the Haskell --- foreign function interface. 
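-- A sketch of driving the incremental UTF-8 decoder defined earlier in this
-- hunk: 'streamDecodeUtf8' yields a 'Some' value whose continuation remembers
-- any bytes of an unfinished code point.  'decodeChunks' is a hypothetical
-- helper; it simply discards bytes still pending after the final chunk.

import qualified Data.ByteString    as B
import           Data.Text          (Text)
import           Data.Text.Encoding (Decoding (..), streamDecodeUtf8)

decodeChunks :: [B.ByteString] -> [Text]
decodeChunks = go streamDecodeUtf8
  where
    go _      []       = []
    go decode (c : cs) =
      let Some t _pending decode' = decode c   -- carry the undecoded suffix
      in t : go decode' cs                     -- into the next decoding step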
- -module Data.Text.Foreign - ( - -- * Interoperability with native code - -- $interop - I16 - -- * Safe conversion functions - , fromPtr - , useAsPtr - , asForeignPtr - -- ** Encoding as UTF-8 - , peekCStringLen - , withCStringLen - -- * Unsafe conversion code - , lengthWord16 - , unsafeCopyToPtr - -- * Low-level manipulation - -- $lowlevel - , dropWord16 - , takeWord16 - ) where - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -#if MIN_VERSION_base(4,4,0) -import Control.Monad.ST.Unsafe (unsafeIOToST) -#else -import Control.Monad.ST (unsafeIOToST) -#endif -import Data.ByteString.Unsafe (unsafePackCStringLen, unsafeUseAsCStringLen) -import Data.Text.Encoding (decodeUtf8, encodeUtf8) -import Data.Text.Internal (Text(..), empty) -import Data.Text.Unsafe (lengthWord16) -import Data.Word (Word16) -import Foreign.C.String (CStringLen) -import Foreign.ForeignPtr (ForeignPtr, mallocForeignPtrArray, withForeignPtr) -import Foreign.Marshal.Alloc (allocaBytes) -import Foreign.Ptr (Ptr, castPtr, plusPtr) -import Foreign.Storable (peek, poke) -import qualified Data.Text.Array as A - --- $interop --- --- The 'Text' type is implemented using arrays that are not guaranteed --- to have a fixed address in the Haskell heap. All communication with --- native code must thus occur by copying data back and forth. --- --- The 'Text' type's internal representation is UTF-16, using the --- platform's native endianness. This makes copied data suitable for --- use with native libraries that use a similar representation, such --- as ICU. To interoperate with native libraries that use different --- internal representations, such as UTF-8 or UTF-32, consider using --- the functions in the 'Data.Text.Encoding' module. - --- | A type representing a number of UTF-16 code units. -newtype I16 = I16 Int - deriving (Bounded, Enum, Eq, Integral, Num, Ord, Read, Real, Show) - --- | /O(n)/ Create a new 'Text' from a 'Ptr' 'Word16' by copying the --- contents of the array. -fromPtr :: Ptr Word16 -- ^ source array - -> I16 -- ^ length of source array (in 'Word16' units) - -> IO Text -fromPtr _ (I16 0) = return empty -fromPtr ptr (I16 len) = -#if defined(ASSERTS) - assert (len > 0) $ -#endif - return $! Text arr 0 len - where - arr = A.run (A.new len >>= copy) - copy marr = loop ptr 0 - where - loop !p !i | i == len = return marr - | otherwise = do - A.unsafeWrite marr i =<< unsafeIOToST (peek p) - loop (p `plusPtr` 2) (i + 1) - --- $lowlevel --- --- Foreign functions that use UTF-16 internally may return indices in --- units of 'Word16' instead of characters. These functions may --- safely be used with such indices, as they will adjust offsets if --- necessary to preserve the validity of a Unicode string. - --- | /O(1)/ Return the prefix of the 'Text' of @n@ 'Word16' units in --- length. --- --- If @n@ would cause the 'Text' to end inside a surrogate pair, the --- end of the prefix will be advanced by one additional 'Word16' unit --- to maintain its validity. -takeWord16 :: I16 -> Text -> Text -takeWord16 (I16 n) t@(Text arr off len) - | n <= 0 = empty - | n >= len || m >= len = t - | otherwise = Text arr off m - where - m | w < 0xD800 || w > 0xDBFF = n - | otherwise = n+1 - w = A.unsafeIndex arr (off+n-1) - --- | /O(1)/ Return the suffix of the 'Text', with @n@ 'Word16' units --- dropped from its beginning. --- --- If @n@ would cause the 'Text' to begin inside a surrogate pair, the --- beginning of the suffix will be advanced by one additional 'Word16' --- unit to maintain its validity. 
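-- A small sketch of the surrogate-safe slicing described above: splitting a
-- 'Text' at a 'Word16' offset reported by a UTF-16-based foreign API.
-- 'splitAtWord16' is a made-up helper, not part of this module.

import Data.Text         (Text)
import Data.Text.Foreign (I16, dropWord16, takeWord16)

splitAtWord16 :: I16 -> Text -> (Text, Text)
splitAtWord16 n t = (takeWord16 n t, dropWord16 n t)
  -- Both halves move the cut point by one extra unit if it would fall inside
  -- a surrogate pair, so the two pieces still concatenate to the original.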
-dropWord16 :: I16 -> Text -> Text -dropWord16 (I16 n) t@(Text arr off len) - | n <= 0 = t - | n >= len || m >= len = empty - | otherwise = Text arr (off+m) (len-m) - where - m | w < 0xD800 || w > 0xDBFF = n - | otherwise = n+1 - w = A.unsafeIndex arr (off+n-1) - --- | /O(n)/ Copy a 'Text' to an array. The array is assumed to be big --- enough to hold the contents of the entire 'Text'. -unsafeCopyToPtr :: Text -> Ptr Word16 -> IO () -unsafeCopyToPtr (Text arr off len) ptr = loop ptr off - where - end = off + len - loop !p !i | i == end = return () - | otherwise = do - poke p (A.unsafeIndex arr i) - loop (p `plusPtr` 2) (i + 1) - --- | /O(n)/ Perform an action on a temporary, mutable copy of a --- 'Text'. The copy is freed as soon as the action returns. -useAsPtr :: Text -> (Ptr Word16 -> I16 -> IO a) -> IO a -useAsPtr t@(Text _arr _off len) action = - allocaBytes (len * 2) $ \buf -> do - unsafeCopyToPtr t buf - action (castPtr buf) (fromIntegral len) - --- | /O(n)/ Make a mutable copy of a 'Text'. -asForeignPtr :: Text -> IO (ForeignPtr Word16, I16) -asForeignPtr t@(Text _arr _off len) = do - fp <- mallocForeignPtrArray len - withForeignPtr fp $ unsafeCopyToPtr t - return (fp, I16 len) - --- | /O(n)/ Decode a C string with explicit length, which is assumed --- to have been encoded as UTF-8. If decoding fails, a --- 'UnicodeException' is thrown. --- --- @since 1.0.0.0 -peekCStringLen :: CStringLen -> IO Text -peekCStringLen cs = do - bs <- unsafePackCStringLen cs - return $! decodeUtf8 bs - --- | Marshal a 'Text' into a C string encoded as UTF-8 in temporary --- storage, with explicit length information. The encoded string may --- contain NUL bytes, and is not followed by a trailing NUL byte. --- --- The temporary storage is freed when the subcomputation terminates --- (either normally or via an exception), so the pointer to the --- temporary storage must /not/ be used after this function returns. --- --- @since 1.0.0.0 -withCStringLen :: Text -> (CStringLen -> IO a) -> IO a -withCStringLen t act = unsafeUseAsCStringLen (encodeUtf8 t) act diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Builder/Functions.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Builder/Functions.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Builder/Functions.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Builder/Functions.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -{-# LANGUAGE MagicHash #-} - --- | --- Module : Data.Text.Internal.Builder.Functions --- Copyright : (c) 2011 MailRank, Inc. --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Useful functions and combinators. - -module Data.Text.Internal.Builder.Functions - ( - (<>) - , i2d - ) where - -import Data.Monoid (mappend) -import Data.Text.Lazy.Builder (Builder) -import GHC.Base (chr#,ord#,(+#),Int(I#),Char(C#)) -import Prelude () - --- | Unsafe conversion for decimal digits. -{-# INLINE i2d #-} -i2d :: Int -> Char -i2d (I# i#) = C# (chr# (ord# '0'# +# i#)) - --- | The normal 'mappend' function with right associativity instead of --- left. 
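-- A hypothetical round trip through the 'CStringLen' helpers of
-- Data.Text.Foreign shown above: marshal a 'Text' into temporary UTF-8
-- storage and immediately decode it back.  'utf8RoundTrip' is an
-- illustrative name only.

import Data.Text         (Text)
import Data.Text.Foreign (peekCStringLen, withCStringLen)

utf8RoundTrip :: Text -> IO Text
utf8RoundTrip t = withCStringLen t peekCStringLen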
-(<>) :: Builder -> Builder -> Builder -(<>) = mappend -{-# INLINE (<>) #-} - -infixr 4 <> diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Builder/Int/Digits.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Builder/Int/Digits.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Builder/Int/Digits.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Builder/Int/Digits.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - --- Module: Data.Text.Internal.Builder.Int.Digits --- Copyright: (c) 2013 Bryan O'Sullivan --- License: BSD-style --- Maintainer: Bryan O'Sullivan --- Stability: experimental --- Portability: portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- This module exists because the C preprocessor does things that we --- shall not speak of when confronted with Haskell multiline strings. - -module Data.Text.Internal.Builder.Int.Digits (digits) where - -import Data.ByteString.Char8 (ByteString) - -digits :: ByteString -digits = "0001020304050607080910111213141516171819\ - \2021222324252627282930313233343536373839\ - \4041424344454647484950515253545556575859\ - \6061626364656667686970717273747576777879\ - \8081828384858687888990919293949596979899" diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Builder/RealFloat/Functions.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Builder/RealFloat/Functions.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Builder/RealFloat/Functions.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Builder/RealFloat/Functions.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -{-# LANGUAGE CPP #-} - --- | --- Module: Data.Text.Internal.Builder.RealFloat.Functions --- Copyright: (c) The University of Glasgow 1994-2002 --- License: see libraries/base/LICENSE --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! 
- -module Data.Text.Internal.Builder.RealFloat.Functions - ( - roundTo - ) where - -roundTo :: Int -> [Int] -> (Int,[Int]) - -#if MIN_VERSION_base(4,6,0) - -roundTo d is = - case f d True is of - x@(0,_) -> x - (1,xs) -> (1, 1:xs) - _ -> error "roundTo: bad Value" - where - b2 = base `quot` 2 - - f n _ [] = (0, replicate n 0) - f 0 e (x:xs) | x == b2 && e && all (== 0) xs = (0, []) -- Round to even when at exactly half the base - | otherwise = (if x >= b2 then 1 else 0, []) - f n _ (i:xs) - | i' == base = (1,0:ds) - | otherwise = (0,i':ds) - where - (c,ds) = f (n-1) (even i) xs - i' = c + i - base = 10 - -#else - -roundTo d is = - case f d is of - x@(0,_) -> x - (1,xs) -> (1, 1:xs) - _ -> error "roundTo: bad Value" - where - f n [] = (0, replicate n 0) - f 0 (x:_) = (if x >= 5 then 1 else 0, []) - f n (i:xs) - | i' == 10 = (1,0:ds) - | otherwise = (0,i':ds) - where - (c,ds) = f (n-1) xs - i' = c + i - -#endif diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Builder.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Builder.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Builder.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Builder.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,329 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} -{-# OPTIONS_HADDOCK not-home #-} - ------------------------------------------------------------------------------ --- | --- Module : Data.Text.Internal.Builder --- Copyright : (c) 2013 Bryan O'Sullivan --- (c) 2010 Johan Tibell --- License : BSD-style (see LICENSE) --- --- Maintainer : Johan Tibell --- Stability : experimental --- Portability : portable to Hugs and GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Efficient construction of lazy @Text@ values. The principal --- operations on a @Builder@ are @singleton@, @fromText@, and --- @fromLazyText@, which construct new builders, and 'mappend', which --- concatenates two builders. --- --- To get maximum performance when building lazy @Text@ values using a --- builder, associate @mappend@ calls to the right. For example, --- prefer --- --- > singleton 'a' `mappend` (singleton 'b' `mappend` singleton 'c') --- --- to --- --- > singleton 'a' `mappend` singleton 'b' `mappend` singleton 'c' --- --- as the latter associates @mappend@ to the left. 
--- ------------------------------------------------------------------------------ - -module Data.Text.Internal.Builder - ( -- * Public API - -- ** The Builder type - Builder - , toLazyText - , toLazyTextWith - - -- ** Constructing Builders - , singleton - , fromText - , fromLazyText - , fromString - - -- ** Flushing the buffer state - , flush - - -- * Internal functions - , append' - , ensureFree - , writeN - ) where - -import Control.Monad.ST (ST, runST) -import Data.Monoid (Monoid(..)) -#if !MIN_VERSION_base(4,11,0) && MIN_VERSION_base(4,9,0) -import Data.Semigroup (Semigroup(..)) -#endif -import Data.Text.Internal (Text(..)) -import Data.Text.Internal.Lazy (smallChunkSize) -import Data.Text.Unsafe (inlineInterleaveST) -import Data.Text.Internal.Unsafe.Char (unsafeWrite) -import Prelude hiding (map, putChar) - -import qualified Data.String as String -import qualified Data.Text as S -import qualified Data.Text.Array as A -import qualified Data.Text.Lazy as L - ------------------------------------------------------------------------- - --- | A @Builder@ is an efficient way to build lazy @Text@ values. --- There are several functions for constructing builders, but only one --- to inspect them: to extract any data, you have to turn them into --- lazy @Text@ values using @toLazyText@. --- --- Internally, a builder constructs a lazy @Text@ by filling arrays --- piece by piece. As each buffer is filled, it is \'popped\' off, to --- become a new chunk of the resulting lazy @Text@. All this is --- hidden from the user of the @Builder@. -newtype Builder = Builder { - -- Invariant (from Data.Text.Lazy): - -- The lists include no null Texts. - runBuilder :: forall s. (Buffer s -> ST s [S.Text]) - -> Buffer s - -> ST s [S.Text] - } - -#if MIN_VERSION_base(4,9,0) -instance Semigroup Builder where - (<>) = append - {-# INLINE (<>) #-} -#endif - -instance Monoid Builder where - mempty = empty - {-# INLINE mempty #-} -#if MIN_VERSION_base(4,9,0) - mappend = (<>) -- future-proof definition -#else - mappend = append -#endif - {-# INLINE mappend #-} - mconcat = foldr mappend Data.Monoid.mempty - {-# INLINE mconcat #-} - -instance String.IsString Builder where - fromString = fromString - {-# INLINE fromString #-} - -instance Show Builder where - show = show . toLazyText - -instance Eq Builder where - a == b = toLazyText a == toLazyText b - -instance Ord Builder where - a <= b = toLazyText a <= toLazyText b - ------------------------------------------------------------------------- - --- | /O(1)./ The empty @Builder@, satisfying --- --- * @'toLazyText' 'empty' = 'L.empty'@ --- -empty :: Builder -empty = Builder (\ k buf -> k buf) -{-# INLINE empty #-} - --- | /O(1)./ A @Builder@ taking a single character, satisfying --- --- * @'toLazyText' ('singleton' c) = 'L.singleton' c@ --- -singleton :: Char -> Builder -singleton c = writeAtMost 2 $ \ marr o -> unsafeWrite marr o c -{-# INLINE singleton #-} - ------------------------------------------------------------------------- - --- | /O(1)./ The concatenation of two builders, an associative --- operation with identity 'empty', satisfying --- --- * @'toLazyText' ('append' x y) = 'L.append' ('toLazyText' x) ('toLazyText' y)@ --- -append :: Builder -> Builder -> Builder -append (Builder f) (Builder g) = Builder (f . g) -{-# INLINE [0] append #-} - --- TODO: Experiment to find the right threshold. -copyLimit :: Int -copyLimit = 128 - --- This function attempts to merge small @Text@ values instead of --- treating each value as its own chunk. 
We may not always want this. - --- | /O(1)./ A @Builder@ taking a 'S.Text', satisfying --- --- * @'toLazyText' ('fromText' t) = 'L.fromChunks' [t]@ --- -fromText :: S.Text -> Builder -fromText t@(Text arr off l) - | S.null t = empty - | l <= copyLimit = writeN l $ \marr o -> A.copyI marr o arr off (l+o) - | otherwise = flush `append` mapBuilder (t :) -{-# INLINE [1] fromText #-} - -{-# RULES -"fromText/pack" forall s . - fromText (S.pack s) = fromString s - #-} - --- | /O(1)./ A Builder taking a @String@, satisfying --- --- * @'toLazyText' ('fromString' s) = 'L.fromChunks' [S.pack s]@ --- -fromString :: String -> Builder -fromString str = Builder $ \k (Buffer p0 o0 u0 l0) -> - let loop !marr !o !u !l [] = k (Buffer marr o u l) - loop marr o u l s@(c:cs) - | l <= 1 = do - arr <- A.unsafeFreeze marr - let !t = Text arr o u - marr' <- A.new chunkSize - ts <- inlineInterleaveST (loop marr' 0 0 chunkSize s) - return $ t : ts - | otherwise = do - n <- unsafeWrite marr (o+u) c - loop marr o (u+n) (l-n) cs - in loop p0 o0 u0 l0 str - where - chunkSize = smallChunkSize -{-# INLINE fromString #-} - --- | /O(1)./ A @Builder@ taking a lazy @Text@, satisfying --- --- * @'toLazyText' ('fromLazyText' t) = t@ --- -fromLazyText :: L.Text -> Builder -fromLazyText ts = flush `append` mapBuilder (L.toChunks ts ++) -{-# INLINE fromLazyText #-} - ------------------------------------------------------------------------- - --- Our internal buffer type -data Buffer s = Buffer {-# UNPACK #-} !(A.MArray s) - {-# UNPACK #-} !Int -- offset - {-# UNPACK #-} !Int -- used units - {-# UNPACK #-} !Int -- length left - ------------------------------------------------------------------------- - --- | /O(n)./ Extract a lazy @Text@ from a @Builder@ with a default --- buffer size. The construction work takes place if and when the --- relevant part of the lazy @Text@ is demanded. -toLazyText :: Builder -> L.Text -toLazyText = toLazyTextWith smallChunkSize - --- | /O(n)./ Extract a lazy @Text@ from a @Builder@, using the given --- size for the initial buffer. The construction work takes place if --- and when the relevant part of the lazy @Text@ is demanded. --- --- If the initial buffer is too small to hold all data, subsequent --- buffers will be the default buffer size. -toLazyTextWith :: Int -> Builder -> L.Text -toLazyTextWith chunkSize m = L.fromChunks (runST $ - newBuffer chunkSize >>= runBuilder (m `append` flush) (const (return []))) - --- | /O(1)./ Pop the strict @Text@ we have constructed so far, if any, --- yielding a new chunk in the result lazy @Text@. -flush :: Builder -flush = Builder $ \ k buf@(Buffer p o u l) -> - if u == 0 - then k buf - else do arr <- A.unsafeFreeze p - let !b = Buffer p (o+u) 0 l - !t = Text arr o u - ts <- inlineInterleaveST (k b) - return $! t : ts -{-# INLINE [1] flush #-} --- defer inlining so that flush/flush rule may fire. - ------------------------------------------------------------------------- - --- | Sequence an ST operation on the buffer -withBuffer :: (forall s. Buffer s -> ST s (Buffer s)) -> Builder -withBuffer f = Builder $ \k buf -> f buf >>= k -{-# INLINE withBuffer #-} - --- | Get the size of the buffer -withSize :: (Int -> Builder) -> Builder -withSize f = Builder $ \ k buf@(Buffer _ _ _ l) -> - runBuilder (f l) k buf -{-# INLINE withSize #-} - --- | Map the resulting list of texts. -mapBuilder :: ([S.Text] -> [S.Text]) -> Builder -mapBuilder f = Builder (fmap f .) 
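-- An illustrative sketch of the right-associated building style recommended in
-- the module header above, written against the public Data.Text.Lazy.Builder
-- re-exports rather than this internal module; 'greeting' is a made-up value.

import Data.Text.Lazy         (Text)
import Data.Text.Lazy.Builder (Builder, fromString, singleton, toLazyText)

greeting :: Text
greeting = toLazyText body
  where
    -- Appends are grouped to the right, as the module header advises.
    body :: Builder
    body = fromString "hello" <> (singleton ',' <> (singleton ' ' <> fromString "world"))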
- ------------------------------------------------------------------------- - --- | Ensure that there are at least @n@ many elements available. -ensureFree :: Int -> Builder -ensureFree !n = withSize $ \ l -> - if n <= l - then empty - else flush `append'` withBuffer (const (newBuffer (max n smallChunkSize))) -{-# INLINE [0] ensureFree #-} - -writeAtMost :: Int -> (forall s. A.MArray s -> Int -> ST s Int) -> Builder -writeAtMost n f = ensureFree n `append'` withBuffer (writeBuffer f) -{-# INLINE [0] writeAtMost #-} - --- | Ensure that @n@ many elements are available, and then use @f@ to --- write some elements into the memory. -writeN :: Int -> (forall s. A.MArray s -> Int -> ST s ()) -> Builder -writeN n f = writeAtMost n (\ p o -> f p o >> return n) -{-# INLINE writeN #-} - -writeBuffer :: (A.MArray s -> Int -> ST s Int) -> Buffer s -> ST s (Buffer s) -writeBuffer f (Buffer p o u l) = do - n <- f p (o+u) - return $! Buffer p o (u+n) (l-n) -{-# INLINE writeBuffer #-} - -newBuffer :: Int -> ST s (Buffer s) -newBuffer size = do - arr <- A.new size - return $! Buffer arr 0 0 size -{-# INLINE newBuffer #-} - ------------------------------------------------------------------------- --- Some nice rules for Builder - --- This function makes GHC understand that 'writeN' and 'ensureFree' --- are *not* recursive in the precense of the rewrite rules below. --- This is not needed with GHC 7+. -append' :: Builder -> Builder -> Builder -append' (Builder f) (Builder g) = Builder (f . g) -{-# INLINE append' #-} - -{-# RULES - -"append/writeAtMost" forall a b (f::forall s. A.MArray s -> Int -> ST s Int) - (g::forall s. A.MArray s -> Int -> ST s Int) ws. - append (writeAtMost a f) (append (writeAtMost b g) ws) = - append (writeAtMost (a+b) (\marr o -> f marr o >>= \ n -> - g marr (o+n) >>= \ m -> - let s = n+m in s `seq` return s)) ws - -"writeAtMost/writeAtMost" forall a b (f::forall s. A.MArray s -> Int -> ST s Int) - (g::forall s. A.MArray s -> Int -> ST s Int). - append (writeAtMost a f) (writeAtMost b g) = - writeAtMost (a+b) (\marr o -> f marr o >>= \ n -> - g marr (o+n) >>= \ m -> - let s = n+m in s `seq` return s) - -"ensureFree/ensureFree" forall a b . - append (ensureFree a) (ensureFree b) = ensureFree (max a b) - -"flush/flush" - append flush flush = flush - - #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Encoding/Fusion/Common.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Encoding/Fusion/Common.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Encoding/Fusion/Common.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Encoding/Fusion/Common.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,123 +0,0 @@ -{-# LANGUAGE BangPatterns #-} - --- | --- Module : Data.Text.Internal.Encoding.Fusion.Common --- Copyright : (c) Tom Harper 2008-2009, --- (c) Bryan O'Sullivan 2009, --- (c) Duncan Coutts 2009, --- (c) Jasper Van der Jeugt 2011 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Use at your own risk! --- --- Fusible 'Stream'-oriented functions for converting between 'Text' --- and several common encodings. 
- -module Data.Text.Internal.Encoding.Fusion.Common - ( - -- * Restreaming - -- Restreaming is the act of converting from one 'Stream' - -- representation to another. - restreamUtf16LE - , restreamUtf16BE - , restreamUtf32LE - , restreamUtf32BE - ) where - -import Data.Bits ((.&.)) -import Data.Text.Internal.Fusion (Step(..), Stream(..)) -import Data.Text.Internal.Fusion.Types (RS(..)) -import Data.Text.Internal.Unsafe.Char (ord) -import Data.Text.Internal.Unsafe.Shift (shiftR) -import Data.Word (Word8) - -restreamUtf16BE :: Stream Char -> Stream Word8 -restreamUtf16BE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) - where - next (RS0 s) = case next0 s of - Done -> Done - Skip s' -> Skip (RS0 s') - Yield x s' - | n < 0x10000 -> Yield (fromIntegral $ n `shiftR` 8) $ - RS1 s' (fromIntegral n) - | otherwise -> Yield c1 $ RS3 s' c2 c3 c4 - where - n = ord x - n1 = n - 0x10000 - c1 = fromIntegral (n1 `shiftR` 18 + 0xD8) - c2 = fromIntegral (n1 `shiftR` 10) - n2 = n1 .&. 0x3FF - c3 = fromIntegral (n2 `shiftR` 8 + 0xDC) - c4 = fromIntegral n2 - next (RS1 s x2) = Yield x2 (RS0 s) - next (RS2 s x2 x3) = Yield x2 (RS1 s x3) - next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) - {-# INLINE next #-} -{-# INLINE restreamUtf16BE #-} - -restreamUtf16LE :: Stream Char -> Stream Word8 -restreamUtf16LE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) - where - next (RS0 s) = case next0 s of - Done -> Done - Skip s' -> Skip (RS0 s') - Yield x s' - | n < 0x10000 -> Yield (fromIntegral n) $ - RS1 s' (fromIntegral $ shiftR n 8) - | otherwise -> Yield c1 $ RS3 s' c2 c3 c4 - where - n = ord x - n1 = n - 0x10000 - c2 = fromIntegral (shiftR n1 18 + 0xD8) - c1 = fromIntegral (shiftR n1 10) - n2 = n1 .&. 0x3FF - c4 = fromIntegral (shiftR n2 8 + 0xDC) - c3 = fromIntegral n2 - next (RS1 s x2) = Yield x2 (RS0 s) - next (RS2 s x2 x3) = Yield x2 (RS1 s x3) - next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) - {-# INLINE next #-} -{-# INLINE restreamUtf16LE #-} - -restreamUtf32BE :: Stream Char -> Stream Word8 -restreamUtf32BE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) - where - next (RS0 s) = case next0 s of - Done -> Done - Skip s' -> Skip (RS0 s') - Yield x s' -> Yield c1 (RS3 s' c2 c3 c4) - where - n = ord x - c1 = fromIntegral $ shiftR n 24 - c2 = fromIntegral $ shiftR n 16 - c3 = fromIntegral $ shiftR n 8 - c4 = fromIntegral n - next (RS1 s x2) = Yield x2 (RS0 s) - next (RS2 s x2 x3) = Yield x2 (RS1 s x3) - next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) - {-# INLINE next #-} -{-# INLINE restreamUtf32BE #-} - -restreamUtf32LE :: Stream Char -> Stream Word8 -restreamUtf32LE (Stream next0 s0 len) = Stream next (RS0 s0) (len * 2) - where - next (RS0 s) = case next0 s of - Done -> Done - Skip s' -> Skip (RS0 s') - Yield x s' -> Yield c1 (RS3 s' c2 c3 c4) - where - n = ord x - c4 = fromIntegral $ shiftR n 24 - c3 = fromIntegral $ shiftR n 16 - c2 = fromIntegral $ shiftR n 8 - c1 = fromIntegral n - next (RS1 s x2) = Yield x2 (RS0 s) - next (RS2 s x2 x3) = Yield x2 (RS1 s x3) - next (RS3 s x2 x3 x4) = Yield x2 (RS2 s x3 x4) - {-# INLINE next #-} -{-# INLINE restreamUtf32LE #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Encoding/Fusion.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Encoding/Fusion.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Encoding/Fusion.hs 2001-09-09 01:46:40.000000000 +0000 +++ 
cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Encoding/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,208 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} - --- | --- Module : Data.Text.Internal.Encoding.Fusion --- Copyright : (c) Tom Harper 2008-2009, --- (c) Bryan O'Sullivan 2009, --- (c) Duncan Coutts 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Fusible 'Stream'-oriented functions for converting between 'Text' --- and several common encodings. - -module Data.Text.Internal.Encoding.Fusion - ( - -- * Streaming - streamASCII - , streamUtf8 - , streamUtf16LE - , streamUtf16BE - , streamUtf32LE - , streamUtf32BE - - -- * Unstreaming - , unstream - - , module Data.Text.Internal.Encoding.Fusion.Common - ) where - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import Data.ByteString.Internal (ByteString(..), mallocByteString, memcpy) -import Data.Text.Internal.Fusion (Step(..), Stream(..)) -import Data.Text.Internal.Fusion.Size -import Data.Text.Encoding.Error -import Data.Text.Internal.Encoding.Fusion.Common -import Data.Text.Internal.Unsafe.Char (unsafeChr, unsafeChr8, unsafeChr32) -import Data.Text.Internal.Unsafe.Shift (shiftL, shiftR) -import Data.Word (Word8, Word16, Word32) -import Foreign.ForeignPtr (withForeignPtr, ForeignPtr) -import Foreign.Storable (pokeByteOff) -import qualified Data.ByteString as B -import qualified Data.ByteString.Unsafe as B -import qualified Data.Text.Internal.Encoding.Utf8 as U8 -import qualified Data.Text.Internal.Encoding.Utf16 as U16 -import qualified Data.Text.Internal.Encoding.Utf32 as U32 -import Data.Text.Unsafe (unsafeDupablePerformIO) - -streamASCII :: ByteString -> Stream Char -streamASCII bs = Stream next 0 (maxSize l) - where - l = B.length bs - {-# INLINE next #-} - next i - | i >= l = Done - | otherwise = Yield (unsafeChr8 x1) (i+1) - where - x1 = B.unsafeIndex bs i -{-# DEPRECATED streamASCII "Do not use this function" #-} -{-# INLINE [0] streamASCII #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using UTF-8 --- encoding. -streamUtf8 :: OnDecodeError -> ByteString -> Stream Char -streamUtf8 onErr bs = Stream next 0 (maxSize l) - where - l = B.length bs - next i - | i >= l = Done - | U8.validate1 x1 = Yield (unsafeChr8 x1) (i+1) - | i+1 < l && U8.validate2 x1 x2 = Yield (U8.chr2 x1 x2) (i+2) - | i+2 < l && U8.validate3 x1 x2 x3 = Yield (U8.chr3 x1 x2 x3) (i+3) - | i+3 < l && U8.validate4 x1 x2 x3 x4 = Yield (U8.chr4 x1 x2 x3 x4) (i+4) - | otherwise = decodeError "streamUtf8" "UTF-8" onErr (Just x1) (i+1) - where - x1 = idx i - x2 = idx (i + 1) - x3 = idx (i + 2) - x4 = idx (i + 3) - idx = B.unsafeIndex bs -{-# INLINE [0] streamUtf8 #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little --- endian UTF-16 encoding. 
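-- A sketch of the 'OnDecodeError' hook that these stream decoders thread
-- through as 'onErr': returning 'Nothing' skips the offending input, returning
-- 'Just c' substitutes a character.  'decodeUtf16LELossy' is a made-up name
-- built on the public wrapper; the library's own 'lenientDecode' behaves the
-- same way, substituting U+FFFD.

import Data.ByteString          (ByteString)
import Data.Text                (Text)
import Data.Text.Encoding       (decodeUtf16LEWith)
import Data.Text.Encoding.Error (OnDecodeError)

replaceBadUnits :: OnDecodeError
replaceBadUnits _message _input = Just '\xFFFD'

decodeUtf16LELossy :: ByteString -> Text
decodeUtf16LELossy = decodeUtf16LEWith replaceBadUnits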
-streamUtf16LE :: OnDecodeError -> ByteString -> Stream Char -streamUtf16LE onErr bs = Stream next 0 (maxSize (l `shiftR` 1)) - where - l = B.length bs - {-# INLINE next #-} - next i - | i >= l = Done - | i+1 < l && U16.validate1 x1 = Yield (unsafeChr x1) (i+2) - | i+3 < l && U16.validate2 x1 x2 = Yield (U16.chr2 x1 x2) (i+4) - | otherwise = decodeError "streamUtf16LE" "UTF-16LE" onErr Nothing (i+1) - where - x1 = idx i + (idx (i + 1) `shiftL` 8) - x2 = idx (i + 2) + (idx (i + 3) `shiftL` 8) - idx = fromIntegral . B.unsafeIndex bs :: Int -> Word16 -{-# INLINE [0] streamUtf16LE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big --- endian UTF-16 encoding. -streamUtf16BE :: OnDecodeError -> ByteString -> Stream Char -streamUtf16BE onErr bs = Stream next 0 (maxSize (l `shiftR` 1)) - where - l = B.length bs - {-# INLINE next #-} - next i - | i >= l = Done - | i+1 < l && U16.validate1 x1 = Yield (unsafeChr x1) (i+2) - | i+3 < l && U16.validate2 x1 x2 = Yield (U16.chr2 x1 x2) (i+4) - | otherwise = decodeError "streamUtf16BE" "UTF-16BE" onErr Nothing (i+1) - where - x1 = (idx i `shiftL` 8) + idx (i + 1) - x2 = (idx (i + 2) `shiftL` 8) + idx (i + 3) - idx = fromIntegral . B.unsafeIndex bs :: Int -> Word16 -{-# INLINE [0] streamUtf16BE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big --- endian UTF-32 encoding. -streamUtf32BE :: OnDecodeError -> ByteString -> Stream Char -streamUtf32BE onErr bs = Stream next 0 (maxSize (l `shiftR` 2)) - where - l = B.length bs - {-# INLINE next #-} - next i - | i >= l = Done - | i+3 < l && U32.validate x = Yield (unsafeChr32 x) (i+4) - | otherwise = decodeError "streamUtf32BE" "UTF-32BE" onErr Nothing (i+1) - where - x = shiftL x1 24 + shiftL x2 16 + shiftL x3 8 + x4 - x1 = idx i - x2 = idx (i+1) - x3 = idx (i+2) - x4 = idx (i+3) - idx = fromIntegral . B.unsafeIndex bs :: Int -> Word32 -{-# INLINE [0] streamUtf32BE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little --- endian UTF-32 encoding. -streamUtf32LE :: OnDecodeError -> ByteString -> Stream Char -streamUtf32LE onErr bs = Stream next 0 (maxSize (l `shiftR` 2)) - where - l = B.length bs - {-# INLINE next #-} - next i - | i >= l = Done - | i+3 < l && U32.validate x = Yield (unsafeChr32 x) (i+4) - | otherwise = decodeError "streamUtf32LE" "UTF-32LE" onErr Nothing (i+1) - where - x = shiftL x4 24 + shiftL x3 16 + shiftL x2 8 + x1 - x1 = idx i - x2 = idx $ i+1 - x3 = idx $ i+2 - x4 = idx $ i+3 - idx = fromIntegral . B.unsafeIndex bs :: Int -> Word32 -{-# INLINE [0] streamUtf32LE #-} - --- | /O(n)/ Convert a 'Stream' 'Word8' to a 'ByteString'. -unstream :: Stream Word8 -> ByteString -unstream (Stream next s0 len) = unsafeDupablePerformIO $ do - let mlen = upperBound 4 len - mallocByteString mlen >>= loop mlen 0 s0 - where - loop !n !off !s fp = case next s of - Done -> trimUp fp n off - Skip s' -> loop n off s' fp - Yield x s' - | off == n -> realloc fp n off s' x - | otherwise -> do - withForeignPtr fp $ \p -> pokeByteOff p off x - loop n (off+1) s' fp - {-# NOINLINE realloc #-} - realloc fp n off s x = do - let n' = n+n - fp' <- copy0 fp n n' - withForeignPtr fp' $ \p -> pokeByteOff p off x - loop n' (off+1) s fp' - {-# NOINLINE trimUp #-} - trimUp fp _ off = return $! 
PS fp 0 off - copy0 :: ForeignPtr Word8 -> Int -> Int -> IO (ForeignPtr Word8) - copy0 !src !srcLen !destLen = -#if defined(ASSERTS) - assert (srcLen <= destLen) $ -#endif - do - dest <- mallocByteString destLen - withForeignPtr src $ \src' -> - withForeignPtr dest $ \dest' -> - memcpy dest' src' (fromIntegral srcLen) - return dest - -decodeError :: forall s. String -> String -> OnDecodeError -> Maybe Word8 - -> s -> Step s Char -decodeError func kind onErr mb i = - case onErr desc mb of - Nothing -> Skip i - Just c -> Yield c i - where desc = "Data.Text.Internal.Encoding.Fusion." ++ func ++ ": Invalid " ++ - kind ++ " stream" diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf16.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf16.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf16.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf16.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -{-# LANGUAGE MagicHash, BangPatterns #-} - --- | --- Module : Data.Text.Internal.Encoding.Utf16 --- Copyright : (c) 2008, 2009 Tom Harper, --- (c) 2009 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Basic UTF-16 validation and character manipulation. -module Data.Text.Internal.Encoding.Utf16 - ( - chr2 - , validate1 - , validate2 - ) where - -import GHC.Exts -import GHC.Word (Word16(..)) - -chr2 :: Word16 -> Word16 -> Char -chr2 (W16# a#) (W16# b#) = C# (chr# (upper# +# lower# +# 0x10000#)) - where - !x# = word2Int# a# - !y# = word2Int# b# - !upper# = uncheckedIShiftL# (x# -# 0xD800#) 10# - !lower# = y# -# 0xDC00# -{-# INLINE chr2 #-} - -validate1 :: Word16 -> Bool -validate1 x1 = x1 < 0xD800 || x1 > 0xDFFF -{-# INLINE validate1 #-} - -validate2 :: Word16 -> Word16 -> Bool -validate2 x1 x2 = x1 >= 0xD800 && x1 <= 0xDBFF && - x2 >= 0xDC00 && x2 <= 0xDFFF -{-# INLINE validate2 #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf32.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf32.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf32.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf32.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ --- | --- Module : Data.Text.Internal.Encoding.Utf32 --- Copyright : (c) 2008, 2009 Tom Harper, --- (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Basic UTF-32 validation. 
-module Data.Text.Internal.Encoding.Utf32 - ( - validate - ) where - -import Data.Word (Word32) - -validate :: Word32 -> Bool -validate x1 = x1 < 0xD800 || (x1 > 0xDFFF && x1 <= 0x10FFFF) -{-# INLINE validate #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf8.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf8.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf8.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Encoding/Utf8.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,168 +0,0 @@ -{-# LANGUAGE CPP, MagicHash, BangPatterns #-} - --- | --- Module : Data.Text.Internal.Encoding.Utf8 --- Copyright : (c) 2008, 2009 Tom Harper, --- (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Basic UTF-8 validation and character manipulation. -module Data.Text.Internal.Encoding.Utf8 - ( - -- Decomposition - ord2 - , ord3 - , ord4 - -- Construction - , chr2 - , chr3 - , chr4 - -- * Validation - , validate1 - , validate2 - , validate3 - , validate4 - ) where - -#if defined(TEST_SUITE) -# undef ASSERTS -#endif - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import Data.Bits ((.&.)) -import Data.Text.Internal.Unsafe.Char (ord) -import Data.Text.Internal.Unsafe.Shift (shiftR) -import GHC.Exts -import GHC.Word (Word8(..)) - -default(Int) - -between :: Word8 -- ^ byte to check - -> Word8 -- ^ lower bound - -> Word8 -- ^ upper bound - -> Bool -between x y z = x >= y && x <= z -{-# INLINE between #-} - -ord2 :: Char -> (Word8,Word8) -ord2 c = -#if defined(ASSERTS) - assert (n >= 0x80 && n <= 0x07ff) -#endif - (x1,x2) - where - n = ord c - x1 = fromIntegral $ (n `shiftR` 6) + 0xC0 - x2 = fromIntegral $ (n .&. 0x3F) + 0x80 - -ord3 :: Char -> (Word8,Word8,Word8) -ord3 c = -#if defined(ASSERTS) - assert (n >= 0x0800 && n <= 0xffff) -#endif - (x1,x2,x3) - where - n = ord c - x1 = fromIntegral $ (n `shiftR` 12) + 0xE0 - x2 = fromIntegral $ ((n `shiftR` 6) .&. 0x3F) + 0x80 - x3 = fromIntegral $ (n .&. 0x3F) + 0x80 - -ord4 :: Char -> (Word8,Word8,Word8,Word8) -ord4 c = -#if defined(ASSERTS) - assert (n >= 0x10000) -#endif - (x1,x2,x3,x4) - where - n = ord c - x1 = fromIntegral $ (n `shiftR` 18) + 0xF0 - x2 = fromIntegral $ ((n `shiftR` 12) .&. 0x3F) + 0x80 - x3 = fromIntegral $ ((n `shiftR` 6) .&. 0x3F) + 0x80 - x4 = fromIntegral $ (n .&. 
0x3F) + 0x80 - -chr2 :: Word8 -> Word8 -> Char -chr2 (W8# x1#) (W8# x2#) = C# (chr# (z1# +# z2#)) - where - !y1# = word2Int# x1# - !y2# = word2Int# x2# - !z1# = uncheckedIShiftL# (y1# -# 0xC0#) 6# - !z2# = y2# -# 0x80# -{-# INLINE chr2 #-} - -chr3 :: Word8 -> Word8 -> Word8 -> Char -chr3 (W8# x1#) (W8# x2#) (W8# x3#) = C# (chr# (z1# +# z2# +# z3#)) - where - !y1# = word2Int# x1# - !y2# = word2Int# x2# - !y3# = word2Int# x3# - !z1# = uncheckedIShiftL# (y1# -# 0xE0#) 12# - !z2# = uncheckedIShiftL# (y2# -# 0x80#) 6# - !z3# = y3# -# 0x80# -{-# INLINE chr3 #-} - -chr4 :: Word8 -> Word8 -> Word8 -> Word8 -> Char -chr4 (W8# x1#) (W8# x2#) (W8# x3#) (W8# x4#) = - C# (chr# (z1# +# z2# +# z3# +# z4#)) - where - !y1# = word2Int# x1# - !y2# = word2Int# x2# - !y3# = word2Int# x3# - !y4# = word2Int# x4# - !z1# = uncheckedIShiftL# (y1# -# 0xF0#) 18# - !z2# = uncheckedIShiftL# (y2# -# 0x80#) 12# - !z3# = uncheckedIShiftL# (y3# -# 0x80#) 6# - !z4# = y4# -# 0x80# -{-# INLINE chr4 #-} - -validate1 :: Word8 -> Bool -validate1 x1 = x1 <= 0x7F -{-# INLINE validate1 #-} - -validate2 :: Word8 -> Word8 -> Bool -validate2 x1 x2 = between x1 0xC2 0xDF && between x2 0x80 0xBF -{-# INLINE validate2 #-} - -validate3 :: Word8 -> Word8 -> Word8 -> Bool -{-# INLINE validate3 #-} -validate3 x1 x2 x3 = validate3_1 || validate3_2 || validate3_3 || validate3_4 - where - validate3_1 = (x1 == 0xE0) && - between x2 0xA0 0xBF && - between x3 0x80 0xBF - validate3_2 = between x1 0xE1 0xEC && - between x2 0x80 0xBF && - between x3 0x80 0xBF - validate3_3 = x1 == 0xED && - between x2 0x80 0x9F && - between x3 0x80 0xBF - validate3_4 = between x1 0xEE 0xEF && - between x2 0x80 0xBF && - between x3 0x80 0xBF - -validate4 :: Word8 -> Word8 -> Word8 -> Word8 -> Bool -{-# INLINE validate4 #-} -validate4 x1 x2 x3 x4 = validate4_1 || validate4_2 || validate4_3 - where - validate4_1 = x1 == 0xF0 && - between x2 0x90 0xBF && - between x3 0x80 0xBF && - between x4 0x80 0xBF - validate4_2 = between x1 0xF1 0xF3 && - between x2 0x80 0xBF && - between x3 0x80 0xBF && - between x4 0x80 0xBF - validate4_3 = x1 == 0xF4 && - between x2 0x80 0x8F && - between x3 0x80 0xBF && - between x4 0x80 0xBF diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Functions.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Functions.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Functions.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Functions.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ --- | --- Module : Data.Text.Internal.Functions --- Copyright : 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Useful functions. - -module Data.Text.Internal.Functions - ( - intersperse - ) where - --- | A lazier version of Data.List.intersperse. The other version --- causes space leaks! 
-intersperse :: a -> [a] -> [a] -intersperse _ [] = [] -intersperse sep (x:xs) = x : go xs - where - go [] = [] - go (y:ys) = sep : y: go ys -{-# INLINE intersperse #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Fusion/CaseMapping.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Fusion/CaseMapping.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Fusion/CaseMapping.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Fusion/CaseMapping.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1002 +0,0 @@ -{-# LANGUAGE Rank2Types #-} --- AUTOMATICALLY GENERATED - DO NOT EDIT --- Generated by scripts/CaseMapping.hs --- CaseFolding-9.0.0.txt --- Date: 2016-03-02, 18:54:54 GMT --- SpecialCasing-9.0.0.txt --- Date: 2016-03-02, 18:55:13 GMT - -module Data.Text.Internal.Fusion.CaseMapping where -import Data.Char -import Data.Text.Internal.Fusion.Types - -upperMapping :: forall s. Char -> s -> Step (CC s) Char -{-# NOINLINE upperMapping #-} --- LATIN SMALL LETTER SHARP S -upperMapping '\x00df' s = Yield '\x0053' (CC s '\x0053' '\x0000') --- LATIN SMALL LIGATURE FF -upperMapping '\xfb00' s = Yield '\x0046' (CC s '\x0046' '\x0000') --- LATIN SMALL LIGATURE FI -upperMapping '\xfb01' s = Yield '\x0046' (CC s '\x0049' '\x0000') --- LATIN SMALL LIGATURE FL -upperMapping '\xfb02' s = Yield '\x0046' (CC s '\x004c' '\x0000') --- LATIN SMALL LIGATURE FFI -upperMapping '\xfb03' s = Yield '\x0046' (CC s '\x0046' '\x0049') --- LATIN SMALL LIGATURE FFL -upperMapping '\xfb04' s = Yield '\x0046' (CC s '\x0046' '\x004c') --- LATIN SMALL LIGATURE LONG S T -upperMapping '\xfb05' s = Yield '\x0053' (CC s '\x0054' '\x0000') --- LATIN SMALL LIGATURE ST -upperMapping '\xfb06' s = Yield '\x0053' (CC s '\x0054' '\x0000') --- ARMENIAN SMALL LIGATURE ECH YIWN -upperMapping '\x0587' s = Yield '\x0535' (CC s '\x0552' '\x0000') --- ARMENIAN SMALL LIGATURE MEN NOW -upperMapping '\xfb13' s = Yield '\x0544' (CC s '\x0546' '\x0000') --- ARMENIAN SMALL LIGATURE MEN ECH -upperMapping '\xfb14' s = Yield '\x0544' (CC s '\x0535' '\x0000') --- ARMENIAN SMALL LIGATURE MEN INI -upperMapping '\xfb15' s = Yield '\x0544' (CC s '\x053b' '\x0000') --- ARMENIAN SMALL LIGATURE VEW NOW -upperMapping '\xfb16' s = Yield '\x054e' (CC s '\x0546' '\x0000') --- ARMENIAN SMALL LIGATURE MEN XEH -upperMapping '\xfb17' s = Yield '\x0544' (CC s '\x053d' '\x0000') --- LATIN SMALL LETTER N PRECEDED BY APOSTROPHE -upperMapping '\x0149' s = Yield '\x02bc' (CC s '\x004e' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS -upperMapping '\x0390' s = Yield '\x0399' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS -upperMapping '\x03b0' s = Yield '\x03a5' (CC s '\x0308' '\x0301') --- LATIN SMALL LETTER J WITH CARON -upperMapping '\x01f0' s = Yield '\x004a' (CC s '\x030c' '\x0000') --- LATIN SMALL LETTER H WITH LINE BELOW -upperMapping '\x1e96' s = Yield '\x0048' (CC s '\x0331' '\x0000') --- LATIN SMALL LETTER T WITH DIAERESIS -upperMapping '\x1e97' s = Yield '\x0054' (CC s '\x0308' '\x0000') --- LATIN SMALL LETTER W WITH RING ABOVE -upperMapping '\x1e98' s = Yield '\x0057' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER Y WITH RING ABOVE -upperMapping '\x1e99' s = Yield '\x0059' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER A WITH RIGHT HALF RING -upperMapping '\x1e9a' s = Yield '\x0041' (CC s '\x02be' '\x0000') --- GREEK SMALL LETTER UPSILON WITH 
PSILI -upperMapping '\x1f50' s = Yield '\x03a5' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA -upperMapping '\x1f52' s = Yield '\x03a5' (CC s '\x0313' '\x0300') --- GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA -upperMapping '\x1f54' s = Yield '\x03a5' (CC s '\x0313' '\x0301') --- GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI -upperMapping '\x1f56' s = Yield '\x03a5' (CC s '\x0313' '\x0342') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI -upperMapping '\x1fb6' s = Yield '\x0391' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ETA WITH PERISPOMENI -upperMapping '\x1fc6' s = Yield '\x0397' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA -upperMapping '\x1fd2' s = Yield '\x0399' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA -upperMapping '\x1fd3' s = Yield '\x0399' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER IOTA WITH PERISPOMENI -upperMapping '\x1fd6' s = Yield '\x0399' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI -upperMapping '\x1fd7' s = Yield '\x0399' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA -upperMapping '\x1fe2' s = Yield '\x03a5' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA -upperMapping '\x1fe3' s = Yield '\x03a5' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER RHO WITH PSILI -upperMapping '\x1fe4' s = Yield '\x03a1' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PERISPOMENI -upperMapping '\x1fe6' s = Yield '\x03a5' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI -upperMapping '\x1fe7' s = Yield '\x03a5' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI -upperMapping '\x1ff6' s = Yield '\x03a9' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI -upperMapping '\x1f80' s = Yield '\x1f08' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI -upperMapping '\x1f81' s = Yield '\x1f09' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1f82' s = Yield '\x1f0a' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1f83' s = Yield '\x1f0b' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1f84' s = Yield '\x1f0c' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1f85' s = Yield '\x1f0d' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1f86' s = Yield '\x1f0e' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1f87' s = Yield '\x1f0f' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI -upperMapping '\x1f88' s = Yield '\x1f08' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI -upperMapping '\x1f89' s = Yield '\x1f09' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1f8a' s = Yield '\x1f0a' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1f8b' s = Yield '\x1f0b' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1f8c' s = Yield 
'\x1f0c' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1f8d' s = Yield '\x1f0d' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1f8e' s = Yield '\x1f0e' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1f8f' s = Yield '\x1f0f' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI -upperMapping '\x1f90' s = Yield '\x1f28' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI -upperMapping '\x1f91' s = Yield '\x1f29' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1f92' s = Yield '\x1f2a' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1f93' s = Yield '\x1f2b' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1f94' s = Yield '\x1f2c' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1f95' s = Yield '\x1f2d' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1f96' s = Yield '\x1f2e' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1f97' s = Yield '\x1f2f' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI -upperMapping '\x1f98' s = Yield '\x1f28' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI -upperMapping '\x1f99' s = Yield '\x1f29' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1f9a' s = Yield '\x1f2a' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1f9b' s = Yield '\x1f2b' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1f9c' s = Yield '\x1f2c' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1f9d' s = Yield '\x1f2d' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1f9e' s = Yield '\x1f2e' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1f9f' s = Yield '\x1f2f' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI -upperMapping '\x1fa0' s = Yield '\x1f68' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI -upperMapping '\x1fa1' s = Yield '\x1f69' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1fa2' s = Yield '\x1f6a' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI -upperMapping '\x1fa3' s = Yield '\x1f6b' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1fa4' s = Yield '\x1f6c' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI -upperMapping '\x1fa5' s = Yield '\x1f6d' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1fa6' s = Yield '\x1f6e' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH 
DASIA AND PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1fa7' s = Yield '\x1f6f' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI -upperMapping '\x1fa8' s = Yield '\x1f68' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI -upperMapping '\x1fa9' s = Yield '\x1f69' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1faa' s = Yield '\x1f6a' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI -upperMapping '\x1fab' s = Yield '\x1f6b' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1fac' s = Yield '\x1f6c' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI -upperMapping '\x1fad' s = Yield '\x1f6d' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1fae' s = Yield '\x1f6e' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -upperMapping '\x1faf' s = Yield '\x1f6f' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI -upperMapping '\x1fb3' s = Yield '\x0391' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI -upperMapping '\x1fbc' s = Yield '\x0391' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI -upperMapping '\x1fc3' s = Yield '\x0397' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI -upperMapping '\x1fcc' s = Yield '\x0397' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI -upperMapping '\x1ff3' s = Yield '\x03a9' (CC s '\x0399' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI -upperMapping '\x1ffc' s = Yield '\x03a9' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI -upperMapping '\x1fb2' s = Yield '\x1fba' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI -upperMapping '\x1fb4' s = Yield '\x0386' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI -upperMapping '\x1fc2' s = Yield '\x1fca' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI -upperMapping '\x1fc4' s = Yield '\x0389' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI -upperMapping '\x1ff2' s = Yield '\x1ffa' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI -upperMapping '\x1ff4' s = Yield '\x038f' (CC s '\x0399' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1fb7' s = Yield '\x0391' (CC s '\x0342' '\x0399') --- GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1fc7' s = Yield '\x0397' (CC s '\x0342' '\x0399') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI -upperMapping '\x1ff7' s = Yield '\x03a9' (CC s '\x0342' '\x0399') -upperMapping c s = Yield (toUpper c) (CC s '\0' '\0') -lowerMapping :: forall s. Char -> s -> Step (CC s) Char -{-# NOINLINE lowerMapping #-} --- LATIN CAPITAL LETTER I WITH DOT ABOVE -lowerMapping '\x0130' s = Yield '\x0069' (CC s '\x0307' '\x0000') -lowerMapping c s = Yield (toLower c) (CC s '\0' '\0') -titleMapping :: forall s. 
Char -> s -> Step (CC s) Char -{-# NOINLINE titleMapping #-} --- LATIN SMALL LETTER SHARP S -titleMapping '\x00df' s = Yield '\x0053' (CC s '\x0073' '\x0000') --- LATIN SMALL LIGATURE FF -titleMapping '\xfb00' s = Yield '\x0046' (CC s '\x0066' '\x0000') --- LATIN SMALL LIGATURE FI -titleMapping '\xfb01' s = Yield '\x0046' (CC s '\x0069' '\x0000') --- LATIN SMALL LIGATURE FL -titleMapping '\xfb02' s = Yield '\x0046' (CC s '\x006c' '\x0000') --- LATIN SMALL LIGATURE FFI -titleMapping '\xfb03' s = Yield '\x0046' (CC s '\x0066' '\x0069') --- LATIN SMALL LIGATURE FFL -titleMapping '\xfb04' s = Yield '\x0046' (CC s '\x0066' '\x006c') --- LATIN SMALL LIGATURE LONG S T -titleMapping '\xfb05' s = Yield '\x0053' (CC s '\x0074' '\x0000') --- LATIN SMALL LIGATURE ST -titleMapping '\xfb06' s = Yield '\x0053' (CC s '\x0074' '\x0000') --- ARMENIAN SMALL LIGATURE ECH YIWN -titleMapping '\x0587' s = Yield '\x0535' (CC s '\x0582' '\x0000') --- ARMENIAN SMALL LIGATURE MEN NOW -titleMapping '\xfb13' s = Yield '\x0544' (CC s '\x0576' '\x0000') --- ARMENIAN SMALL LIGATURE MEN ECH -titleMapping '\xfb14' s = Yield '\x0544' (CC s '\x0565' '\x0000') --- ARMENIAN SMALL LIGATURE MEN INI -titleMapping '\xfb15' s = Yield '\x0544' (CC s '\x056b' '\x0000') --- ARMENIAN SMALL LIGATURE VEW NOW -titleMapping '\xfb16' s = Yield '\x054e' (CC s '\x0576' '\x0000') --- ARMENIAN SMALL LIGATURE MEN XEH -titleMapping '\xfb17' s = Yield '\x0544' (CC s '\x056d' '\x0000') --- LATIN SMALL LETTER N PRECEDED BY APOSTROPHE -titleMapping '\x0149' s = Yield '\x02bc' (CC s '\x004e' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS -titleMapping '\x0390' s = Yield '\x0399' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS -titleMapping '\x03b0' s = Yield '\x03a5' (CC s '\x0308' '\x0301') --- LATIN SMALL LETTER J WITH CARON -titleMapping '\x01f0' s = Yield '\x004a' (CC s '\x030c' '\x0000') --- LATIN SMALL LETTER H WITH LINE BELOW -titleMapping '\x1e96' s = Yield '\x0048' (CC s '\x0331' '\x0000') --- LATIN SMALL LETTER T WITH DIAERESIS -titleMapping '\x1e97' s = Yield '\x0054' (CC s '\x0308' '\x0000') --- LATIN SMALL LETTER W WITH RING ABOVE -titleMapping '\x1e98' s = Yield '\x0057' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER Y WITH RING ABOVE -titleMapping '\x1e99' s = Yield '\x0059' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER A WITH RIGHT HALF RING -titleMapping '\x1e9a' s = Yield '\x0041' (CC s '\x02be' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI -titleMapping '\x1f50' s = Yield '\x03a5' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA -titleMapping '\x1f52' s = Yield '\x03a5' (CC s '\x0313' '\x0300') --- GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA -titleMapping '\x1f54' s = Yield '\x03a5' (CC s '\x0313' '\x0301') --- GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI -titleMapping '\x1f56' s = Yield '\x03a5' (CC s '\x0313' '\x0342') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI -titleMapping '\x1fb6' s = Yield '\x0391' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ETA WITH PERISPOMENI -titleMapping '\x1fc6' s = Yield '\x0397' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA -titleMapping '\x1fd2' s = Yield '\x0399' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA -titleMapping '\x1fd3' s = Yield '\x0399' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER IOTA WITH PERISPOMENI -titleMapping '\x1fd6' s = Yield '\x0399' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND 
PERISPOMENI -titleMapping '\x1fd7' s = Yield '\x0399' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA -titleMapping '\x1fe2' s = Yield '\x03a5' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA -titleMapping '\x1fe3' s = Yield '\x03a5' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER RHO WITH PSILI -titleMapping '\x1fe4' s = Yield '\x03a1' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PERISPOMENI -titleMapping '\x1fe6' s = Yield '\x03a5' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI -titleMapping '\x1fe7' s = Yield '\x03a5' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI -titleMapping '\x1ff6' s = Yield '\x03a9' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI -titleMapping '\x1fb2' s = Yield '\x1fba' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI -titleMapping '\x1fb4' s = Yield '\x0386' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI -titleMapping '\x1fc2' s = Yield '\x1fca' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI -titleMapping '\x1fc4' s = Yield '\x0389' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI -titleMapping '\x1ff2' s = Yield '\x1ffa' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI -titleMapping '\x1ff4' s = Yield '\x038f' (CC s '\x0345' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI -titleMapping '\x1fb7' s = Yield '\x0391' (CC s '\x0342' '\x0345') --- GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI -titleMapping '\x1fc7' s = Yield '\x0397' (CC s '\x0342' '\x0345') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI -titleMapping '\x1ff7' s = Yield '\x03a9' (CC s '\x0342' '\x0345') -titleMapping c s = Yield (toTitle c) (CC s '\0' '\0') -foldMapping :: forall s. 
Char -> s -> Step (CC s) Char -{-# NOINLINE foldMapping #-} --- MICRO SIGN -foldMapping '\x00b5' s = Yield '\x03bc' (CC s '\x0000' '\x0000') --- LATIN SMALL LETTER SHARP S -foldMapping '\x00df' s = Yield '\x0073' (CC s '\x0073' '\x0000') --- LATIN CAPITAL LETTER I WITH DOT ABOVE -foldMapping '\x0130' s = Yield '\x0069' (CC s '\x0307' '\x0000') --- LATIN SMALL LETTER N PRECEDED BY APOSTROPHE -foldMapping '\x0149' s = Yield '\x02bc' (CC s '\x006e' '\x0000') --- LATIN SMALL LETTER LONG S -foldMapping '\x017f' s = Yield '\x0073' (CC s '\x0000' '\x0000') --- LATIN SMALL LETTER J WITH CARON -foldMapping '\x01f0' s = Yield '\x006a' (CC s '\x030c' '\x0000') --- COMBINING GREEK YPOGEGRAMMENI -foldMapping '\x0345' s = Yield '\x03b9' (CC s '\x0000' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS -foldMapping '\x0390' s = Yield '\x03b9' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS -foldMapping '\x03b0' s = Yield '\x03c5' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER FINAL SIGMA -foldMapping '\x03c2' s = Yield '\x03c3' (CC s '\x0000' '\x0000') --- GREEK BETA SYMBOL -foldMapping '\x03d0' s = Yield '\x03b2' (CC s '\x0000' '\x0000') --- GREEK THETA SYMBOL -foldMapping '\x03d1' s = Yield '\x03b8' (CC s '\x0000' '\x0000') --- GREEK PHI SYMBOL -foldMapping '\x03d5' s = Yield '\x03c6' (CC s '\x0000' '\x0000') --- GREEK PI SYMBOL -foldMapping '\x03d6' s = Yield '\x03c0' (CC s '\x0000' '\x0000') --- GREEK KAPPA SYMBOL -foldMapping '\x03f0' s = Yield '\x03ba' (CC s '\x0000' '\x0000') --- GREEK RHO SYMBOL -foldMapping '\x03f1' s = Yield '\x03c1' (CC s '\x0000' '\x0000') --- GREEK LUNATE EPSILON SYMBOL -foldMapping '\x03f5' s = Yield '\x03b5' (CC s '\x0000' '\x0000') --- ARMENIAN SMALL LIGATURE ECH YIWN -foldMapping '\x0587' s = Yield '\x0565' (CC s '\x0582' '\x0000') --- CHEROKEE SMALL LETTER YE -foldMapping '\x13f8' s = Yield '\x13f0' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER YI -foldMapping '\x13f9' s = Yield '\x13f1' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER YO -foldMapping '\x13fa' s = Yield '\x13f2' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER YU -foldMapping '\x13fb' s = Yield '\x13f3' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER YV -foldMapping '\x13fc' s = Yield '\x13f4' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER MV -foldMapping '\x13fd' s = Yield '\x13f5' (CC s '\x0000' '\x0000') --- CYRILLIC SMALL LETTER ROUNDED VE -foldMapping '\x1c80' s = Yield '\x0432' (CC s '\x0000' '\x0000') --- CYRILLIC SMALL LETTER LONG-LEGGED DE -foldMapping '\x1c81' s = Yield '\x0434' (CC s '\x0000' '\x0000') --- CYRILLIC SMALL LETTER NARROW O -foldMapping '\x1c82' s = Yield '\x043e' (CC s '\x0000' '\x0000') --- CYRILLIC SMALL LETTER WIDE ES -foldMapping '\x1c83' s = Yield '\x0441' (CC s '\x0000' '\x0000') --- CYRILLIC SMALL LETTER TALL TE -foldMapping '\x1c84' s = Yield '\x0442' (CC s '\x0000' '\x0000') --- CYRILLIC SMALL LETTER THREE-LEGGED TE -foldMapping '\x1c85' s = Yield '\x0442' (CC s '\x0000' '\x0000') --- CYRILLIC SMALL LETTER TALL HARD SIGN -foldMapping '\x1c86' s = Yield '\x044a' (CC s '\x0000' '\x0000') --- CYRILLIC SMALL LETTER TALL YAT -foldMapping '\x1c87' s = Yield '\x0463' (CC s '\x0000' '\x0000') --- CYRILLIC SMALL LETTER UNBLENDED UK -foldMapping '\x1c88' s = Yield '\xa64b' (CC s '\x0000' '\x0000') --- LATIN SMALL LETTER H WITH LINE BELOW -foldMapping '\x1e96' s = Yield '\x0068' (CC s '\x0331' '\x0000') --- LATIN SMALL LETTER T WITH DIAERESIS -foldMapping '\x1e97' s = Yield '\x0074' (CC s '\x0308' '\x0000') --- LATIN 
SMALL LETTER W WITH RING ABOVE -foldMapping '\x1e98' s = Yield '\x0077' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER Y WITH RING ABOVE -foldMapping '\x1e99' s = Yield '\x0079' (CC s '\x030a' '\x0000') --- LATIN SMALL LETTER A WITH RIGHT HALF RING -foldMapping '\x1e9a' s = Yield '\x0061' (CC s '\x02be' '\x0000') --- LATIN SMALL LETTER LONG S WITH DOT ABOVE -foldMapping '\x1e9b' s = Yield '\x1e61' (CC s '\x0000' '\x0000') --- LATIN CAPITAL LETTER SHARP S -foldMapping '\x1e9e' s = Yield '\x0073' (CC s '\x0073' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI -foldMapping '\x1f50' s = Yield '\x03c5' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA -foldMapping '\x1f52' s = Yield '\x03c5' (CC s '\x0313' '\x0300') --- GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA -foldMapping '\x1f54' s = Yield '\x03c5' (CC s '\x0313' '\x0301') --- GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI -foldMapping '\x1f56' s = Yield '\x03c5' (CC s '\x0313' '\x0342') --- GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI -foldMapping '\x1f80' s = Yield '\x1f00' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI -foldMapping '\x1f81' s = Yield '\x1f01' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1f82' s = Yield '\x1f02' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1f83' s = Yield '\x1f03' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1f84' s = Yield '\x1f04' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1f85' s = Yield '\x1f05' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1f86' s = Yield '\x1f06' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1f87' s = Yield '\x1f07' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI -foldMapping '\x1f88' s = Yield '\x1f00' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI -foldMapping '\x1f89' s = Yield '\x1f01' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1f8a' s = Yield '\x1f02' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1f8b' s = Yield '\x1f03' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1f8c' s = Yield '\x1f04' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1f8d' s = Yield '\x1f05' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -foldMapping '\x1f8e' s = Yield '\x1f06' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -foldMapping '\x1f8f' s = Yield '\x1f07' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI -foldMapping '\x1f90' s = Yield '\x1f20' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI -foldMapping '\x1f91' s = Yield '\x1f21' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1f92' s = Yield '\x1f22' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA 
AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1f93' s = Yield '\x1f23' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1f94' s = Yield '\x1f24' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1f95' s = Yield '\x1f25' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1f96' s = Yield '\x1f26' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1f97' s = Yield '\x1f27' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI -foldMapping '\x1f98' s = Yield '\x1f20' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI -foldMapping '\x1f99' s = Yield '\x1f21' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1f9a' s = Yield '\x1f22' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1f9b' s = Yield '\x1f23' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1f9c' s = Yield '\x1f24' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1f9d' s = Yield '\x1f25' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -foldMapping '\x1f9e' s = Yield '\x1f26' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -foldMapping '\x1f9f' s = Yield '\x1f27' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI -foldMapping '\x1fa0' s = Yield '\x1f60' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI -foldMapping '\x1fa1' s = Yield '\x1f61' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1fa2' s = Yield '\x1f62' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI -foldMapping '\x1fa3' s = Yield '\x1f63' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1fa4' s = Yield '\x1f64' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI -foldMapping '\x1fa5' s = Yield '\x1f65' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1fa6' s = Yield '\x1f66' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1fa7' s = Yield '\x1f67' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI -foldMapping '\x1fa8' s = Yield '\x1f60' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI -foldMapping '\x1fa9' s = Yield '\x1f61' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1faa' s = Yield '\x1f62' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI -foldMapping '\x1fab' s = Yield '\x1f63' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1fac' s = Yield '\x1f64' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI -foldMapping '\x1fad' s = Yield '\x1f65' (CC s '\x03b9' 
'\x0000') --- GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI -foldMapping '\x1fae' s = Yield '\x1f66' (CC s '\x03b9' '\x0000') --- GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI -foldMapping '\x1faf' s = Yield '\x1f67' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI -foldMapping '\x1fb2' s = Yield '\x1f70' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI -foldMapping '\x1fb3' s = Yield '\x03b1' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI -foldMapping '\x1fb4' s = Yield '\x03ac' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI -foldMapping '\x1fb6' s = Yield '\x03b1' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1fb7' s = Yield '\x03b1' (CC s '\x0342' '\x03b9') --- GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI -foldMapping '\x1fbc' s = Yield '\x03b1' (CC s '\x03b9' '\x0000') --- GREEK PROSGEGRAMMENI -foldMapping '\x1fbe' s = Yield '\x03b9' (CC s '\x0000' '\x0000') --- GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI -foldMapping '\x1fc2' s = Yield '\x1f74' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI -foldMapping '\x1fc3' s = Yield '\x03b7' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI -foldMapping '\x1fc4' s = Yield '\x03ae' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER ETA WITH PERISPOMENI -foldMapping '\x1fc6' s = Yield '\x03b7' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1fc7' s = Yield '\x03b7' (CC s '\x0342' '\x03b9') --- GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI -foldMapping '\x1fcc' s = Yield '\x03b7' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA -foldMapping '\x1fd2' s = Yield '\x03b9' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA -foldMapping '\x1fd3' s = Yield '\x03b9' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER IOTA WITH PERISPOMENI -foldMapping '\x1fd6' s = Yield '\x03b9' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI -foldMapping '\x1fd7' s = Yield '\x03b9' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA -foldMapping '\x1fe2' s = Yield '\x03c5' (CC s '\x0308' '\x0300') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA -foldMapping '\x1fe3' s = Yield '\x03c5' (CC s '\x0308' '\x0301') --- GREEK SMALL LETTER RHO WITH PSILI -foldMapping '\x1fe4' s = Yield '\x03c1' (CC s '\x0313' '\x0000') --- GREEK SMALL LETTER UPSILON WITH PERISPOMENI -foldMapping '\x1fe6' s = Yield '\x03c5' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI -foldMapping '\x1fe7' s = Yield '\x03c5' (CC s '\x0308' '\x0342') --- GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI -foldMapping '\x1ff2' s = Yield '\x1f7c' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI -foldMapping '\x1ff3' s = Yield '\x03c9' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI -foldMapping '\x1ff4' s = Yield '\x03ce' (CC s '\x03b9' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI -foldMapping '\x1ff6' s = Yield '\x03c9' (CC s '\x0342' '\x0000') --- GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI -foldMapping '\x1ff7' s = Yield '\x03c9' (CC s '\x0342' '\x03b9') --- GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI -foldMapping '\x1ffc' s 
= Yield '\x03c9' (CC s '\x03b9' '\x0000') --- LATIN CAPITAL LETTER SMALL CAPITAL I -foldMapping '\xa7ae' s = Yield '\x026a' (CC s '\x0000' '\x0000') --- LATIN CAPITAL LETTER J WITH CROSSED-TAIL -foldMapping '\xa7b2' s = Yield '\x029d' (CC s '\x0000' '\x0000') --- LATIN CAPITAL LETTER CHI -foldMapping '\xa7b3' s = Yield '\xab53' (CC s '\x0000' '\x0000') --- LATIN CAPITAL LETTER BETA -foldMapping '\xa7b4' s = Yield '\xa7b5' (CC s '\x0000' '\x0000') --- LATIN CAPITAL LETTER OMEGA -foldMapping '\xa7b6' s = Yield '\xa7b7' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER A -foldMapping '\xab70' s = Yield '\x13a0' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER E -foldMapping '\xab71' s = Yield '\x13a1' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER I -foldMapping '\xab72' s = Yield '\x13a2' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER O -foldMapping '\xab73' s = Yield '\x13a3' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER U -foldMapping '\xab74' s = Yield '\x13a4' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER V -foldMapping '\xab75' s = Yield '\x13a5' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER GA -foldMapping '\xab76' s = Yield '\x13a6' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER KA -foldMapping '\xab77' s = Yield '\x13a7' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER GE -foldMapping '\xab78' s = Yield '\x13a8' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER GI -foldMapping '\xab79' s = Yield '\x13a9' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER GO -foldMapping '\xab7a' s = Yield '\x13aa' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER GU -foldMapping '\xab7b' s = Yield '\x13ab' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER GV -foldMapping '\xab7c' s = Yield '\x13ac' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER HA -foldMapping '\xab7d' s = Yield '\x13ad' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER HE -foldMapping '\xab7e' s = Yield '\x13ae' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER HI -foldMapping '\xab7f' s = Yield '\x13af' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER HO -foldMapping '\xab80' s = Yield '\x13b0' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER HU -foldMapping '\xab81' s = Yield '\x13b1' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER HV -foldMapping '\xab82' s = Yield '\x13b2' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER LA -foldMapping '\xab83' s = Yield '\x13b3' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER LE -foldMapping '\xab84' s = Yield '\x13b4' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER LI -foldMapping '\xab85' s = Yield '\x13b5' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER LO -foldMapping '\xab86' s = Yield '\x13b6' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER LU -foldMapping '\xab87' s = Yield '\x13b7' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER LV -foldMapping '\xab88' s = Yield '\x13b8' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER MA -foldMapping '\xab89' s = Yield '\x13b9' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER ME -foldMapping '\xab8a' s = Yield '\x13ba' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER MI -foldMapping '\xab8b' s = Yield '\x13bb' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER MO -foldMapping '\xab8c' s = Yield '\x13bc' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER MU -foldMapping '\xab8d' s = Yield '\x13bd' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER NA -foldMapping '\xab8e' s = Yield '\x13be' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER HNA -foldMapping '\xab8f' s = Yield 
'\x13bf' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER NAH -foldMapping '\xab90' s = Yield '\x13c0' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER NE -foldMapping '\xab91' s = Yield '\x13c1' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER NI -foldMapping '\xab92' s = Yield '\x13c2' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER NO -foldMapping '\xab93' s = Yield '\x13c3' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER NU -foldMapping '\xab94' s = Yield '\x13c4' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER NV -foldMapping '\xab95' s = Yield '\x13c5' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER QUA -foldMapping '\xab96' s = Yield '\x13c6' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER QUE -foldMapping '\xab97' s = Yield '\x13c7' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER QUI -foldMapping '\xab98' s = Yield '\x13c8' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER QUO -foldMapping '\xab99' s = Yield '\x13c9' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER QUU -foldMapping '\xab9a' s = Yield '\x13ca' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER QUV -foldMapping '\xab9b' s = Yield '\x13cb' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER SA -foldMapping '\xab9c' s = Yield '\x13cc' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER S -foldMapping '\xab9d' s = Yield '\x13cd' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER SE -foldMapping '\xab9e' s = Yield '\x13ce' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER SI -foldMapping '\xab9f' s = Yield '\x13cf' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER SO -foldMapping '\xaba0' s = Yield '\x13d0' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER SU -foldMapping '\xaba1' s = Yield '\x13d1' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER SV -foldMapping '\xaba2' s = Yield '\x13d2' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER DA -foldMapping '\xaba3' s = Yield '\x13d3' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TA -foldMapping '\xaba4' s = Yield '\x13d4' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER DE -foldMapping '\xaba5' s = Yield '\x13d5' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TE -foldMapping '\xaba6' s = Yield '\x13d6' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER DI -foldMapping '\xaba7' s = Yield '\x13d7' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TI -foldMapping '\xaba8' s = Yield '\x13d8' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER DO -foldMapping '\xaba9' s = Yield '\x13d9' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER DU -foldMapping '\xabaa' s = Yield '\x13da' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER DV -foldMapping '\xabab' s = Yield '\x13db' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER DLA -foldMapping '\xabac' s = Yield '\x13dc' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TLA -foldMapping '\xabad' s = Yield '\x13dd' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TLE -foldMapping '\xabae' s = Yield '\x13de' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TLI -foldMapping '\xabaf' s = Yield '\x13df' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TLO -foldMapping '\xabb0' s = Yield '\x13e0' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TLU -foldMapping '\xabb1' s = Yield '\x13e1' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TLV -foldMapping '\xabb2' s = Yield '\x13e2' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TSA -foldMapping '\xabb3' s = Yield '\x13e3' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TSE -foldMapping '\xabb4' s = Yield '\x13e4' (CC s '\x0000' 
'\x0000') --- CHEROKEE SMALL LETTER TSI -foldMapping '\xabb5' s = Yield '\x13e5' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TSO -foldMapping '\xabb6' s = Yield '\x13e6' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TSU -foldMapping '\xabb7' s = Yield '\x13e7' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER TSV -foldMapping '\xabb8' s = Yield '\x13e8' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER WA -foldMapping '\xabb9' s = Yield '\x13e9' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER WE -foldMapping '\xabba' s = Yield '\x13ea' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER WI -foldMapping '\xabbb' s = Yield '\x13eb' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER WO -foldMapping '\xabbc' s = Yield '\x13ec' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER WU -foldMapping '\xabbd' s = Yield '\x13ed' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER WV -foldMapping '\xabbe' s = Yield '\x13ee' (CC s '\x0000' '\x0000') --- CHEROKEE SMALL LETTER YA -foldMapping '\xabbf' s = Yield '\x13ef' (CC s '\x0000' '\x0000') --- LATIN SMALL LIGATURE FF -foldMapping '\xfb00' s = Yield '\x0066' (CC s '\x0066' '\x0000') --- LATIN SMALL LIGATURE FI -foldMapping '\xfb01' s = Yield '\x0066' (CC s '\x0069' '\x0000') --- LATIN SMALL LIGATURE FL -foldMapping '\xfb02' s = Yield '\x0066' (CC s '\x006c' '\x0000') --- LATIN SMALL LIGATURE FFI -foldMapping '\xfb03' s = Yield '\x0066' (CC s '\x0066' '\x0069') --- LATIN SMALL LIGATURE FFL -foldMapping '\xfb04' s = Yield '\x0066' (CC s '\x0066' '\x006c') --- LATIN SMALL LIGATURE LONG S T -foldMapping '\xfb05' s = Yield '\x0073' (CC s '\x0074' '\x0000') --- LATIN SMALL LIGATURE ST -foldMapping '\xfb06' s = Yield '\x0073' (CC s '\x0074' '\x0000') --- ARMENIAN SMALL LIGATURE MEN NOW -foldMapping '\xfb13' s = Yield '\x0574' (CC s '\x0576' '\x0000') --- ARMENIAN SMALL LIGATURE MEN ECH -foldMapping '\xfb14' s = Yield '\x0574' (CC s '\x0565' '\x0000') --- ARMENIAN SMALL LIGATURE MEN INI -foldMapping '\xfb15' s = Yield '\x0574' (CC s '\x056b' '\x0000') --- ARMENIAN SMALL LIGATURE VEW NOW -foldMapping '\xfb16' s = Yield '\x057e' (CC s '\x0576' '\x0000') --- ARMENIAN SMALL LIGATURE MEN XEH -foldMapping '\xfb17' s = Yield '\x0574' (CC s '\x056d' '\x0000') --- OSAGE CAPITAL LETTER A -foldMapping '\x104b0' s = Yield '\x104d8' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER AI -foldMapping '\x104b1' s = Yield '\x104d9' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER AIN -foldMapping '\x104b2' s = Yield '\x104da' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER AH -foldMapping '\x104b3' s = Yield '\x104db' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER BRA -foldMapping '\x104b4' s = Yield '\x104dc' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER CHA -foldMapping '\x104b5' s = Yield '\x104dd' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER EHCHA -foldMapping '\x104b6' s = Yield '\x104de' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER E -foldMapping '\x104b7' s = Yield '\x104df' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER EIN -foldMapping '\x104b8' s = Yield '\x104e0' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER HA -foldMapping '\x104b9' s = Yield '\x104e1' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER HYA -foldMapping '\x104ba' s = Yield '\x104e2' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER I -foldMapping '\x104bb' s = Yield '\x104e3' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER KA -foldMapping '\x104bc' s = Yield '\x104e4' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER EHKA -foldMapping '\x104bd' s = Yield 
'\x104e5' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER KYA -foldMapping '\x104be' s = Yield '\x104e6' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER LA -foldMapping '\x104bf' s = Yield '\x104e7' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER MA -foldMapping '\x104c0' s = Yield '\x104e8' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER NA -foldMapping '\x104c1' s = Yield '\x104e9' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER O -foldMapping '\x104c2' s = Yield '\x104ea' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER OIN -foldMapping '\x104c3' s = Yield '\x104eb' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER PA -foldMapping '\x104c4' s = Yield '\x104ec' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER EHPA -foldMapping '\x104c5' s = Yield '\x104ed' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER SA -foldMapping '\x104c6' s = Yield '\x104ee' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER SHA -foldMapping '\x104c7' s = Yield '\x104ef' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER TA -foldMapping '\x104c8' s = Yield '\x104f0' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER EHTA -foldMapping '\x104c9' s = Yield '\x104f1' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER TSA -foldMapping '\x104ca' s = Yield '\x104f2' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER EHTSA -foldMapping '\x104cb' s = Yield '\x104f3' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER TSHA -foldMapping '\x104cc' s = Yield '\x104f4' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER DHA -foldMapping '\x104cd' s = Yield '\x104f5' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER U -foldMapping '\x104ce' s = Yield '\x104f6' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER WA -foldMapping '\x104cf' s = Yield '\x104f7' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER KHA -foldMapping '\x104d0' s = Yield '\x104f8' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER GHA -foldMapping '\x104d1' s = Yield '\x104f9' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER ZA -foldMapping '\x104d2' s = Yield '\x104fa' (CC s '\x0000' '\x0000') --- OSAGE CAPITAL LETTER ZHA -foldMapping '\x104d3' s = Yield '\x104fb' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER A -foldMapping '\x10c80' s = Yield '\x10cc0' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER AA -foldMapping '\x10c81' s = Yield '\x10cc1' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EB -foldMapping '\x10c82' s = Yield '\x10cc2' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER AMB -foldMapping '\x10c83' s = Yield '\x10cc3' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EC -foldMapping '\x10c84' s = Yield '\x10cc4' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ENC -foldMapping '\x10c85' s = Yield '\x10cc5' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ECS -foldMapping '\x10c86' s = Yield '\x10cc6' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ED -foldMapping '\x10c87' s = Yield '\x10cc7' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER AND -foldMapping '\x10c88' s = Yield '\x10cc8' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER E -foldMapping '\x10c89' s = Yield '\x10cc9' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER CLOSE E -foldMapping '\x10c8a' s = Yield '\x10cca' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EE -foldMapping '\x10c8b' s = Yield '\x10ccb' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EF -foldMapping '\x10c8c' s = Yield '\x10ccc' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER 
EG -foldMapping '\x10c8d' s = Yield '\x10ccd' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EGY -foldMapping '\x10c8e' s = Yield '\x10cce' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EH -foldMapping '\x10c8f' s = Yield '\x10ccf' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER I -foldMapping '\x10c90' s = Yield '\x10cd0' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER II -foldMapping '\x10c91' s = Yield '\x10cd1' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EJ -foldMapping '\x10c92' s = Yield '\x10cd2' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EK -foldMapping '\x10c93' s = Yield '\x10cd3' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER AK -foldMapping '\x10c94' s = Yield '\x10cd4' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER UNK -foldMapping '\x10c95' s = Yield '\x10cd5' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EL -foldMapping '\x10c96' s = Yield '\x10cd6' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ELY -foldMapping '\x10c97' s = Yield '\x10cd7' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EM -foldMapping '\x10c98' s = Yield '\x10cd8' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EN -foldMapping '\x10c99' s = Yield '\x10cd9' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ENY -foldMapping '\x10c9a' s = Yield '\x10cda' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER O -foldMapping '\x10c9b' s = Yield '\x10cdb' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER OO -foldMapping '\x10c9c' s = Yield '\x10cdc' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER NIKOLSBURG OE -foldMapping '\x10c9d' s = Yield '\x10cdd' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER RUDIMENTA OE -foldMapping '\x10c9e' s = Yield '\x10cde' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER OEE -foldMapping '\x10c9f' s = Yield '\x10cdf' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EP -foldMapping '\x10ca0' s = Yield '\x10ce0' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EMP -foldMapping '\x10ca1' s = Yield '\x10ce1' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ER -foldMapping '\x10ca2' s = Yield '\x10ce2' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER SHORT ER -foldMapping '\x10ca3' s = Yield '\x10ce3' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ES -foldMapping '\x10ca4' s = Yield '\x10ce4' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ESZ -foldMapping '\x10ca5' s = Yield '\x10ce5' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ET -foldMapping '\x10ca6' s = Yield '\x10ce6' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ENT -foldMapping '\x10ca7' s = Yield '\x10ce7' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ETY -foldMapping '\x10ca8' s = Yield '\x10ce8' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ECH -foldMapping '\x10ca9' s = Yield '\x10ce9' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER U -foldMapping '\x10caa' s = Yield '\x10cea' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER UU -foldMapping '\x10cab' s = Yield '\x10ceb' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER NIKOLSBURG UE -foldMapping '\x10cac' s = Yield '\x10cec' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER RUDIMENTA UE -foldMapping '\x10cad' s = Yield '\x10ced' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EV -foldMapping '\x10cae' s = Yield '\x10cee' (CC s '\x0000' 
'\x0000') --- OLD HUNGARIAN CAPITAL LETTER EZ -foldMapping '\x10caf' s = Yield '\x10cef' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER EZS -foldMapping '\x10cb0' s = Yield '\x10cf0' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER ENT-SHAPED SIGN -foldMapping '\x10cb1' s = Yield '\x10cf1' (CC s '\x0000' '\x0000') --- OLD HUNGARIAN CAPITAL LETTER US -foldMapping '\x10cb2' s = Yield '\x10cf2' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER ALIF -foldMapping '\x1e900' s = Yield '\x1e922' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER DAALI -foldMapping '\x1e901' s = Yield '\x1e923' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER LAAM -foldMapping '\x1e902' s = Yield '\x1e924' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER MIIM -foldMapping '\x1e903' s = Yield '\x1e925' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER BA -foldMapping '\x1e904' s = Yield '\x1e926' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER SINNYIIYHE -foldMapping '\x1e905' s = Yield '\x1e927' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER PE -foldMapping '\x1e906' s = Yield '\x1e928' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER BHE -foldMapping '\x1e907' s = Yield '\x1e929' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER RA -foldMapping '\x1e908' s = Yield '\x1e92a' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER E -foldMapping '\x1e909' s = Yield '\x1e92b' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER FA -foldMapping '\x1e90a' s = Yield '\x1e92c' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER I -foldMapping '\x1e90b' s = Yield '\x1e92d' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER O -foldMapping '\x1e90c' s = Yield '\x1e92e' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER DHA -foldMapping '\x1e90d' s = Yield '\x1e92f' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER YHE -foldMapping '\x1e90e' s = Yield '\x1e930' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER WAW -foldMapping '\x1e90f' s = Yield '\x1e931' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER NUN -foldMapping '\x1e910' s = Yield '\x1e932' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER KAF -foldMapping '\x1e911' s = Yield '\x1e933' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER YA -foldMapping '\x1e912' s = Yield '\x1e934' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER U -foldMapping '\x1e913' s = Yield '\x1e935' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER JIIM -foldMapping '\x1e914' s = Yield '\x1e936' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER CHI -foldMapping '\x1e915' s = Yield '\x1e937' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER HA -foldMapping '\x1e916' s = Yield '\x1e938' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER QAAF -foldMapping '\x1e917' s = Yield '\x1e939' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER GA -foldMapping '\x1e918' s = Yield '\x1e93a' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER NYA -foldMapping '\x1e919' s = Yield '\x1e93b' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER TU -foldMapping '\x1e91a' s = Yield '\x1e93c' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER NHA -foldMapping '\x1e91b' s = Yield '\x1e93d' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER VA -foldMapping '\x1e91c' s = Yield '\x1e93e' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER KHA -foldMapping '\x1e91d' s = Yield '\x1e93f' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER GBE -foldMapping '\x1e91e' s = Yield '\x1e940' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER ZAL -foldMapping '\x1e91f' s = Yield '\x1e941' (CC s '\x0000' '\x0000') --- ADLAM 
CAPITAL LETTER KPO -foldMapping '\x1e920' s = Yield '\x1e942' (CC s '\x0000' '\x0000') --- ADLAM CAPITAL LETTER SHA -foldMapping '\x1e921' s = Yield '\x1e943' (CC s '\x0000' '\x0000') -foldMapping c s = Yield (toLower c) (CC s '\0' '\0') diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Fusion/Common.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Fusion/Common.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Fusion/Common.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Fusion/Common.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,949 +0,0 @@ -{-# LANGUAGE BangPatterns, MagicHash, Rank2Types #-} --- | --- Module : Data.Text.Internal.Fusion.Common --- Copyright : (c) Bryan O'Sullivan 2009, 2012 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Common stream fusion functionality for text. - -module Data.Text.Internal.Fusion.Common - ( - -- * Creation and elimination - singleton - , streamList - , unstreamList - , streamCString# - - -- * Basic interface - , cons - , snoc - , append - , head - , uncons - , last - , tail - , init - , null - , lengthI - , compareLengthI - , isSingleton - - -- * Transformations - , map - , intercalate - , intersperse - - -- ** Case conversion - -- $case - , toCaseFold - , toLower - , toTitle - , toUpper - - -- ** Justification - , justifyLeftI - - -- * Folds - , foldl - , foldl' - , foldl1 - , foldl1' - , foldr - , foldr1 - - -- ** Special folds - , concat - , concatMap - , any - , all - , maximum - , minimum - - -- * Construction - -- ** Scans - , scanl - - -- ** Generation and unfolding - , replicateCharI - , replicateI - , unfoldr - , unfoldrNI - - -- * Substrings - -- ** Breaking strings - , take - , drop - , takeWhile - , dropWhile - - -- * Predicates - , isPrefixOf - - -- * Searching - , elem - , filter - - -- * Indexing - , findBy - , indexI - , findIndexI - , countCharI - - -- * Zipping and unzipping - , zipWith - ) where - -import Prelude (Bool(..), Char, Eq(..), Int, Integral, Maybe(..), - Ord(..), Ordering(..), String, (.), ($), (+), (-), (*), (++), - (&&), fromIntegral, otherwise) -import qualified Data.List as L -import qualified Prelude as P -import Data.Bits (shiftL) -import Data.Char (isLetter, isSpace) -import Data.Int (Int64) -import Data.Text.Internal.Fusion.Types -import Data.Text.Internal.Fusion.CaseMapping (foldMapping, lowerMapping, titleMapping, - upperMapping) -import Data.Text.Internal.Fusion.Size -import GHC.Prim (Addr#, chr#, indexCharOffAddr#, ord#) -import GHC.Types (Char(..), Int(..)) - -singleton :: Char -> Stream Char -singleton c = Stream next False (codePointsSize 1) - where next False = Yield c True - next True = Done -{-# INLINE [0] singleton #-} - -streamList :: [a] -> Stream a -{-# INLINE [0] streamList #-} -streamList s = Stream next s unknownSize - where next [] = Done - next (x:xs) = Yield x xs - -unstreamList :: Stream a -> [a] -unstreamList (Stream next s0 _len) = unfold s0 - where unfold !s = case next s of - Done -> [] - Skip s' -> unfold s' - Yield x s' -> x : unfold s' -{-# INLINE [0] unstreamList #-} - -{-# RULES "STREAM streamList/unstreamList fusion" 
forall s. streamList (unstreamList s) = s #-} - --- | Stream the UTF-8-like packed encoding used by GHC to represent --- constant strings in generated code. --- --- This encoding uses the byte sequence "\xc0\x80" to represent NUL, --- and the string is NUL-terminated. -streamCString# :: Addr# -> Stream Char -streamCString# addr = Stream step 0 unknownSize - where - step !i - | b == 0 = Done - | b <= 0x7f = Yield (C# b#) (i+1) - | b <= 0xdf = let !c = chr $ ((b-0xc0) `shiftL` 6) + next 1 - in Yield c (i+2) - | b <= 0xef = let !c = chr $ ((b-0xe0) `shiftL` 12) + - (next 1 `shiftL` 6) + - next 2 - in Yield c (i+3) - | otherwise = let !c = chr $ ((b-0xf0) `shiftL` 18) + - (next 1 `shiftL` 12) + - (next 2 `shiftL` 6) + - next 3 - in Yield c (i+4) - where b = I# (ord# b#) - next n = I# (ord# (at# (i+n))) - 0x80 - !b# = at# i - at# (I# i#) = indexCharOffAddr# addr i# - chr (I# i#) = C# (chr# i#) -{-# INLINE [0] streamCString# #-} - --- ---------------------------------------------------------------------------- --- * Basic stream functions - -data C s = C0 !s - | C1 !s - --- | /O(n)/ Adds a character to the front of a Stream Char. -cons :: Char -> Stream Char -> Stream Char -cons !w (Stream next0 s0 len) = Stream next (C1 s0) (len + codePointsSize 1) - where - next (C1 s) = Yield w (C0 s) - next (C0 s) = case next0 s of - Done -> Done - Skip s' -> Skip (C0 s') - Yield x s' -> Yield x (C0 s') -{-# INLINE [0] cons #-} - -data Snoc a = N - | J !a - --- | /O(n)/ Adds a character to the end of a stream. -snoc :: Stream Char -> Char -> Stream Char -snoc (Stream next0 xs0 len) w = Stream next (J xs0) (len + codePointsSize 1) - where - next (J xs) = case next0 xs of - Done -> Yield w N - Skip xs' -> Skip (J xs') - Yield x xs' -> Yield x (J xs') - next N = Done -{-# INLINE [0] snoc #-} - -data E l r = L !l - | R !r - --- | /O(n)/ Appends one Stream to the other. -append :: Stream Char -> Stream Char -> Stream Char -append (Stream next0 s01 len1) (Stream next1 s02 len2) = - Stream next (L s01) (len1 + len2) - where - next (L s1) = case next0 s1 of - Done -> Skip (R s02) - Skip s1' -> Skip (L s1') - Yield x s1' -> Yield x (L s1') - next (R s2) = case next1 s2 of - Done -> Done - Skip s2' -> Skip (R s2') - Yield x s2' -> Yield x (R s2') -{-# INLINE [0] append #-} - --- | /O(1)/ Returns the first character of a Text, which must be non-empty. --- Subject to array fusion. -head :: Stream Char -> Char -head (Stream next s0 _len) = loop_head s0 - where - loop_head !s = case next s of - Yield x _ -> x - Skip s' -> loop_head s' - Done -> head_empty -{-# INLINE [0] head #-} - -head_empty :: a -head_empty = streamError "head" "Empty stream" -{-# NOINLINE head_empty #-} - --- | /O(1)/ Returns the first character and remainder of a 'Stream --- Char', or 'Nothing' if empty. Subject to array fusion. -uncons :: Stream Char -> Maybe (Char, Stream Char) -uncons (Stream next s0 len) = loop_uncons s0 - where - loop_uncons !s = case next s of - Yield x s1 -> Just (x, Stream next s1 (len - codePointsSize 1)) - Skip s' -> loop_uncons s' - Done -> Nothing -{-# INLINE [0] uncons #-} - --- | /O(n)/ Returns the last character of a 'Stream Char', which must --- be non-empty. 
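For orientation, the module being deleted here is built around the `Done`/`Skip`/`Yield` stepper (`Stream`, `Step`) referenced throughout the removed code. The following is a minimal, self-contained sketch of that idea, not the library's actual definitions: it omits the size hint the real `Stream` carries and simply mirrors the `streamList`/`unstreamList` pair above.

```haskell
{-# LANGUAGE ExistentialQuantification #-}

-- Simplified model of the stepper-based streams used by the deleted module.
-- The real Stream also carries a size hint; that is omitted here.
data Step s a = Done | Skip !s | Yield !a !s

data Stream a = forall s. Stream (s -> Step s a) s

-- Analogue of streamList: expose a list as a stream.
streamList :: [a] -> Stream a
streamList xs0 = Stream next xs0
  where
    next []     = Done
    next (x:xs) = Yield x xs

-- Analogue of unstreamList: run the stepper back into a list.
unstreamList :: Stream a -> [a]
unstreamList (Stream next s0) = go s0
  where
    go s = case next s of
      Done       -> []
      Skip s'    -> go s'
      Yield x s' -> x : go s'

-- unstreamList (streamList [1,2,3]) == [1,2,3]
```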
-last :: Stream Char -> Char -last (Stream next s0 _len) = loop0_last s0 - where - loop0_last !s = case next s of - Done -> emptyError "last" - Skip s' -> loop0_last s' - Yield x s' -> loop_last x s' - loop_last !x !s = case next s of - Done -> x - Skip s' -> loop_last x s' - Yield x' s' -> loop_last x' s' -{-# INLINE[0] last #-} - --- | /O(1)/ Returns all characters after the head of a Stream Char, which must --- be non-empty. -tail :: Stream Char -> Stream Char -tail (Stream next0 s0 len) = Stream next (C0 s0) (len - codePointsSize 1) - where - next (C0 s) = case next0 s of - Done -> emptyError "tail" - Skip s' -> Skip (C0 s') - Yield _ s' -> Skip (C1 s') - next (C1 s) = case next0 s of - Done -> Done - Skip s' -> Skip (C1 s') - Yield x s' -> Yield x (C1 s') -{-# INLINE [0] tail #-} - -data Init s = Init0 !s - | Init1 {-# UNPACK #-} !Char !s - --- | /O(1)/ Returns all but the last character of a Stream Char, which --- must be non-empty. -init :: Stream Char -> Stream Char -init (Stream next0 s0 len) = Stream next (Init0 s0) (len - codePointsSize 1) - where - next (Init0 s) = case next0 s of - Done -> emptyError "init" - Skip s' -> Skip (Init0 s') - Yield x s' -> Skip (Init1 x s') - next (Init1 x s) = case next0 s of - Done -> Done - Skip s' -> Skip (Init1 x s') - Yield x' s' -> Yield x (Init1 x' s') -{-# INLINE [0] init #-} - --- | /O(1)/ Tests whether a Stream Char is empty or not. -null :: Stream Char -> Bool -null (Stream next s0 _len) = loop_null s0 - where - loop_null !s = case next s of - Done -> True - Yield _ _ -> False - Skip s' -> loop_null s' -{-# INLINE[0] null #-} - --- | /O(n)/ Returns the number of characters in a string. -lengthI :: Integral a => Stream Char -> a -lengthI (Stream next s0 _len) = loop_length 0 s0 - where - loop_length !z s = case next s of - Done -> z - Skip s' -> loop_length z s' - Yield _ s' -> loop_length (z + 1) s' -{-# INLINE[0] lengthI #-} - --- | /O(n)/ Compares the count of characters in a string to a number. --- Subject to fusion. --- --- This function gives the same answer as comparing against the result --- of 'lengthI', but can short circuit if the count of characters is --- greater than the number or if the stream can't possibly be as long --- as the number supplied, and hence be more efficient. -compareLengthI :: Integral a => Stream Char -> a -> Ordering -compareLengthI (Stream next s0 len) n - -- Note that @len@ tracks code units whereas we want to compare the length - -- in code points. Specifically, a stream with hint @len@ may consist of - -- anywhere from @len/2@ to @len@ code points. - | Just r <- compareSize len n' = r - | otherwise = loop_cmp 0 s0 - where - n' = codePointsSize $ fromIntegral n - loop_cmp !z s = case next s of - Done -> compare z n - Skip s' -> loop_cmp z s' - Yield _ s' | z > n -> GT - | otherwise -> loop_cmp (z + 1) s' -{-# INLINE[0] compareLengthI #-} - --- | /O(n)/ Indicate whether a string contains exactly one element. -isSingleton :: Stream Char -> Bool -isSingleton (Stream next s0 _len) = loop 0 s0 - where - loop !z s = case next s of - Done -> z == (1::Int) - Skip s' -> loop z s' - Yield _ s' - | z >= 1 -> False - | otherwise -> loop (z+1) s' -{-# INLINE[0] isSingleton #-} - --- ---------------------------------------------------------------------------- --- * Stream transformations - --- | /O(n)/ 'map' @f @xs is the Stream Char obtained by applying @f@ --- to each element of @xs@. 
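The note on `compareLengthI` above explains why comparing a character count against a bound can stop early instead of measuring the whole stream. A hedged sketch of the same idea on ordinary lists (the name below is illustrative, not the library's):

```haskell
{-# LANGUAGE BangPatterns #-}

-- Compare the length of a list against n without walking past n+1 elements.
compareLength :: [a] -> Int -> Ordering
compareLength xs0 n = go 0 xs0
  where
    go !z _ | z > n  = GT            -- already longer than n: stop early
    go !z []         = compare z n
    go !z (_ : rest) = go (z + 1) rest

-- compareLength (repeat 'x') 5 == GT, even though the list is infinite.
```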
-map :: (Char -> Char) -> Stream Char -> Stream Char -map f (Stream next0 s0 len) = Stream next s0 len - where - next !s = case next0 s of - Done -> Done - Skip s' -> Skip s' - Yield x s' -> Yield (f x) s' -{-# INLINE [0] map #-} - -{-# - RULES "STREAM map/map fusion" forall f g s. - map f (map g s) = map (\x -> f (g x)) s - #-} - -data I s = I1 !s - | I2 !s {-# UNPACK #-} !Char - | I3 !s - --- | /O(n)/ Take a character and place it between each of the --- characters of a 'Stream Char'. -intersperse :: Char -> Stream Char -> Stream Char -intersperse c (Stream next0 s0 len) = Stream next (I1 s0) (len + unknownSize) - where - next (I1 s) = case next0 s of - Done -> Done - Skip s' -> Skip (I1 s') - Yield x s' -> Skip (I2 s' x) - next (I2 s x) = Yield x (I3 s) - next (I3 s) = case next0 s of - Done -> Done - Skip s' -> Skip (I3 s') - Yield x s' -> Yield c (I2 s' x) -{-# INLINE [0] intersperse #-} - --- ---------------------------------------------------------------------------- --- ** Case conversions (folds) - --- $case --- --- With Unicode text, it is incorrect to use combinators like @map --- toUpper@ to case convert each character of a string individually. --- Instead, use the whole-string case conversion functions from this --- module. For correctness in different writing systems, these --- functions may map one input character to two or three output --- characters. - --- | Map a 'Stream' through the given case-mapping function. -caseConvert :: (forall s. Char -> s -> Step (CC s) Char) - -> Stream Char -> Stream Char -caseConvert remap (Stream next0 s0 len) = - Stream next (CC s0 '\0' '\0') (len `unionSize` (3*len)) - where - next (CC s '\0' _) = - case next0 s of - Done -> Done - Skip s' -> Skip (CC s' '\0' '\0') - Yield c s' -> remap c s' - next (CC s a b) = Yield a (CC s b '\0') - --- | /O(n)/ Convert a string to folded case. This function is mainly --- useful for performing caseless (or case insensitive) string --- comparisons. --- --- A string @x@ is a caseless match for a string @y@ if and only if: --- --- @toCaseFold x == toCaseFold y@ --- --- The result string may be longer than the input string, and may --- differ from applying 'toLower' to the input string. For instance, --- the Armenian small ligature men now (U+FB13) is case folded to the --- bigram men now (U+0574 U+0576), while the micro sign (U+00B5) is --- case folded to the Greek small letter letter mu (U+03BC) instead of --- itself. -toCaseFold :: Stream Char -> Stream Char -toCaseFold = caseConvert foldMapping -{-# INLINE [0] toCaseFold #-} - --- | /O(n)/ Convert a string to upper case, using simple case --- conversion. The result string may be longer than the input string. --- For instance, the German eszett (U+00DF) maps to the two-letter --- sequence SS. -toUpper :: Stream Char -> Stream Char -toUpper = caseConvert upperMapping -{-# INLINE [0] toUpper #-} - --- | /O(n)/ Convert a string to lower case, using simple case --- conversion. The result string may be longer than the input string. --- For instance, the Latin capital letter I with dot above (U+0130) --- maps to the sequence Latin small letter i (U+0069) followed by --- combining dot above (U+0307). -toLower :: Stream Char -> Stream Char -toLower = caseConvert lowerMapping -{-# INLINE [0] toLower #-} - --- | /O(n)/ Convert a string to title case, using simple case --- conversion. --- --- The first letter of the input is converted to title case, as is --- every subsequent letter that immediately follows a non-letter. 
--- Every letter that immediately follows another letter is converted --- to lower case. --- --- The result string may be longer than the input string. For example, --- the Latin small ligature fl (U+FB02) is converted to the --- sequence Latin capital letter F (U+0046) followed by Latin small --- letter l (U+006C). --- --- /Note/: this function does not take language or culture specific --- rules into account. For instance, in English, different style --- guides disagree on whether the book name \"The Hill of the Red --- Fox\" is correctly title cased—but this function will --- capitalize /every/ word. -toTitle :: Stream Char -> Stream Char -toTitle (Stream next0 s0 len) = Stream next (CC (False :*: s0) '\0' '\0') (len + unknownSize) - where - next (CC (letter :*: s) '\0' _) = - case next0 s of - Done -> Done - Skip s' -> Skip (CC (letter :*: s') '\0' '\0') - Yield c s' - | nonSpace -> if letter - then lowerMapping c (nonSpace :*: s') - else titleMapping c (letter' :*: s') - | otherwise -> Yield c (CC (letter' :*: s') '\0' '\0') - where nonSpace = P.not (isSpace c) - letter' = isLetter c - next (CC s a b) = Yield a (CC s b '\0') -{-# INLINE [0] toTitle #-} - -data Justify i s = Just1 !i !s - | Just2 !i !s - -justifyLeftI :: Integral a => a -> Char -> Stream Char -> Stream Char -justifyLeftI k c (Stream next0 s0 len) = - Stream next (Just1 0 s0) (larger (fromIntegral k * charSize c + len) len) - where - next (Just1 n s) = - case next0 s of - Done -> next (Just2 n s) - Skip s' -> Skip (Just1 n s') - Yield x s' -> Yield x (Just1 (n+1) s') - next (Just2 n s) - | n < k = Yield c (Just2 (n+1) s) - | otherwise = Done - {-# INLINE next #-} -{-# INLINE [0] justifyLeftI #-} - --- ---------------------------------------------------------------------------- --- * Reducing Streams (folds) - --- | foldl, applied to a binary operator, a starting value (typically the --- left-identity of the operator), and a Stream, reduces the Stream using the --- binary operator, from left to right. -foldl :: (b -> Char -> b) -> b -> Stream Char -> b -foldl f z0 (Stream next s0 _len) = loop_foldl z0 s0 - where - loop_foldl z !s = case next s of - Done -> z - Skip s' -> loop_foldl z s' - Yield x s' -> loop_foldl (f z x) s' -{-# INLINE [0] foldl #-} - --- | A strict version of foldl. -foldl' :: (b -> Char -> b) -> b -> Stream Char -> b -foldl' f z0 (Stream next s0 _len) = loop_foldl' z0 s0 - where - loop_foldl' !z !s = case next s of - Done -> z - Skip s' -> loop_foldl' z s' - Yield x s' -> loop_foldl' (f z x) s' -{-# INLINE [0] foldl' #-} - --- | foldl1 is a variant of foldl that has no starting value argument, --- and thus must be applied to non-empty Streams. -foldl1 :: (Char -> Char -> Char) -> Stream Char -> Char -foldl1 f (Stream next s0 _len) = loop0_foldl1 s0 - where - loop0_foldl1 !s = case next s of - Skip s' -> loop0_foldl1 s' - Yield x s' -> loop_foldl1 x s' - Done -> emptyError "foldl1" - loop_foldl1 z !s = case next s of - Done -> z - Skip s' -> loop_foldl1 z s' - Yield x s' -> loop_foldl1 (f z x) s' -{-# INLINE [0] foldl1 #-} - --- | A strict version of foldl1. 
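The case-conversion notes above (a single input character may map to two or three output characters, so whole-string conversion is required) are easiest to observe through the public `Data.Text` API, whose `toCaseFold`, `toUpper`, `toLower` and `toTitle` correspond to the stream versions being removed here. A small, hedged illustration:

```haskell
import qualified Data.Text as T

-- Caseless matching via case folding, as described for toCaseFold above.
caselessEq :: T.Text -> T.Text -> Bool
caselessEq x y = T.toCaseFold x == T.toCaseFold y

main :: IO ()
main = do
  print (caselessEq (T.pack "Straße") (T.pack "STRASSE"))  -- True: U+00DF folds to "ss"
  print (T.toUpper (T.pack "straße"))    -- "STRASSE": the result is longer than the input
  print (T.toTitle (T.pack "ﬂat earth")) -- "Flat Earth": the ﬂ ligature expands to two letters
```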
-foldl1' :: (Char -> Char -> Char) -> Stream Char -> Char -foldl1' f (Stream next s0 _len) = loop0_foldl1' s0 - where - loop0_foldl1' !s = case next s of - Skip s' -> loop0_foldl1' s' - Yield x s' -> loop_foldl1' x s' - Done -> emptyError "foldl1" - loop_foldl1' !z !s = case next s of - Done -> z - Skip s' -> loop_foldl1' z s' - Yield x s' -> loop_foldl1' (f z x) s' -{-# INLINE [0] foldl1' #-} - --- | 'foldr', applied to a binary operator, a starting value (typically the --- right-identity of the operator), and a stream, reduces the stream using the --- binary operator, from right to left. -foldr :: (Char -> b -> b) -> b -> Stream Char -> b -foldr f z (Stream next s0 _len) = loop_foldr s0 - where - loop_foldr !s = case next s of - Done -> z - Skip s' -> loop_foldr s' - Yield x s' -> f x (loop_foldr s') -{-# INLINE [0] foldr #-} - --- | foldr1 is a variant of 'foldr' that has no starting value argument, --- and thus must be applied to non-empty streams. --- Subject to array fusion. -foldr1 :: (Char -> Char -> Char) -> Stream Char -> Char -foldr1 f (Stream next s0 _len) = loop0_foldr1 s0 - where - loop0_foldr1 !s = case next s of - Done -> emptyError "foldr1" - Skip s' -> loop0_foldr1 s' - Yield x s' -> loop_foldr1 x s' - - loop_foldr1 x !s = case next s of - Done -> x - Skip s' -> loop_foldr1 x s' - Yield x' s' -> f x (loop_foldr1 x' s') -{-# INLINE [0] foldr1 #-} - -intercalate :: Stream Char -> [Stream Char] -> Stream Char -intercalate s = concat . (L.intersperse s) -{-# INLINE [0] intercalate #-} - --- ---------------------------------------------------------------------------- --- ** Special folds - --- | /O(n)/ Concatenate a list of streams. Subject to array fusion. -concat :: [Stream Char] -> Stream Char -concat = L.foldr append empty -{-# INLINE [0] concat #-} - --- | Map a function over a stream that results in a stream and concatenate the --- results. -concatMap :: (Char -> Stream Char) -> Stream Char -> Stream Char -concatMap f = foldr (append . f) empty -{-# INLINE [0] concatMap #-} - --- | /O(n)/ any @p @xs determines if any character in the stream --- @xs@ satisfies the predicate @p@. -any :: (Char -> Bool) -> Stream Char -> Bool -any p (Stream next0 s0 _len) = loop_any s0 - where - loop_any !s = case next0 s of - Done -> False - Skip s' -> loop_any s' - Yield x s' | p x -> True - | otherwise -> loop_any s' -{-# INLINE [0] any #-} - --- | /O(n)/ all @p @xs determines if all characters in the 'Text' --- @xs@ satisfy the predicate @p@. -all :: (Char -> Bool) -> Stream Char -> Bool -all p (Stream next0 s0 _len) = loop_all s0 - where - loop_all !s = case next0 s of - Done -> True - Skip s' -> loop_all s' - Yield x s' | p x -> loop_all s' - | otherwise -> False -{-# INLINE [0] all #-} - --- | /O(n)/ maximum returns the maximum value from a stream, which must be --- non-empty. -maximum :: Stream Char -> Char -maximum (Stream next0 s0 _len) = loop0_maximum s0 - where - loop0_maximum !s = case next0 s of - Done -> emptyError "maximum" - Skip s' -> loop0_maximum s' - Yield x s' -> loop_maximum x s' - loop_maximum !z !s = case next0 s of - Done -> z - Skip s' -> loop_maximum z s' - Yield x s' - | x > z -> loop_maximum x s' - | otherwise -> loop_maximum z s' -{-# INLINE [0] maximum #-} - --- | /O(n)/ minimum returns the minimum value from a 'Text', which must be --- non-empty. 
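These reducing combinators have direct public counterparts; a short sketch of the strict left fold and the predicate folds, assuming the public Data.Text module:

    import qualified Data.Text as T
    import Data.Char (isDigit)

    main :: IO ()
    main = do
      let t = T.pack "abc123"
      -- Strict left fold: count the digits without building up thunks.
      print (T.foldl' (\n c -> if isDigit c then n + 1 else n) (0 :: Int) t)
      print (T.any isDigit t)   -- True
      print (T.all isDigit t)   -- False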
-minimum :: Stream Char -> Char -minimum (Stream next0 s0 _len) = loop0_minimum s0 - where - loop0_minimum !s = case next0 s of - Done -> emptyError "minimum" - Skip s' -> loop0_minimum s' - Yield x s' -> loop_minimum x s' - loop_minimum !z !s = case next0 s of - Done -> z - Skip s' -> loop_minimum z s' - Yield x s' - | x < z -> loop_minimum x s' - | otherwise -> loop_minimum z s' -{-# INLINE [0] minimum #-} - --- ----------------------------------------------------------------------------- --- * Building streams - -scanl :: (Char -> Char -> Char) -> Char -> Stream Char -> Stream Char -scanl f z0 (Stream next0 s0 len) = Stream next (Scan1 z0 s0) (len+1) -- HINT maybe too low - where - {-# INLINE next #-} - next (Scan1 z s) = Yield z (Scan2 z s) - next (Scan2 z s) = case next0 s of - Yield x s' -> let !x' = f z x - in Yield x' (Scan2 x' s') - Skip s' -> Skip (Scan2 z s') - Done -> Done -{-# INLINE [0] scanl #-} - --- ----------------------------------------------------------------------------- --- ** Generating and unfolding streams - -replicateCharI :: Integral a => a -> Char -> Stream Char -replicateCharI !n !c - | n < 0 = empty - | otherwise = Stream next 0 (fromIntegral n) -- HINT maybe too low - where - next !i | i >= n = Done - | otherwise = Yield c (i + 1) -{-# INLINE [0] replicateCharI #-} - -data RI s = RI !s {-# UNPACK #-} !Int64 - -replicateI :: Int64 -> Stream Char -> Stream Char -replicateI n (Stream next0 s0 len) = - Stream next (RI s0 0) (fromIntegral (max 0 n) * len) - where - next (RI s k) - | k >= n = Done - | otherwise = case next0 s of - Done -> Skip (RI s0 (k+1)) - Skip s' -> Skip (RI s' k) - Yield x s' -> Yield x (RI s' k) -{-# INLINE [0] replicateI #-} - --- | /O(n)/, where @n@ is the length of the result. The unfoldr function --- is analogous to the List 'unfoldr'. unfoldr builds a stream --- from a seed value. The function takes the element and returns --- Nothing if it is done producing the stream or returns Just --- (a,b), in which case, a is the next Char in the string, and b is --- the seed value for further production. -unfoldr :: (a -> Maybe (Char,a)) -> a -> Stream Char -unfoldr f s0 = Stream next s0 unknownSize - where - {-# INLINE next #-} - next !s = case f s of - Nothing -> Done - Just (w, s') -> Yield w s' -{-# INLINE [0] unfoldr #-} - --- | /O(n)/ Like 'unfoldr', 'unfoldrNI' builds a stream from a seed --- value. However, the length of the result is limited by the --- first argument to 'unfoldrNI'. This function is more efficient than --- 'unfoldr' when the length of the result is known. -unfoldrNI :: Integral a => a -> (b -> Maybe (Char,b)) -> b -> Stream Char -unfoldrNI n f s0 | n < 0 = empty - | otherwise = Stream next (0 :*: s0) (maxSize $ fromIntegral (n*2)) - where - {-# INLINE next #-} - next (z :*: s) = case f s of - Nothing -> Done - Just (w, s') | z >= n -> Done - | otherwise -> Yield w ((z + 1) :*: s') -{-# INLINE unfoldrNI #-} - -------------------------------------------------------------------------------- --- * Substreams - --- | /O(n)/ @'take' n@, applied to a stream, returns the prefix of the --- stream of length @n@, or the stream itself if @n@ is greater than the --- length of the stream. 
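The unfoldr/unfoldrNI pair described above surfaces publicly as unfoldr and unfoldrN; a usage sketch, assuming those Data.Text functions:

    import qualified Data.Text as T

    main :: IO ()
    main = do
      -- Produce "abcde" from a seed, stopping once the seed passes 'e'.
      let step c | c <= 'e'  = Just (c, succ c)
                 | otherwise = Nothing
      print (T.unfoldr step 'a')      -- "abcde"
      -- The bounded variant stops after at most 3 characters.
      print (T.unfoldrN 3 step 'a')   -- "abc"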
-take :: Integral a => a -> Stream Char -> Stream Char -take n0 (Stream next0 s0 len) = - Stream next (n0' :*: s0) (smaller len (codePointsSize $ fromIntegral n0')) - where - n0' = max n0 0 - - {-# INLINE next #-} - next (n :*: s) | n <= 0 = Done - | otherwise = case next0 s of - Done -> Done - Skip s' -> Skip (n :*: s') - Yield x s' -> Yield x ((n-1) :*: s') -{-# INLINE [0] take #-} - -data Drop a s = NS !s - | JS !a !s - --- | /O(n)/ @'drop' n@, applied to a stream, returns the suffix of the --- stream after the first @n@ characters, or the empty stream if @n@ --- is greater than the length of the stream. -drop :: Integral a => a -> Stream Char -> Stream Char -drop n0 (Stream next0 s0 len) = - Stream next (JS n0' s0) (len - codePointsSize (fromIntegral n0')) - where - n0' = max n0 0 - - {-# INLINE next #-} - next (JS n s) - | n <= 0 = Skip (NS s) - | otherwise = case next0 s of - Done -> Done - Skip s' -> Skip (JS n s') - Yield _ s' -> Skip (JS (n-1) s') - next (NS s) = case next0 s of - Done -> Done - Skip s' -> Skip (NS s') - Yield x s' -> Yield x (NS s') -{-# INLINE [0] drop #-} - --- | 'takeWhile', applied to a predicate @p@ and a stream, returns the --- longest prefix (possibly empty) of elements that satisfy @p@. -takeWhile :: (Char -> Bool) -> Stream Char -> Stream Char -takeWhile p (Stream next0 s0 len) = Stream next s0 (len - unknownSize) - where - {-# INLINE next #-} - next !s = case next0 s of - Done -> Done - Skip s' -> Skip s' - Yield x s' | p x -> Yield x s' - | otherwise -> Done -{-# INLINE [0] takeWhile #-} - --- | @'dropWhile' p xs@ returns the suffix remaining after @'takeWhile' p xs@. -dropWhile :: (Char -> Bool) -> Stream Char -> Stream Char -dropWhile p (Stream next0 s0 len) = Stream next (L s0) (len - unknownSize) - where - {-# INLINE next #-} - next (L s) = case next0 s of - Done -> Done - Skip s' -> Skip (L s') - Yield x s' | p x -> Skip (L s') - | otherwise -> Yield x (R s') - next (R s) = case next0 s of - Done -> Done - Skip s' -> Skip (R s') - Yield x s' -> Yield x (R s') -{-# INLINE [0] dropWhile #-} - --- | /O(n)/ The 'isPrefixOf' function takes two 'Stream's and returns --- 'True' iff the first is a prefix of the second. -isPrefixOf :: (Eq a) => Stream a -> Stream a -> Bool -isPrefixOf (Stream next1 s1 _) (Stream next2 s2 _) = loop (next1 s1) (next2 s2) - where - loop Done _ = True - loop _ Done = False - loop (Skip s1') (Skip s2') = loop (next1 s1') (next2 s2') - loop (Skip s1') x2 = loop (next1 s1') x2 - loop x1 (Skip s2') = loop x1 (next2 s2') - loop (Yield x1 s1') (Yield x2 s2') = x1 == x2 && - loop (next1 s1') (next2 s2') -{-# INLINE [0] isPrefixOf #-} - --- ---------------------------------------------------------------------------- --- * Searching - -------------------------------------------------------------------------------- --- ** Searching by equality - --- | /O(n)/ 'elem' is the stream membership predicate. -elem :: Char -> Stream Char -> Bool -elem w (Stream next s0 _len) = loop_elem s0 - where - loop_elem !s = case next s of - Done -> False - Skip s' -> loop_elem s' - Yield x s' | x == w -> True - | otherwise -> loop_elem s' -{-# INLINE [0] elem #-} - -------------------------------------------------------------------------------- --- ** Searching with a predicate - --- | /O(n)/ The 'findBy' function takes a predicate and a stream, --- and returns the first element in matching the predicate, or 'Nothing' --- if there is no such element. 
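The substream operations above correspond one-for-one to public functions; a brief sketch assuming the public Data.Text API:

    import qualified Data.Text as T
    import Data.Char (isDigit)

    main :: IO ()
    main = do
      let t = T.pack "2021-01-30"
      print (T.take 4 t)                      -- "2021"
      print (T.drop 5 t)                      -- "01-30"
      print (T.takeWhile isDigit t)           -- "2021"
      print (T.dropWhile isDigit t)           -- "-01-30"
      print (T.isPrefixOf (T.pack "2021") t)  -- True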
- -findBy :: (Char -> Bool) -> Stream Char -> Maybe Char -findBy p (Stream next s0 _len) = loop_find s0 - where - loop_find !s = case next s of - Done -> Nothing - Skip s' -> loop_find s' - Yield x s' | p x -> Just x - | otherwise -> loop_find s' -{-# INLINE [0] findBy #-} - --- | /O(n)/ Stream index (subscript) operator, starting from 0. -indexI :: Integral a => Stream Char -> a -> Char -indexI (Stream next s0 _len) n0 - | n0 < 0 = streamError "index" "Negative index" - | otherwise = loop_index n0 s0 - where - loop_index !n !s = case next s of - Done -> streamError "index" "Index too large" - Skip s' -> loop_index n s' - Yield x s' | n == 0 -> x - | otherwise -> loop_index (n-1) s' -{-# INLINE [0] indexI #-} - --- | /O(n)/ 'filter', applied to a predicate and a stream, --- returns a stream containing those characters that satisfy the --- predicate. -filter :: (Char -> Bool) -> Stream Char -> Stream Char -filter p (Stream next0 s0 len) = - Stream next s0 (len - unknownSize) -- HINT maybe too high - where - next !s = case next0 s of - Done -> Done - Skip s' -> Skip s' - Yield x s' | p x -> Yield x s' - | otherwise -> Skip s' -{-# INLINE [0] filter #-} - -{-# RULES - "STREAM filter/filter fusion" forall p q s. - filter p (filter q s) = filter (\x -> q x && p x) s - #-} - --- | The 'findIndexI' function takes a predicate and a stream and --- returns the index of the first element in the stream satisfying the --- predicate. -findIndexI :: Integral a => (Char -> Bool) -> Stream Char -> Maybe a -findIndexI p s = case findIndicesI p s of - (i:_) -> Just i - _ -> Nothing -{-# INLINE [0] findIndexI #-} - --- | The 'findIndicesI' function takes a predicate and a stream and --- returns all indices of the elements in the stream satisfying the --- predicate. -findIndicesI :: Integral a => (Char -> Bool) -> Stream Char -> [a] -findIndicesI p (Stream next s0 _len) = loop_findIndex 0 s0 - where - loop_findIndex !i !s = case next s of - Done -> [] - Skip s' -> loop_findIndex i s' -- hmm. not caught by QC - Yield x s' | p x -> i : loop_findIndex (i+1) s' - | otherwise -> loop_findIndex (i+1) s' -{-# INLINE [0] findIndicesI #-} - -------------------------------------------------------------------------------- --- * Zipping - --- | Strict triple. -data Zip a b m = Z1 !a !b - | Z2 !a !b !m - --- | zipWith generalises 'zip' by zipping with the function given as --- the first argument, instead of a tupling function. -zipWith :: (a -> a -> b) -> Stream a -> Stream a -> Stream b -zipWith f (Stream next0 sa0 len1) (Stream next1 sb0 len2) = - Stream next (Z1 sa0 sb0) (smaller len1 len2) - where - next (Z1 sa sb) = case next0 sa of - Done -> Done - Skip sa' -> Skip (Z1 sa' sb) - Yield a sa' -> Skip (Z2 sa' sb a) - - next (Z2 sa' sb a) = case next1 sb of - Done -> Done - Skip sb' -> Skip (Z2 sa' sb' a) - Yield b sb' -> Yield (f a b) (Z1 sa' sb') -{-# INLINE [0] zipWith #-} - --- | /O(n)/ The 'countCharI' function returns the number of times the --- query element appears in the given stream. -countCharI :: Integral a => Char -> Stream Char -> a -countCharI a (Stream next s0 _len) = loop 0 s0 - where - loop !i !s = case next s of - Done -> i - Skip s' -> loop i s' - Yield x s' | a == x -> loop (i+1) s' - | otherwise -> loop i s' -{-# INLINE [0] countCharI #-} - -streamError :: String -> String -> a -streamError func msg = P.error $ "Data.Text.Internal.Fusion.Common." 
++ func ++ ": " ++ msg - -emptyError :: String -> a -emptyError func = internalError func "Empty input" - -internalError :: String -> a -internalError func = streamError func "Internal error" diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Fusion/Size.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Fusion/Size.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Fusion/Size.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Fusion/Size.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,187 +0,0 @@ -{-# LANGUAGE CPP #-} -{-# OPTIONS_GHC -fno-warn-missing-methods #-} --- | --- Module : Data.Text.Internal.Fusion.Internal --- Copyright : (c) Roman Leshchinskiy 2008, --- (c) Bryan O'Sullivan 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Size hints. - -module Data.Text.Internal.Fusion.Size - ( - Size - -- * Sizes - , exactSize - , maxSize - , betweenSize - , unknownSize - , unionSize - , charSize - , codePointsSize - -- * Querying sizes - , exactly - , smaller - , larger - , upperBound - , lowerBound - , compareSize - , isEmpty - ) where - -import Data.Char (ord) -import Data.Text.Internal (mul) -#if defined(ASSERTS) -import Control.Exception (assert) -#endif - --- | A size in UTF-16 code units. -data Size = Between {-# UNPACK #-} !Int {-# UNPACK #-} !Int -- ^ Lower and upper bounds on size. - | Unknown -- ^ Unknown size. - deriving (Eq, Show) - -exactly :: Size -> Maybe Int -exactly (Between na nb) | na == nb = Just na -exactly _ = Nothing -{-# INLINE exactly #-} - --- | The 'Size' of the given code point. -charSize :: Char -> Size -charSize c - | ord c < 0x10000 = exactSize 1 - | otherwise = exactSize 2 - --- | The 'Size' of @n@ code points. -codePointsSize :: Int -> Size -codePointsSize n = -#if defined(ASSERTS) - assert (n >= 0) -#endif - Between n (2*n) -{-# INLINE codePointsSize #-} - -exactSize :: Int -> Size -exactSize n = -#if defined(ASSERTS) - assert (n >= 0) -#endif - Between n n -{-# INLINE exactSize #-} - -maxSize :: Int -> Size -maxSize n = -#if defined(ASSERTS) - assert (n >= 0) -#endif - Between 0 n -{-# INLINE maxSize #-} - -betweenSize :: Int -> Int -> Size -betweenSize m n = -#if defined(ASSERTS) - assert (m >= 0) - assert (n >= m) -#endif - Between m n -{-# INLINE betweenSize #-} - -unionSize :: Size -> Size -> Size -unionSize (Between a b) (Between c d) = Between (min a c) (max b d) -unionSize _ _ = Unknown - -unknownSize :: Size -unknownSize = Unknown -{-# INLINE unknownSize #-} - -instance Num Size where - (+) = addSize - (-) = subtractSize - (*) = mulSize - - fromInteger = f where f = exactSize . 
fromInteger - {-# INLINE f #-} - -add :: Int -> Int -> Int -add m n | mn >= 0 = mn - | otherwise = overflowError - where mn = m + n -{-# INLINE add #-} - -addSize :: Size -> Size -> Size -addSize (Between ma mb) (Between na nb) = Between (add ma na) (add mb nb) -addSize _ _ = Unknown -{-# INLINE addSize #-} - -subtractSize :: Size -> Size -> Size -subtractSize (Between ma mb) (Between na nb) = Between (max (ma-nb) 0) (max (mb-na) 0) -subtractSize a@(Between 0 _) Unknown = a -subtractSize (Between _ mb) Unknown = Between 0 mb -subtractSize _ _ = Unknown -{-# INLINE subtractSize #-} - -mulSize :: Size -> Size -> Size -mulSize (Between ma mb) (Between na nb) = Between (mul ma na) (mul mb nb) -mulSize _ _ = Unknown -{-# INLINE mulSize #-} - --- | Minimum of two size hints. -smaller :: Size -> Size -> Size -smaller a@(Between ma mb) b@(Between na nb) - | mb <= na = a - | nb <= ma = b - | otherwise = Between (ma `min` na) (mb `min` nb) -smaller a@(Between 0 _) Unknown = a -smaller (Between _ mb) Unknown = Between 0 mb -smaller Unknown b@(Between 0 _) = b -smaller Unknown (Between _ nb) = Between 0 nb -smaller Unknown Unknown = Unknown -{-# INLINE smaller #-} - --- | Maximum of two size hints. -larger :: Size -> Size -> Size -larger a@(Between ma mb) b@(Between na nb) - | ma >= nb = a - | na >= mb = b - | otherwise = Between (ma `max` na) (mb `max` nb) -larger _ _ = Unknown -{-# INLINE larger #-} - --- | Compute the maximum size from a size hint, if possible. -upperBound :: Int -> Size -> Int -upperBound _ (Between _ n) = n -upperBound k _ = k -{-# INLINE upperBound #-} - --- | Compute the maximum size from a size hint, if possible. -lowerBound :: Int -> Size -> Int -lowerBound _ (Between n _) = n -lowerBound k _ = k -{-# INLINE lowerBound #-} - --- | Determine the ordering relationship between two 'Size's, or 'Nothing' in --- the indeterminate case. -compareSize :: Size -> Size -> Maybe Ordering -compareSize (Between ma mb) (Between na nb) - | mb < na = Just LT - | ma > nb = Just GT - | ma == mb - , ma == na - , ma == nb = Just EQ -compareSize _ _ = Nothing - - -isEmpty :: Size -> Bool -isEmpty (Between _ n) = n <= 0 -isEmpty _ = False -{-# INLINE isEmpty #-} - -overflowError :: Int -overflowError = error "Data.Text.Internal.Fusion.Size: size overflow" diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Fusion/Types.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Fusion/Types.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Fusion/Types.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Fusion/Types.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,122 +0,0 @@ -{-# LANGUAGE BangPatterns, ExistentialQuantification #-} --- | --- Module : Data.Text.Internal.Fusion.Types --- Copyright : (c) Tom Harper 2008-2009, --- (c) Bryan O'Sullivan 2009, --- (c) Duncan Coutts 2009, --- (c) Jasper Van der Jeugt 2011 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Core stream fusion functionality for text. - -module Data.Text.Internal.Fusion.Types - ( - CC(..) - , PairS(..) - , Scan(..) - , RS(..) - , Step(..) - , Stream(..) 
- , empty - ) where - -import Data.Text.Internal.Fusion.Size -import Data.Word (Word8) - --- | Specialised tuple for case conversion. -data CC s = CC !s {-# UNPACK #-} !Char {-# UNPACK #-} !Char - --- | Restreaming state. -data RS s - = RS0 !s - | RS1 !s {-# UNPACK #-} !Word8 - | RS2 !s {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 - | RS3 !s {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 - --- | Strict pair. -data PairS a b = !a :*: !b - -- deriving (Eq, Ord, Show) -infixl 2 :*: - --- | An intermediate result in a scan. -data Scan s = Scan1 {-# UNPACK #-} !Char !s - | Scan2 {-# UNPACK #-} !Char !s - --- | Intermediate result in a processing pipeline. -data Step s a = Done - | Skip !s - | Yield !a !s - -{- -instance (Show a) => Show (Step s a) - where show Done = "Done" - show (Skip _) = "Skip" - show (Yield x _) = "Yield " ++ show x --} - -instance (Eq a) => Eq (Stream a) where - (==) = eq - -instance (Ord a) => Ord (Stream a) where - compare = cmp - --- The length hint in a Stream has two roles. If its value is zero, --- we trust it, and treat the stream as empty. Otherwise, we treat it --- as a hint: it should usually be accurate, so we use it when --- unstreaming to decide what size array to allocate. However, the --- unstreaming functions must be able to cope with the hint being too --- small or too large. --- --- The size hint tries to track the UTF-16 code units in a stream, --- but often counts the number of code points instead. It can easily --- undercount if, for instance, a transformed stream contains astral --- plane code points (those above 0x10000). - -data Stream a = - forall s. Stream - (s -> Step s a) -- stepper function - !s -- current state - !Size -- size hint in code units - --- | /O(n)/ Determines if two streams are equal. -eq :: (Eq a) => Stream a -> Stream a -> Bool -eq (Stream next1 s1 _) (Stream next2 s2 _) = loop (next1 s1) (next2 s2) - where - loop Done Done = True - loop (Skip s1') (Skip s2') = loop (next1 s1') (next2 s2') - loop (Skip s1') x2 = loop (next1 s1') x2 - loop x1 (Skip s2') = loop x1 (next2 s2') - loop Done _ = False - loop _ Done = False - loop (Yield x1 s1') (Yield x2 s2') = x1 == x2 && - loop (next1 s1') (next2 s2') -{-# INLINE [0] eq #-} - -cmp :: (Ord a) => Stream a -> Stream a -> Ordering -cmp (Stream next1 s1 _) (Stream next2 s2 _) = loop (next1 s1) (next2 s2) - where - loop Done Done = EQ - loop (Skip s1') (Skip s2') = loop (next1 s1') (next2 s2') - loop (Skip s1') x2 = loop (next1 s1') x2 - loop x1 (Skip s2') = loop x1 (next2 s2') - loop Done _ = LT - loop _ Done = GT - loop (Yield x1 s1') (Yield x2 s2') = - case compare x1 x2 of - EQ -> loop (next1 s1') (next2 s2') - other -> other -{-# INLINE [0] cmp #-} - --- | The empty stream. 
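The Stream/Step shape above is easier to see in a cut-down model. The following self-contained sketch is specialised to lists and omits the size hint; all names in it are hypothetical and nothing is taken from the package itself:

    {-# LANGUAGE ExistentialQuantification #-}

    -- A stepper either stops, skips to a new state, or yields an element.
    data Step s a = Done | Skip !s | Yield !a !s

    -- A stream packages a stepper with its (hidden) current state.
    data Stream a = forall s. Stream (s -> Step s a) !s

    streamList :: [a] -> Stream a
    streamList xs0 = Stream next xs0
      where next []     = Done
            next (x:xs) = Yield x xs

    unstreamList :: Stream a -> [a]
    unstreamList (Stream next s0) = go s0
      where go s = case next s of
              Done       -> []
              Skip s'    -> go s'
              Yield x s' -> x : go s'

    -- Mapping rewrites the stepper; no intermediate list is built.
    mapS :: (a -> b) -> Stream a -> Stream b
    mapS f (Stream next s0) = Stream next' s0
      where next' s = case next s of
              Done       -> Done
              Skip s'    -> Skip s'
              Yield x s' -> Yield (f x) s'

    main :: IO ()
    main = print (unstreamList (mapS succ (streamList "abc")))  -- "bcd"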
-empty :: Stream a -empty = Stream next () 0 - where next _ = Done -{-# INLINE [0] empty #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Fusion.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Fusion.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Fusion.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,244 +0,0 @@ -{-# LANGUAGE BangPatterns, MagicHash #-} - --- | --- Module : Data.Text.Internal.Fusion --- Copyright : (c) Tom Harper 2008-2009, --- (c) Bryan O'Sullivan 2009-2010, --- (c) Duncan Coutts 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Text manipulation functions represented as fusible operations over --- streams. -module Data.Text.Internal.Fusion - ( - -- * Types - Stream(..) - , Step(..) - - -- * Creation and elimination - , stream - , unstream - , reverseStream - - , length - - -- * Transformations - , reverse - - -- * Construction - -- ** Scans - , reverseScanr - - -- ** Accumulating maps - , mapAccumL - - -- ** Generation and unfolding - , unfoldrN - - -- * Indexing - , index - , findIndex - , countChar - ) where - -import Prelude (Bool(..), Char, Maybe(..), Monad(..), Int, - Num(..), Ord(..), ($), (&&), - fromIntegral, otherwise) -import Data.Bits ((.&.)) -import Data.Text.Internal (Text(..)) -import Data.Text.Internal.Private (runText) -import Data.Text.Internal.Unsafe.Char (ord, unsafeChr, unsafeWrite) -import Data.Text.Internal.Unsafe.Shift (shiftL, shiftR) -import qualified Data.Text.Array as A -import qualified Data.Text.Internal.Fusion.Common as S -import Data.Text.Internal.Fusion.Types -import Data.Text.Internal.Fusion.Size -import qualified Data.Text.Internal as I -import qualified Data.Text.Internal.Encoding.Utf16 as U16 - -default(Int) - --- | /O(n)/ Convert a 'Text' into a 'Stream Char'. -stream :: Text -> Stream Char -stream (Text arr off len) = Stream next off (betweenSize (len `shiftR` 1) len) - where - !end = off+len - next !i - | i >= end = Done - | n >= 0xD800 && n <= 0xDBFF = Yield (U16.chr2 n n2) (i + 2) - | otherwise = Yield (unsafeChr n) (i + 1) - where - n = A.unsafeIndex arr i - n2 = A.unsafeIndex arr (i + 1) -{-# INLINE [0] stream #-} - --- | /O(n)/ Convert a 'Text' into a 'Stream Char', but iterate --- backwards. -reverseStream :: Text -> Stream Char -reverseStream (Text arr off len) = Stream next (off+len-1) (betweenSize (len `shiftR` 1) len) - where - {-# INLINE next #-} - next !i - | i < off = Done - | n >= 0xDC00 && n <= 0xDFFF = Yield (U16.chr2 n2 n) (i - 2) - | otherwise = Yield (unsafeChr n) (i - 1) - where - n = A.unsafeIndex arr i - n2 = A.unsafeIndex arr (i - 1) -{-# INLINE [0] reverseStream #-} - --- | /O(n)/ Convert a 'Stream Char' into a 'Text'. -unstream :: Stream Char -> Text -unstream (Stream next0 s0 len) = runText $ \done -> do - -- Before encoding each char we perform a buffer realloc check assuming - -- worst case encoding size of two 16-bit units for the char. Just add an - -- extra space to the buffer so that we do not end up reallocating even when - -- all the chars are encoded as single unit. 
- let mlen = upperBound 4 len + 1 - arr0 <- A.new mlen - let outer !arr !maxi = encode - where - -- keep the common case loop as small as possible - encode !si !di = - case next0 si of - Done -> done arr di - Skip si' -> encode si' di - Yield c si' - -- simply check for the worst case - | maxi < di + 1 -> realloc si di - | otherwise -> do - n <- unsafeWrite arr di c - encode si' (di + n) - - -- keep uncommon case separate from the common case code - {-# NOINLINE realloc #-} - realloc !si !di = do - let newlen = (maxi + 1) * 2 - arr' <- A.new newlen - A.copyM arr' 0 arr 0 di - outer arr' (newlen - 1) si di - - outer arr0 (mlen - 1) s0 0 -{-# INLINE [0] unstream #-} -{-# RULES "STREAM stream/unstream fusion" forall s. stream (unstream s) = s #-} - - --- ---------------------------------------------------------------------------- --- * Basic stream functions - -length :: Stream Char -> Int -length = S.lengthI -{-# INLINE[0] length #-} - --- | /O(n)/ Reverse the characters of a string. -reverse :: Stream Char -> Text -reverse (Stream next s len0) - | isEmpty len0 = I.empty - | otherwise = I.text arr off' len' - where - len0' = upperBound 4 (larger len0 4) - (arr, (off', len')) = A.run2 (A.new len0' >>= loop s (len0'-1) len0') - loop !s0 !i !len marr = - case next s0 of - Done -> return (marr, (j, len-j)) - where j = i + 1 - Skip s1 -> loop s1 i len marr - Yield x s1 | i < least -> {-# SCC "reverse/resize" #-} do - let newLen = len `shiftL` 1 - marr' <- A.new newLen - A.copyM marr' (newLen-len) marr 0 len - write s1 (len+i) newLen marr' - | otherwise -> write s1 i len marr - where n = ord x - least | n < 0x10000 = 0 - | otherwise = 1 - m = n - 0x10000 - lo = fromIntegral $ (m `shiftR` 10) + 0xD800 - hi = fromIntegral $ (m .&. 0x3FF) + 0xDC00 - write t j l mar - | n < 0x10000 = do - A.unsafeWrite mar j (fromIntegral n) - loop t (j-1) l mar - | otherwise = do - A.unsafeWrite mar (j-1) lo - A.unsafeWrite mar j hi - loop t (j-2) l mar -{-# INLINE [0] reverse #-} - --- | /O(n)/ Perform the equivalent of 'scanr' over a list, only with --- the input and result reversed. -reverseScanr :: (Char -> Char -> Char) -> Char -> Stream Char -> Stream Char -reverseScanr f z0 (Stream next0 s0 len) = Stream next (Scan1 z0 s0) (len+1) -- HINT maybe too low - where - {-# INLINE next #-} - next (Scan1 z s) = Yield z (Scan2 z s) - next (Scan2 z s) = case next0 s of - Yield x s' -> let !x' = f x z - in Yield x' (Scan2 x' s') - Skip s' -> Skip (Scan2 z s') - Done -> Done -{-# INLINE reverseScanr #-} - --- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a stream from a seed --- value. However, the length of the result is limited by the --- first argument to 'unfoldrN'. This function is more efficient than --- 'unfoldr' when the length of the result is known. -unfoldrN :: Int -> (a -> Maybe (Char,a)) -> a -> Stream Char -unfoldrN n = S.unfoldrNI n -{-# INLINE [0] unfoldrN #-} - -------------------------------------------------------------------------------- --- ** Indexing streams - --- | /O(n)/ stream index (subscript) operator, starting from 0. -index :: Stream Char -> Int -> Char -index = S.indexI -{-# INLINE [0] index #-} - --- | The 'findIndex' function takes a predicate and a stream and --- returns the index of the first element in the stream --- satisfying the predicate. -findIndex :: (Char -> Bool) -> Stream Char -> Maybe Int -findIndex = S.findIndexI -{-# INLINE [0] findIndex #-} - --- | /O(n)/ The 'count' function returns the number of times the query --- element appears in the given stream. 
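The reallocation policy used by unstream above (start from the size hint plus one unit of slack, then double on overflow) can be modelled with plain arithmetic; a hypothetical sketch, where capacities is not part of the package:

    -- Model of the growth policy: start at hint+1, double until it fits.
    capacities :: Int -> Int -> [Int]
    capacities hint needed = go (hint + 1)
      where
        go cap | cap >= needed = [cap]
               | otherwise     = cap : go (cap * 2)

    main :: IO ()
    main =
      -- A hint of 4 code units but 37 actually written: three doublings.
      print (capacities 4 37)   -- [5,10,20,40]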
-countChar :: Char -> Stream Char -> Int -countChar = S.countCharI -{-# INLINE [0] countChar #-} - --- | /O(n)/ Like a combination of 'map' and 'foldl''. Applies a --- function to each element of a 'Text', passing an accumulating --- parameter from left to right, and returns a final 'Text'. -mapAccumL :: (a -> Char -> (a,Char)) -> a -> Stream Char -> (a, Text) -mapAccumL f z0 (Stream next0 s0 len) = (nz, I.text na 0 nl) - where - (na,(nz,nl)) = A.run2 (A.new mlen >>= \arr -> outer arr mlen z0 s0 0) - where mlen = upperBound 4 len - outer arr top = loop - where - loop !z !s !i = - case next0 s of - Done -> return (arr, (z,i)) - Skip s' -> loop z s' i - Yield x s' - | j >= top -> {-# SCC "mapAccumL/resize" #-} do - let top' = (top + 1) `shiftL` 1 - arr' <- A.new top' - A.copyM arr' 0 arr 0 top - outer arr' top' z s i - | otherwise -> do d <- unsafeWrite arr i c - loop z' s' (i+d) - where (z',c) = f z x - j | ord c < 0x10000 = i - | otherwise = i + 1 -{-# INLINE [0] mapAccumL #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/IO.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/IO.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/IO.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/IO.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,166 +0,0 @@ -{-# LANGUAGE BangPatterns, RecordWildCards #-} --- | --- Module : Data.Text.Internal.IO --- Copyright : (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Simon Marlow --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Low-level support for text I\/O. - -module Data.Text.Internal.IO - ( - hGetLineWith - , readChunk - ) where - -import qualified Control.Exception as E -import Data.IORef (readIORef, writeIORef) -import Data.Text (Text) -import Data.Text.Internal.Fusion (unstream) -import Data.Text.Internal.Fusion.Types (Step(..), Stream(..)) -import Data.Text.Internal.Fusion.Size (exactSize, maxSize) -import Data.Text.Unsafe (inlinePerformIO) -import Foreign.Storable (peekElemOff) -import GHC.IO.Buffer (Buffer(..), CharBuffer, RawCharBuffer, bufferAdjustL, - bufferElems, charSize, isEmptyBuffer, readCharBuf, - withRawBuffer, writeCharBuf) -import GHC.IO.Handle.Internals (ioe_EOF, readTextDevice, wantReadableHandle_) -import GHC.IO.Handle.Types (Handle__(..), Newline(..)) -import System.IO (Handle) -import System.IO.Error (isEOFError) -import qualified Data.Text as T - --- | Read a single line of input from a handle, constructing a list of --- decoded chunks as we go. When we're done, transform them into the --- destination type. -hGetLineWith :: ([Text] -> t) -> Handle -> IO t -hGetLineWith f h = wantReadableHandle_ "hGetLine" h go - where - go hh@Handle__{..} = readIORef haCharBuffer >>= fmap f . 
hGetLineLoop hh [] - -hGetLineLoop :: Handle__ -> [Text] -> CharBuffer -> IO [Text] -hGetLineLoop hh@Handle__{..} = go where - go ts buf@Buffer{ bufL=r0, bufR=w, bufRaw=raw0 } = do - let findEOL raw r | r == w = return (False, w) - | otherwise = do - (c,r') <- readCharBuf raw r - if c == '\n' - then return (True, r) - else findEOL raw r' - (eol, off) <- findEOL raw0 r0 - (t,r') <- if haInputNL == CRLF - then unpack_nl raw0 r0 off - else do t <- unpack raw0 r0 off - return (t,off) - if eol - then do writeIORef haCharBuffer (bufferAdjustL (off+1) buf) - return $ reverse (t:ts) - else do - let buf1 = bufferAdjustL r' buf - maybe_buf <- maybeFillReadBuffer hh buf1 - case maybe_buf of - -- Nothing indicates we caught an EOF, and we may have a - -- partial line to return. - Nothing -> do - -- we reached EOF. There might be a lone \r left - -- in the buffer, so check for that and - -- append it to the line if necessary. - let pre | isEmptyBuffer buf1 = T.empty - | otherwise = T.singleton '\r' - writeIORef haCharBuffer buf1{ bufL=0, bufR=0 } - let str = reverse . filter (not . T.null) $ pre:t:ts - if null str - then ioe_EOF - else return str - Just new_buf -> go (t:ts) new_buf - --- This function is lifted almost verbatim from GHC.IO.Handle.Text. -maybeFillReadBuffer :: Handle__ -> CharBuffer -> IO (Maybe CharBuffer) -maybeFillReadBuffer handle_ buf - = E.catch (Just `fmap` getSomeCharacters handle_ buf) $ \e -> - if isEOFError e - then return Nothing - else ioError e - -unpack :: RawCharBuffer -> Int -> Int -> IO Text -unpack !buf !r !w - | charSize /= 4 = sizeError "unpack" - | r >= w = return T.empty - | otherwise = withRawBuffer buf go - where - go pbuf = return $! unstream (Stream next r (exactSize (w-r))) - where - next !i | i >= w = Done - | otherwise = Yield (ix i) (i+1) - ix i = inlinePerformIO $ peekElemOff pbuf i - -unpack_nl :: RawCharBuffer -> Int -> Int -> IO (Text, Int) -unpack_nl !buf !r !w - | charSize /= 4 = sizeError "unpack_nl" - | r >= w = return (T.empty, 0) - | otherwise = withRawBuffer buf $ go - where - go pbuf = do - let !t = unstream (Stream next r (maxSize (w-r))) - w' = w - 1 - return $ if ix w' == '\r' - then (t,w') - else (t,w) - where - next !i | i >= w = Done - | c == '\r' = let i' = i + 1 - in if i' < w - then if ix i' == '\n' - then Yield '\n' (i+2) - else Yield '\n' i' - else Done - | otherwise = Yield c (i+1) - where c = ix i - ix i = inlinePerformIO $ peekElemOff pbuf i - --- This function is completely lifted from GHC.IO.Handle.Text. -getSomeCharacters :: Handle__ -> CharBuffer -> IO CharBuffer -getSomeCharacters handle_@Handle__{..} buf@Buffer{..} = - case bufferElems buf of - -- buffer empty: read some more - 0 -> {-# SCC "readTextDevice" #-} readTextDevice handle_ buf - - -- if the buffer has a single '\r' in it and we're doing newline - -- translation: read some more - 1 | haInputNL == CRLF -> do - (c,_) <- readCharBuf bufRaw bufL - if c == '\r' - then do -- shuffle the '\r' to the beginning. This is only safe - -- if we're about to call readTextDevice, otherwise it - -- would mess up flushCharBuffer. - -- See [note Buffer Flushing], GHC.IO.Handle.Types - _ <- writeCharBuf bufRaw 0 '\r' - let buf' = buf{ bufL=0, bufR=1 } - readTextDevice handle_ buf' - else do - return buf - - -- buffer has some chars in it already: just return it - _otherwise -> {-# SCC "otherwise" #-} return buf - --- | Read a single chunk of strict text from a buffer. Used by both --- the strict and lazy implementations of hGetContents. 
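The CRLF handling in hGetLineLoop above is what makes universal newline input work through the public Data.Text.IO interface; a usage sketch, where "demo.txt" is a hypothetical scratch file:

    import qualified Data.Text.IO as TIO
    import System.IO

    main :: IO ()
    main = do
      hw <- openFile "demo.txt" WriteMode
      hSetNewlineMode hw noNewlineTranslation
      hPutStr hw "first line\r\nsecond line\n"
      hClose hw
      hr <- openFile "demo.txt" ReadMode
      hSetNewlineMode hr universalNewlineMode  -- CRLF on input becomes '\n'
      l1 <- TIO.hGetLine hr
      l2 <- TIO.hGetLine hr
      hClose hr
      print (l1, l2)   -- expected ("first line","second line")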
-readChunk :: Handle__ -> CharBuffer -> IO Text -readChunk hh@Handle__{..} buf = do - buf'@Buffer{..} <- getSomeCharacters hh buf - (t,r) <- if haInputNL == CRLF - then unpack_nl bufRaw bufL bufR - else do t <- unpack bufRaw bufL bufR - return (t,bufR) - writeIORef haCharBuffer (bufferAdjustL r buf') - return t - -sizeError :: String -> a -sizeError loc = error $ "Data.Text.IO." ++ loc ++ ": bad internal buffer size" diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Lazy/Encoding/Fusion.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Lazy/Encoding/Fusion.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Lazy/Encoding/Fusion.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Lazy/Encoding/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,324 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} - --- | --- Module : Data.Text.Lazy.Encoding.Fusion --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Fusible 'Stream'-oriented functions for converting between lazy --- 'Text' and several common encodings. - -module Data.Text.Internal.Lazy.Encoding.Fusion - ( - -- * Streaming - -- streamASCII - streamUtf8 - , streamUtf16LE - , streamUtf16BE - , streamUtf32LE - , streamUtf32BE - - -- * Unstreaming - , unstream - - , module Data.Text.Internal.Encoding.Fusion.Common - ) where - -import Data.ByteString.Lazy.Internal (ByteString(..), defaultChunkSize) -import qualified Data.ByteString as B -import qualified Data.ByteString.Unsafe as B -import Data.Text.Internal.Encoding.Fusion.Common -import Data.Text.Encoding.Error -import Data.Text.Internal.Fusion (Step(..), Stream(..)) -import Data.Text.Internal.Fusion.Size -import Data.Text.Internal.Unsafe.Char (unsafeChr, unsafeChr8, unsafeChr32) -import Data.Text.Internal.Unsafe.Shift (shiftL) -import Data.Word (Word8, Word16, Word32) -import qualified Data.Text.Internal.Encoding.Utf8 as U8 -import qualified Data.Text.Internal.Encoding.Utf16 as U16 -import qualified Data.Text.Internal.Encoding.Utf32 as U32 -import Data.Text.Unsafe (unsafeDupablePerformIO) -import Foreign.ForeignPtr (withForeignPtr, ForeignPtr) -import Foreign.Storable (pokeByteOff) -import Data.ByteString.Internal (mallocByteString, memcpy) -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import qualified Data.ByteString.Internal as B - -data S = S0 - | S1 {-# UNPACK #-} !Word8 - | S2 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 - | S3 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 - | S4 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 {-# UNPACK #-} !Word8 - -data T = T !ByteString !S {-# UNPACK #-} !Int - --- | /O(n)/ Convert a lazy 'ByteString' into a 'Stream Char', using --- UTF-8 encoding. 
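The S1..S4 carry states above are what let a multi-byte sequence straddle two lazy chunks; a sketch through the public lazy decoder, assuming the bytestring package's Data.ByteString modules:

    import qualified Data.ByteString as B
    import qualified Data.ByteString.Lazy as BL
    import qualified Data.Text.Lazy as TL
    import qualified Data.Text.Lazy.Encoding as TLE
    import Data.Text.Encoding.Error (lenientDecode)

    main :: IO ()
    main = do
      -- U+00E9 is 0xC3 0xA9 in UTF-8; split it across two chunks so the
      -- decoder must carry partial state between them.
      let lbs = BL.fromChunks [B.pack [0x63, 0x61, 0x66, 0xC3], B.pack [0xA9]]
      print (TLE.decodeUtf8With lenientDecode lbs == TL.pack "caf\x00E9")  -- True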
-streamUtf8 :: OnDecodeError -> ByteString -> Stream Char -streamUtf8 onErr bs0 = Stream next (T bs0 S0 0) unknownSize - where - next (T bs@(Chunk ps _) S0 i) - | i < len && U8.validate1 a = - Yield (unsafeChr8 a) (T bs S0 (i+1)) - | i + 1 < len && U8.validate2 a b = - Yield (U8.chr2 a b) (T bs S0 (i+2)) - | i + 2 < len && U8.validate3 a b c = - Yield (U8.chr3 a b c) (T bs S0 (i+3)) - | i + 3 < len && U8.validate4 a b c d = - Yield (U8.chr4 a b c d) (T bs S0 (i+4)) - where len = B.length ps - a = B.unsafeIndex ps i - b = B.unsafeIndex ps (i+1) - c = B.unsafeIndex ps (i+2) - d = B.unsafeIndex ps (i+3) - next st@(T bs s i) = - case s of - S1 a | U8.validate1 a -> Yield (unsafeChr8 a) es - S2 a b | U8.validate2 a b -> Yield (U8.chr2 a b) es - S3 a b c | U8.validate3 a b c -> Yield (U8.chr3 a b c) es - S4 a b c d | U8.validate4 a b c d -> Yield (U8.chr4 a b c d) es - _ -> consume st - where es = T bs S0 i - consume (T bs@(Chunk ps rest) s i) - | i >= B.length ps = consume (T rest s 0) - | otherwise = - case s of - S0 -> next (T bs (S1 x) (i+1)) - S1 a -> next (T bs (S2 a x) (i+1)) - S2 a b -> next (T bs (S3 a b x) (i+1)) - S3 a b c -> next (T bs (S4 a b c x) (i+1)) - S4 a b c d -> decodeError "streamUtf8" "UTF-8" onErr (Just a) - (T bs (S3 b c d) (i+1)) - where x = B.unsafeIndex ps i - consume (T Empty S0 _) = Done - consume st = decodeError "streamUtf8" "UTF-8" onErr Nothing st -{-# INLINE [0] streamUtf8 #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little --- endian UTF-16 encoding. -streamUtf16LE :: OnDecodeError -> ByteString -> Stream Char -streamUtf16LE onErr bs0 = Stream next (T bs0 S0 0) unknownSize - where - next (T bs@(Chunk ps _) S0 i) - | i + 1 < len && U16.validate1 x1 = - Yield (unsafeChr x1) (T bs S0 (i+2)) - | i + 3 < len && U16.validate2 x1 x2 = - Yield (U16.chr2 x1 x2) (T bs S0 (i+4)) - where len = B.length ps - x1 = c (idx i) (idx (i + 1)) - x2 = c (idx (i + 2)) (idx (i + 3)) - c w1 w2 = w1 + (w2 `shiftL` 8) - idx = fromIntegral . B.unsafeIndex ps :: Int -> Word16 - next st@(T bs s i) = - case s of - S2 w1 w2 | U16.validate1 (c w1 w2) -> - Yield (unsafeChr (c w1 w2)) es - S4 w1 w2 w3 w4 | U16.validate2 (c w1 w2) (c w3 w4) -> - Yield (U16.chr2 (c w1 w2) (c w3 w4)) es - _ -> consume st - where es = T bs S0 i - c :: Word8 -> Word8 -> Word16 - c w1 w2 = fromIntegral w1 + (fromIntegral w2 `shiftL` 8) - consume (T bs@(Chunk ps rest) s i) - | i >= B.length ps = consume (T rest s 0) - | otherwise = - case s of - S0 -> next (T bs (S1 x) (i+1)) - S1 w1 -> next (T bs (S2 w1 x) (i+1)) - S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) - S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) - S4 w1 w2 w3 w4 -> decodeError "streamUtf16LE" "UTF-16LE" onErr (Just w1) - (T bs (S3 w2 w3 w4) (i+1)) - where x = B.unsafeIndex ps i - consume (T Empty S0 _) = Done - consume st = decodeError "streamUtf16LE" "UTF-16LE" onErr Nothing st -{-# INLINE [0] streamUtf16LE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big --- endian UTF-16 encoding. -streamUtf16BE :: OnDecodeError -> ByteString -> Stream Char -streamUtf16BE onErr bs0 = Stream next (T bs0 S0 0) unknownSize - where - next (T bs@(Chunk ps _) S0 i) - | i + 1 < len && U16.validate1 x1 = - Yield (unsafeChr x1) (T bs S0 (i+2)) - | i + 3 < len && U16.validate2 x1 x2 = - Yield (U16.chr2 x1 x2) (T bs S0 (i+4)) - where len = B.length ps - x1 = c (idx i) (idx (i + 1)) - x2 = c (idx (i + 2)) (idx (i + 3)) - c w1 w2 = (w1 `shiftL` 8) + w2 - idx = fromIntegral . 
B.unsafeIndex ps :: Int -> Word16 - next st@(T bs s i) = - case s of - S2 w1 w2 | U16.validate1 (c w1 w2) -> - Yield (unsafeChr (c w1 w2)) es - S4 w1 w2 w3 w4 | U16.validate2 (c w1 w2) (c w3 w4) -> - Yield (U16.chr2 (c w1 w2) (c w3 w4)) es - _ -> consume st - where es = T bs S0 i - c :: Word8 -> Word8 -> Word16 - c w1 w2 = (fromIntegral w1 `shiftL` 8) + fromIntegral w2 - consume (T bs@(Chunk ps rest) s i) - | i >= B.length ps = consume (T rest s 0) - | otherwise = - case s of - S0 -> next (T bs (S1 x) (i+1)) - S1 w1 -> next (T bs (S2 w1 x) (i+1)) - S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) - S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) - S4 w1 w2 w3 w4 -> decodeError "streamUtf16BE" "UTF-16BE" onErr (Just w1) - (T bs (S3 w2 w3 w4) (i+1)) - where x = B.unsafeIndex ps i - consume (T Empty S0 _) = Done - consume st = decodeError "streamUtf16BE" "UTF-16BE" onErr Nothing st -{-# INLINE [0] streamUtf16BE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using big --- endian UTF-32 encoding. -streamUtf32BE :: OnDecodeError -> ByteString -> Stream Char -streamUtf32BE onErr bs0 = Stream next (T bs0 S0 0) unknownSize - where - next (T bs@(Chunk ps _) S0 i) - | i + 3 < len && U32.validate x = - Yield (unsafeChr32 x) (T bs S0 (i+4)) - where len = B.length ps - x = shiftL x1 24 + shiftL x2 16 + shiftL x3 8 + x4 - x1 = idx i - x2 = idx (i+1) - x3 = idx (i+2) - x4 = idx (i+3) - idx = fromIntegral . B.unsafeIndex ps :: Int -> Word32 - next st@(T bs s i) = - case s of - S4 w1 w2 w3 w4 | U32.validate (c w1 w2 w3 w4) -> - Yield (unsafeChr32 (c w1 w2 w3 w4)) es - _ -> consume st - where es = T bs S0 i - c :: Word8 -> Word8 -> Word8 -> Word8 -> Word32 - c w1 w2 w3 w4 = shifted - where - shifted = shiftL x1 24 + shiftL x2 16 + shiftL x3 8 + x4 - x1 = fromIntegral w1 - x2 = fromIntegral w2 - x3 = fromIntegral w3 - x4 = fromIntegral w4 - consume (T bs@(Chunk ps rest) s i) - | i >= B.length ps = consume (T rest s 0) - | otherwise = - case s of - S0 -> next (T bs (S1 x) (i+1)) - S1 w1 -> next (T bs (S2 w1 x) (i+1)) - S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) - S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) - S4 w1 w2 w3 w4 -> decodeError "streamUtf32BE" "UTF-32BE" onErr (Just w1) - (T bs (S3 w2 w3 w4) (i+1)) - where x = B.unsafeIndex ps i - consume (T Empty S0 _) = Done - consume st = decodeError "streamUtf32BE" "UTF-32BE" onErr Nothing st -{-# INLINE [0] streamUtf32BE #-} - --- | /O(n)/ Convert a 'ByteString' into a 'Stream Char', using little --- endian UTF-32 encoding. -streamUtf32LE :: OnDecodeError -> ByteString -> Stream Char -streamUtf32LE onErr bs0 = Stream next (T bs0 S0 0) unknownSize - where - next (T bs@(Chunk ps _) S0 i) - | i + 3 < len && U32.validate x = - Yield (unsafeChr32 x) (T bs S0 (i+4)) - where len = B.length ps - x = shiftL x4 24 + shiftL x3 16 + shiftL x2 8 + x1 - x1 = idx i - x2 = idx (i+1) - x3 = idx (i+2) - x4 = idx (i+3) - idx = fromIntegral . 
B.unsafeIndex ps :: Int -> Word32 - next st@(T bs s i) = - case s of - S4 w1 w2 w3 w4 | U32.validate (c w1 w2 w3 w4) -> - Yield (unsafeChr32 (c w1 w2 w3 w4)) es - _ -> consume st - where es = T bs S0 i - c :: Word8 -> Word8 -> Word8 -> Word8 -> Word32 - c w1 w2 w3 w4 = shifted - where - shifted = shiftL x4 24 + shiftL x3 16 + shiftL x2 8 + x1 - x1 = fromIntegral w1 - x2 = fromIntegral w2 - x3 = fromIntegral w3 - x4 = fromIntegral w4 - consume (T bs@(Chunk ps rest) s i) - | i >= B.length ps = consume (T rest s 0) - | otherwise = - case s of - S0 -> next (T bs (S1 x) (i+1)) - S1 w1 -> next (T bs (S2 w1 x) (i+1)) - S2 w1 w2 -> next (T bs (S3 w1 w2 x) (i+1)) - S3 w1 w2 w3 -> next (T bs (S4 w1 w2 w3 x) (i+1)) - S4 w1 w2 w3 w4 -> decodeError "streamUtf32LE" "UTF-32LE" onErr (Just w1) - (T bs (S3 w2 w3 w4) (i+1)) - where x = B.unsafeIndex ps i - consume (T Empty S0 _) = Done - consume st = decodeError "streamUtf32LE" "UTF-32LE" onErr Nothing st -{-# INLINE [0] streamUtf32LE #-} - --- | /O(n)/ Convert a 'Stream' 'Word8' to a lazy 'ByteString'. -unstreamChunks :: Int -> Stream Word8 -> ByteString -unstreamChunks chunkSize (Stream next s0 len0) = chunk s0 (upperBound 4 len0) - where chunk s1 len1 = unsafeDupablePerformIO $ do - let len = max 4 (min len1 chunkSize) - mallocByteString len >>= loop len 0 s1 - where - loop !n !off !s fp = case next s of - Done | off == 0 -> return Empty - | otherwise -> return $! Chunk (trimUp fp off) Empty - Skip s' -> loop n off s' fp - Yield x s' - | off == chunkSize -> do - let !newLen = n - off - return $! Chunk (trimUp fp off) (chunk s newLen) - | off == n -> realloc fp n off s' x - | otherwise -> do - withForeignPtr fp $ \p -> pokeByteOff p off x - loop n (off+1) s' fp - {-# NOINLINE realloc #-} - realloc fp n off s x = do - let n' = min (n+n) chunkSize - fp' <- copy0 fp n n' - withForeignPtr fp' $ \p -> pokeByteOff p off x - loop n' (off+1) s fp' - trimUp fp off = B.PS fp 0 off - copy0 :: ForeignPtr Word8 -> Int -> Int -> IO (ForeignPtr Word8) - copy0 !src !srcLen !destLen = -#if defined(ASSERTS) - assert (srcLen <= destLen) $ -#endif - do - dest <- mallocByteString destLen - withForeignPtr src $ \src' -> - withForeignPtr dest $ \dest' -> - memcpy dest' src' (fromIntegral srcLen) - return dest - --- | /O(n)/ Convert a 'Stream' 'Word8' to a lazy 'ByteString'. -unstream :: Stream Word8 -> ByteString -unstream = unstreamChunks defaultChunkSize - -decodeError :: forall s. String -> String -> OnDecodeError -> Maybe Word8 - -> s -> Step s Char -decodeError func kind onErr mb i = - case onErr desc mb of - Nothing -> Skip i - Just c -> Yield c i - where desc = "Data.Text.Lazy.Encoding.Fusion." ++ func ++ ": Invalid " ++ - kind ++ " stream" diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Lazy/Fusion.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Lazy/Fusion.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Lazy/Fusion.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Lazy/Fusion.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -{-# LANGUAGE BangPatterns #-} --- | --- Module : Data.Text.Lazy.Fusion --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. 
Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Core stream fusion functionality for text. - -module Data.Text.Internal.Lazy.Fusion - ( - stream - , unstream - , unstreamChunks - , length - , unfoldrN - , index - , countChar - ) where - -import Prelude hiding (length) -import qualified Data.Text.Internal.Fusion.Common as S -import Control.Monad.ST (runST) -import Data.Text.Internal.Fusion.Types -import Data.Text.Internal.Fusion.Size (isEmpty, unknownSize) -import Data.Text.Internal.Lazy -import qualified Data.Text.Internal as I -import qualified Data.Text.Array as A -import Data.Text.Internal.Unsafe.Char (unsafeWrite) -import Data.Text.Internal.Unsafe.Shift (shiftL) -import Data.Text.Unsafe (Iter(..), iter) -import Data.Int (Int64) - -default(Int64) - --- | /O(n)/ Convert a 'Text' into a 'Stream Char'. -stream :: Text -> Stream Char -stream text = Stream next (text :*: 0) unknownSize - where - next (Empty :*: _) = Done - next (txt@(Chunk t@(I.Text _ _ len) ts) :*: i) - | i >= len = next (ts :*: 0) - | otherwise = Yield c (txt :*: i+d) - where Iter c d = iter t i -{-# INLINE [0] stream #-} - --- | /O(n)/ Convert a 'Stream Char' into a 'Text', using the given --- chunk size. -unstreamChunks :: Int -> Stream Char -> Text -unstreamChunks !chunkSize (Stream next s0 len0) - | isEmpty len0 = Empty - | otherwise = outer s0 - where - outer so = {-# SCC "unstreamChunks/outer" #-} - case next so of - Done -> Empty - Skip s' -> outer s' - Yield x s' -> runST $ do - a <- A.new unknownLength - unsafeWrite a 0 x >>= inner a unknownLength s' - where unknownLength = 4 - where - inner marr !len s !i - | i + 1 >= chunkSize = finish marr i s - | i + 1 >= len = {-# SCC "unstreamChunks/resize" #-} do - let newLen = min (len `shiftL` 1) chunkSize - marr' <- A.new newLen - A.copyM marr' 0 marr 0 len - inner marr' newLen s i - | otherwise = - {-# SCC "unstreamChunks/inner" #-} - case next s of - Done -> finish marr i s - Skip s' -> inner marr len s' i - Yield x s' -> do d <- unsafeWrite marr i x - inner marr len s' (i+d) - finish marr len s' = do - arr <- A.unsafeFreeze marr - return (I.Text arr 0 len `Chunk` outer s') -{-# INLINE [0] unstreamChunks #-} - --- | /O(n)/ Convert a 'Stream Char' into a 'Text', using --- 'defaultChunkSize'. -unstream :: Stream Char -> Text -unstream = unstreamChunks defaultChunkSize -{-# INLINE [0] unstream #-} - --- | /O(n)/ Returns the number of characters in a text. -length :: Stream Char -> Int64 -length = S.lengthI -{-# INLINE[0] length #-} - -{-# RULES "LAZY STREAM stream/unstream fusion" forall s. - stream (unstream s) = s #-} - --- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a stream from a seed --- value. However, the length of the result is limited by the --- first argument to 'unfoldrN'. This function is more efficient than --- 'unfoldr' when the length of the result is known. -unfoldrN :: Int64 -> (a -> Maybe (Char,a)) -> a -> Stream Char -unfoldrN n = S.unfoldrNI n -{-# INLINE [0] unfoldrN #-} - --- | /O(n)/ stream index (subscript) operator, starting from 0. -index :: Stream Char -> Int64 -> Char -index = S.indexI -{-# INLINE [0] index #-} - --- | /O(n)/ The 'count' function returns the number of times the query --- element appears in the given stream. 
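unstreamChunks above caps every strict chunk it allocates, so a long lazily generated text ends up as a list of bounded chunks; a sketch via the public Data.Text.Lazy.unfoldr:

    import qualified Data.Text as T
    import qualified Data.Text.Lazy as TL

    main :: IO ()
    main = do
      let step n | n < 100000 = Just ('x', n + 1)
                 | otherwise  = Nothing
          lt = TL.unfoldr step (0 :: Int)
      print (TL.length lt)   -- 100000
      -- Each strict chunk is capped near the default chunk size, so the
      -- lengths below are expected to be several values well under 100000.
      print (map T.length (TL.toChunks lt))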
-countChar :: Char -> Stream Char -> Int64 -countChar = S.countCharI -{-# INLINE [0] countChar #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Lazy/Search.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Lazy/Search.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Lazy/Search.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Lazy/Search.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,134 +0,0 @@ -{-# LANGUAGE BangPatterns, ScopedTypeVariables #-} - --- | --- Module : Data.Text.Lazy.Search --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Fast substring search for lazy 'Text', based on work by Boyer, --- Moore, Horspool, Sunday, and Lundh. Adapted from the strict --- implementation. - -module Data.Text.Internal.Lazy.Search - ( - indices - ) where - -import qualified Data.Text.Array as A -import Data.Int (Int64) -import Data.Word (Word16, Word64) -import qualified Data.Text.Internal as T -import Data.Text.Internal.Fusion.Types (PairS(..)) -import Data.Text.Internal.Lazy (Text(..), foldlChunks) -import Data.Bits ((.|.), (.&.)) -import Data.Text.Internal.Unsafe.Shift (shiftL) - --- | /O(n+m)/ Find the offsets of all non-overlapping indices of --- @needle@ within @haystack@. --- --- This function is strict in @needle@, and lazy (as far as possible) --- in the chunks of @haystack@. --- --- In (unlikely) bad cases, this algorithm's complexity degrades --- towards /O(n*m)/. -indices :: Text -- ^ Substring to search for (@needle@) - -> Text -- ^ Text to search in (@haystack@) - -> [Int64] -indices needle@(Chunk n ns) _haystack@(Chunk k ks) - | nlen <= 0 = [] - | nlen == 1 = indicesOne (nindex 0) 0 k ks - | otherwise = advance k ks 0 0 - where - advance x@(T.Text _ _ l) xs = scan - where - scan !g !i - | i >= m = case xs of - Empty -> [] - Chunk y ys -> advance y ys g (i-m) - | lackingHay (i + nlen) x xs = [] - | c == z && candidateMatch 0 = g : scan (g+nlen) (i+nlen) - | otherwise = scan (g+delta) (i+delta) - where - m = fromIntegral l - c = hindex (i + nlast) - delta | nextInPattern = nlen + 1 - | c == z = skip + 1 - | otherwise = 1 - nextInPattern = mask .&. swizzle (hindex (i+nlen)) == 0 - candidateMatch !j - | j >= nlast = True - | hindex (i+j) /= nindex j = False - | otherwise = candidateMatch (j+1) - hindex = index x xs - nlen = wordLength needle - nlast = nlen - 1 - nindex = index n ns - z = foldlChunks fin 0 needle - where fin _ (T.Text farr foff flen) = A.unsafeIndex farr (foff+flen-1) - (mask :: Word64) :*: skip = buildTable n ns 0 0 0 (nlen-2) - swizzle w = 1 `shiftL` (fromIntegral w .&. 0x3f) - buildTable (T.Text xarr xoff xlen) xs = go - where - go !(g::Int64) !i !msk !skp - | i >= xlast = case xs of - Empty -> (msk .|. swizzle z) :*: skp - Chunk y ys -> buildTable y ys g 0 msk' skp' - | otherwise = go (g+1) (i+1) msk' skp' - where c = A.unsafeIndex xarr (xoff+i) - msk' = msk .|. swizzle c - skp' | c == z = nlen - g - 2 - | otherwise = skp - xlast = xlen - 1 - -- | Check whether an attempt to index into the haystack at the - -- given offset would fail. 
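The non-overlapping search above backs the public lazy substring functions; a brief usage sketch assuming only the public Data.Text.Lazy API:

    import qualified Data.Text.Lazy as TL

    main :: IO ()
    main = do
      print (TL.breakOn (TL.pack "na") (TL.pack "banana"))  -- ("ba","nana")
      print (TL.count  (TL.pack "na") (TL.pack "banana"))   -- 2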
- lackingHay q = go 0 - where - go p (T.Text _ _ l) ps = p' < q && case ps of - Empty -> True - Chunk r rs -> go p' r rs - where p' = p + fromIntegral l -indices _ _ = [] - --- | Fast index into a partly unpacked 'Text'. We take into account --- the possibility that the caller might try to access one element --- past the end. -index :: T.Text -> Text -> Int64 -> Word16 -index (T.Text arr off len) xs !i - | j < len = A.unsafeIndex arr (off+j) - | otherwise = case xs of - Empty - -- out of bounds, but legal - | j == len -> 0 - -- should never happen, due to lackingHay above - | otherwise -> emptyError "index" - Chunk c cs -> index c cs (i-fromIntegral len) - where j = fromIntegral i - --- | A variant of 'indices' that scans linearly for a single 'Word16'. -indicesOne :: Word16 -> Int64 -> T.Text -> Text -> [Int64] -indicesOne c = chunk - where - chunk !i (T.Text oarr ooff olen) os = go 0 - where - go h | h >= olen = case os of - Empty -> [] - Chunk y ys -> chunk (i+fromIntegral olen) y ys - | on == c = i + fromIntegral h : go (h+1) - | otherwise = go (h+1) - where on = A.unsafeIndex oarr (ooff+h) - --- | The number of 'Word16' values in a 'Text'. -wordLength :: Text -> Int64 -wordLength = foldlChunks sumLength 0 - where sumLength i (T.Text _ _ l) = i + fromIntegral l - -emptyError :: String -> a -emptyError fun = error ("Data.Text.Lazy.Search." ++ fun ++ ": empty input") diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Lazy.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Lazy.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Lazy.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -{-# LANGUAGE BangPatterns, DeriveDataTypeable #-} -{-# OPTIONS_HADDOCK not-home #-} - --- | --- Module : Data.Text.Internal.Lazy --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- A module containing private 'Text' internals. This exposes the --- 'Text' representation and low level construction functions. --- Modules which extend the 'Text' system may need to use this module. - -module Data.Text.Internal.Lazy - ( - Text(..) - , chunk - , empty - , foldrChunks - , foldlChunks - -- * Data type invariant and abstraction functions - - -- $invariant - , strictInvariant - , lazyInvariant - , showStructure - - -- * Chunk allocation sizes - , defaultChunkSize - , smallChunkSize - , chunkOverhead - ) where - -import Data.Text () -import Data.Text.Internal.Unsafe.Shift (shiftL) -import Data.Typeable (Typeable) -import Foreign.Storable (sizeOf) -import qualified Data.Text.Internal as T - -data Text = Empty - | Chunk {-# UNPACK #-} !T.Text Text - deriving (Typeable) - --- $invariant --- --- The data type invariant for lazy 'Text': Every 'Text' is either 'Empty' or --- consists of non-null 'T.Text's. All functions must preserve this, --- and the QC properties must check this. - --- | Check the invariant strictly. 
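The non-null-chunk invariant is maintained by the smart constructor, which public builders such as fromChunks go through; a small sketch:

    import qualified Data.Text as T
    import qualified Data.Text.Lazy as TL

    main :: IO ()
    main = do
      -- The empty strict chunk is dropped, keeping every Chunk non-null.
      let lt = TL.fromChunks [T.pack "foo", T.empty, T.pack "bar"]
      print (TL.toChunks lt)   -- ["foo","bar"]
      print (TL.length lt)     -- 6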
-strictInvariant :: Text -> Bool -strictInvariant Empty = True -strictInvariant x@(Chunk (T.Text _ _ len) cs) - | len > 0 = strictInvariant cs - | otherwise = error $ "Data.Text.Lazy: invariant violation: " - ++ showStructure x - --- | Check the invariant lazily. -lazyInvariant :: Text -> Text -lazyInvariant Empty = Empty -lazyInvariant x@(Chunk c@(T.Text _ _ len) cs) - | len > 0 = Chunk c (lazyInvariant cs) - | otherwise = error $ "Data.Text.Lazy: invariant violation: " - ++ showStructure x - --- | Display the internal structure of a lazy 'Text'. -showStructure :: Text -> String -showStructure Empty = "Empty" -showStructure (Chunk t Empty) = "Chunk " ++ show t ++ " Empty" -showStructure (Chunk t ts) = - "Chunk " ++ show t ++ " (" ++ showStructure ts ++ ")" - --- | Smart constructor for 'Chunk'. Guarantees the data type invariant. -chunk :: T.Text -> Text -> Text -{-# INLINE chunk #-} -chunk t@(T.Text _ _ len) ts | len == 0 = ts - | otherwise = Chunk t ts - --- | Smart constructor for 'Empty'. -empty :: Text -{-# INLINE [0] empty #-} -empty = Empty - --- | Consume the chunks of a lazy 'Text' with a natural right fold. -foldrChunks :: (T.Text -> a -> a) -> a -> Text -> a -foldrChunks f z = go - where go Empty = z - go (Chunk c cs) = f c (go cs) -{-# INLINE foldrChunks #-} - --- | Consume the chunks of a lazy 'Text' with a strict, tail-recursive, --- accumulating left fold. -foldlChunks :: (a -> T.Text -> a) -> a -> Text -> a -foldlChunks f z = go z - where go !a Empty = a - go !a (Chunk c cs) = go (f a c) cs -{-# INLINE foldlChunks #-} - --- | Currently set to 16 KiB, less the memory management overhead. -defaultChunkSize :: Int -defaultChunkSize = 16384 - chunkOverhead -{-# INLINE defaultChunkSize #-} - --- | Currently set to 128 bytes, less the memory management overhead. -smallChunkSize :: Int -smallChunkSize = 128 - chunkOverhead -{-# INLINE smallChunkSize #-} - --- | The memory management overhead. Currently this is tuned for GHC only. -chunkOverhead :: Int -chunkOverhead = sizeOf (undefined :: Int) `shiftL` 1 -{-# INLINE chunkOverhead #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Private.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Private.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Private.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Private.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -{-# LANGUAGE BangPatterns, Rank2Types, UnboxedTuples #-} - --- | --- Module : Data.Text.Internal.Private --- Copyright : (c) 2011 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC - -module Data.Text.Internal.Private - ( - runText - , span_ - ) where - -import Control.Monad.ST (ST, runST) -import Data.Text.Internal (Text(..), text) -import Data.Text.Unsafe (Iter(..), iter) -import qualified Data.Text.Array as A - -span_ :: (Char -> Bool) -> Text -> (# Text, Text #) -span_ p t@(Text arr off len) = (# hd,tl #) - where hd = text arr off k - tl = text arr (off+k) (len-k) - !k = loop 0 - loop !i | i < len && p c = loop (i+d) - | otherwise = i - where Iter c d = iter t i -{-# INLINE span_ #-} - -runText :: (forall s. (A.MArray s -> Int -> ST s Text) -> ST s Text) -> Text -runText act = runST (act $ \ !marr !len -> do - arr <- A.unsafeFreeze marr - return $! 
text arr 0 len) -{-# INLINE runText #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Read.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Read.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Read.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Read.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ --- | --- Module : Data.Text.Internal.Read --- Copyright : (c) 2014 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- Common internal functions for reading textual data. -module Data.Text.Internal.Read - ( - IReader - , IParser(..) - , T(..) - , digitToInt - , hexDigitToInt - , perhaps - ) where - -import Control.Applicative as App (Applicative(..)) -import Control.Arrow (first) -import Control.Monad (ap) -import Data.Char (ord) - -type IReader t a = t -> Either String (a,t) - -newtype IParser t a = P { - runP :: IReader t a - } - -instance Functor (IParser t) where - fmap f m = P $ fmap (first f) . runP m - -instance Applicative (IParser t) where - pure a = P $ \t -> Right (a,t) - {-# INLINE pure #-} - (<*>) = ap - -instance Monad (IParser t) where - return = App.pure - m >>= k = P $ \t -> case runP m t of - Left err -> Left err - Right (a,t') -> runP (k a) t' - {-# INLINE (>>=) #-} - --- If we ever need a `MonadFail` instance the definition below can be used --- --- > instance MonadFail (IParser t) where --- > fail msg = P $ \_ -> Left msg --- --- But given the code compiles fine with a post-MFP GHC 8.6+ we don't need --- one just yet. - -data T = T !Integer !Int - -perhaps :: a -> IParser t a -> IParser t a -perhaps def m = P $ \t -> case runP m t of - Left _ -> Right (def,t) - r@(Right _) -> r - -hexDigitToInt :: Char -> Int -hexDigitToInt c - | c >= '0' && c <= '9' = ord c - ord '0' - | c >= 'a' && c <= 'f' = ord c - (ord 'a' - 10) - | otherwise = ord c - (ord 'A' - 10) - -digitToInt :: Char -> Int -digitToInt c = ord c - ord '0' diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Search.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Search.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Search.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Search.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -{-# LANGUAGE BangPatterns, ScopedTypeVariables #-} - --- | --- Module : Data.Text.Internal.Search --- Copyright : (c) Bryan O'Sullivan 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- Fast substring search for 'Text', based on work by Boyer, Moore, --- Horspool, Sunday, and Lundh. --- --- References: --- --- * R. S. Boyer, J. S. Moore: A Fast String Searching Algorithm. --- Communications of the ACM, 20, 10, 762-772 (1977) --- --- * R. N. Horspool: Practical Fast Searching in Strings. Software - --- Practice and Experience 10, 501-506 (1980) --- --- * D. M. Sunday: A Very Fast Substring Search Algorithm. --- Communications of the ACM, 33, 8, 132-142 (1990) --- --- * F. Lundh: The Fast Search Algorithm. 
--- (2006) - -module Data.Text.Internal.Search - ( - indices - ) where - -import qualified Data.Text.Array as A -import Data.Word (Word64) -import Data.Text.Internal (Text(..)) -import Data.Bits ((.|.), (.&.)) -import Data.Text.Internal.Unsafe.Shift (shiftL) - -data T = {-# UNPACK #-} !Word64 :* {-# UNPACK #-} !Int - --- | /O(n+m)/ Find the offsets of all non-overlapping indices of --- @needle@ within @haystack@. The offsets returned represent --- uncorrected indices in the low-level \"needle\" array, to which its --- offset must be added. --- --- In (unlikely) bad cases, this algorithm's complexity degrades --- towards /O(n*m)/. -indices :: Text -- ^ Substring to search for (@needle@) - -> Text -- ^ Text to search in (@haystack@) - -> [Int] -indices _needle@(Text narr noff nlen) _haystack@(Text harr hoff hlen) - | nlen == 1 = scanOne (nindex 0) - | nlen <= 0 || ldiff < 0 = [] - | otherwise = scan 0 - where - ldiff = hlen - nlen - nlast = nlen - 1 - z = nindex nlast - nindex k = A.unsafeIndex narr (noff+k) - hindex k = A.unsafeIndex harr (hoff+k) - hindex' k | k == hlen = 0 - | otherwise = A.unsafeIndex harr (hoff+k) - buildTable !i !msk !skp - | i >= nlast = (msk .|. swizzle z) :* skp - | otherwise = buildTable (i+1) (msk .|. swizzle c) skp' - where c = nindex i - skp' | c == z = nlen - i - 2 - | otherwise = skp - swizzle k = 1 `shiftL` (fromIntegral k .&. 0x3f) - scan !i - | i > ldiff = [] - | c == z && candidateMatch 0 = i : scan (i + nlen) - | otherwise = scan (i + delta) - where c = hindex (i + nlast) - candidateMatch !j - | j >= nlast = True - | hindex (i+j) /= nindex j = False - | otherwise = candidateMatch (j+1) - delta | nextInPattern = nlen + 1 - | c == z = skip + 1 - | otherwise = 1 - where nextInPattern = mask .&. swizzle (hindex' (i+nlen)) == 0 - !(mask :* skip) = buildTable 0 0 (nlen-2) - scanOne c = loop 0 - where loop !i | i >= hlen = [] - | hindex i == c = i : loop (i+1) - | otherwise = loop (i+1) -{-# INLINE indices #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Unsafe/Char.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Unsafe/Char.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Unsafe/Char.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Unsafe/Char.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -{-# LANGUAGE CPP, MagicHash #-} - --- | --- Module : Data.Text.Internal.Unsafe.Char --- Copyright : (c) 2008, 2009 Tom Harper, --- (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Fast character manipulation functions. 
-module Data.Text.Internal.Unsafe.Char - ( - ord - , unsafeChr - , unsafeChr8 - , unsafeChr32 - , unsafeWrite - -- , unsafeWriteRev - ) where - -#ifdef ASSERTS -import Control.Exception (assert) -#endif -import Control.Monad.ST (ST) -import Data.Bits ((.&.)) -import Data.Text.Internal.Unsafe.Shift (shiftR) -import GHC.Exts (Char(..), Int(..), chr#, ord#, word2Int#) -import GHC.Word (Word8(..), Word16(..), Word32(..)) -import qualified Data.Text.Array as A - -ord :: Char -> Int -ord (C# c#) = I# (ord# c#) -{-# INLINE ord #-} - -unsafeChr :: Word16 -> Char -unsafeChr (W16# w#) = C# (chr# (word2Int# w#)) -{-# INLINE unsafeChr #-} - -unsafeChr8 :: Word8 -> Char -unsafeChr8 (W8# w#) = C# (chr# (word2Int# w#)) -{-# INLINE unsafeChr8 #-} - -unsafeChr32 :: Word32 -> Char -unsafeChr32 (W32# w#) = C# (chr# (word2Int# w#)) -{-# INLINE unsafeChr32 #-} - --- | Write a character into the array at the given offset. Returns --- the number of 'Word16's written. -unsafeWrite :: A.MArray s -> Int -> Char -> ST s Int -unsafeWrite marr i c - | n < 0x10000 = do -#if defined(ASSERTS) - assert (i >= 0) . assert (i < A.length marr) $ return () -#endif - A.unsafeWrite marr i (fromIntegral n) - return 1 - | otherwise = do -#if defined(ASSERTS) - assert (i >= 0) . assert (i < A.length marr - 1) $ return () -#endif - A.unsafeWrite marr i lo - A.unsafeWrite marr (i+1) hi - return 2 - where n = ord c - m = n - 0x10000 - lo = fromIntegral $ (m `shiftR` 10) + 0xD800 - hi = fromIntegral $ (m .&. 0x3FF) + 0xDC00 -{-# INLINE unsafeWrite #-} - -{- -unsafeWriteRev :: A.MArray s Word16 -> Int -> Char -> ST s Int -unsafeWriteRev marr i c - | n < 0x10000 = do - assert (i >= 0) . assert (i < A.length marr) $ - A.unsafeWrite marr i (fromIntegral n) - return (i-1) - | otherwise = do - assert (i >= 1) . assert (i < A.length marr) $ - A.unsafeWrite marr (i-1) lo - A.unsafeWrite marr i hi - return (i-2) - where n = ord c - m = n - 0x10000 - lo = fromIntegral $ (m `shiftR` 10) + 0xD800 - hi = fromIntegral $ (m .&. 0x3FF) + 0xDC00 -{-# INLINE unsafeWriteRev #-} --} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Unsafe/Shift.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Unsafe/Shift.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Unsafe/Shift.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Unsafe/Shift.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -{-# LANGUAGE MagicHash #-} - --- | --- Module : Data.Text.Internal.Unsafe.Shift --- Copyright : (c) Bryan O'Sullivan 2009 --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- Fast, unchecked bit shifting functions. - -module Data.Text.Internal.Unsafe.Shift - ( - UnsafeShift(..) - ) where - --- import qualified Data.Bits as Bits -import GHC.Base -import GHC.Word - --- | This is a workaround for poor optimisation in GHC 6.8.2. It --- fails to notice constant-width shifts, and adds a test and branch --- to every shift. This imposes about a 10% performance hit. --- --- These functions are undefined when the amount being shifted by is --- greater than the size in bits of a machine Int#. 
-class UnsafeShift a where - shiftL :: a -> Int -> a - shiftR :: a -> Int -> a - -instance UnsafeShift Word16 where - {-# INLINE shiftL #-} - shiftL (W16# x#) (I# i#) = W16# (narrow16Word# (x# `uncheckedShiftL#` i#)) - - {-# INLINE shiftR #-} - shiftR (W16# x#) (I# i#) = W16# (x# `uncheckedShiftRL#` i#) - -instance UnsafeShift Word32 where - {-# INLINE shiftL #-} - shiftL (W32# x#) (I# i#) = W32# (narrow32Word# (x# `uncheckedShiftL#` i#)) - - {-# INLINE shiftR #-} - shiftR (W32# x#) (I# i#) = W32# (x# `uncheckedShiftRL#` i#) - -instance UnsafeShift Word64 where - {-# INLINE shiftL #-} - shiftL (W64# x#) (I# i#) = W64# (x# `uncheckedShiftL64#` i#) - - {-# INLINE shiftR #-} - shiftR (W64# x#) (I# i#) = W64# (x# `uncheckedShiftRL64#` i#) - -instance UnsafeShift Int where - {-# INLINE shiftL #-} - shiftL (I# x#) (I# i#) = I# (x# `iShiftL#` i#) - - {-# INLINE shiftR #-} - shiftR (I# x#) (I# i#) = I# (x# `iShiftRA#` i#) - -{- -instance UnsafeShift Integer where - {-# INLINE shiftL #-} - shiftL = Bits.shiftL - - {-# INLINE shiftR #-} - shiftR = Bits.shiftR --} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Unsafe.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Unsafe.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal/Unsafe.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal/Unsafe.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} -{-# OPTIONS_HADDOCK not-home #-} - --- | --- Module : Data.Text.Internal.Unsafe --- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : portable --- --- /Warning/: this is an internal module, and does not have a stable --- API or name. Functions in this module may not check or enforce --- preconditions expected by public modules. Use at your own risk! --- --- A module containing /unsafe/ operations, for /very very careful/ use --- in /heavily tested/ code. -module Data.Text.Internal.Unsafe - ( - inlineInterleaveST - , inlinePerformIO - ) where - -import GHC.ST (ST(..)) -#if defined(__GLASGOW_HASKELL__) -import GHC.IO (IO(IO)) -import GHC.Base (realWorld#) -#endif - - --- | Just like unsafePerformIO, but we inline it. Big performance gains as --- it exposes lots of things to further inlining. /Very unsafe/. In --- particular, you should do no memory allocation inside an --- 'inlinePerformIO' block. On Hugs this is just @unsafePerformIO@. --- -{-# INLINE inlinePerformIO #-} -inlinePerformIO :: IO a -> a -#if defined(__GLASGOW_HASKELL__) -inlinePerformIO (IO m) = case m realWorld# of (# _, r #) -> r -#else -inlinePerformIO = unsafePerformIO -#endif - --- | Allow an 'ST' computation to be deferred lazily. When passed an --- action of type 'ST' @s@ @a@, the action will only be performed when --- the value of @a@ is demanded. --- --- This function is identical to the normal unsafeInterleaveST, but is --- inlined and hence faster. --- --- /Note/: This operation is highly unsafe, as it can introduce --- externally visible non-determinism into an 'ST' action. 
-inlineInterleaveST :: ST s a -> ST s a -inlineInterleaveST (ST m) = ST $ \ s -> - let r = case m s of (# _, res #) -> res in (# s, r #) -{-# INLINE inlineInterleaveST #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Internal.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Internal.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,188 +0,0 @@ -{-# LANGUAGE CPP, DeriveDataTypeable, UnboxedTuples #-} -{-# OPTIONS_HADDOCK not-home #-} - --- | --- Module : Data.Text.Internal --- Copyright : (c) 2008, 2009 Tom Harper, --- (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- A module containing private 'Text' internals. This exposes the --- 'Text' representation and low level construction functions. --- Modules which extend the 'Text' system may need to use this module. --- --- You should not use this module unless you are determined to monkey --- with the internals, as the functions here do just about nothing to --- preserve data invariants. You have been warned! - -#if defined(__GLASGOW_HASKELL__) && !defined(__HADDOCK__) -#include "MachDeps.h" -#endif - -module Data.Text.Internal - ( - -- * Types - -- $internals - Text(..) - -- * Construction - , text - , textP - -- * Safety - , safe - -- * Code that must be here for accessibility - , empty - , empty_ - -- * Utilities - , firstf - -- * Checked multiplication - , mul - , mul32 - , mul64 - -- * Debugging - , showText - ) where - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import Data.Bits -import Data.Int (Int32, Int64) -import Data.Text.Internal.Unsafe.Char (ord) -import Data.Typeable (Typeable) -import qualified Data.Text.Array as A - --- | A space efficient, packed, unboxed Unicode text type. -data Text = Text - {-# UNPACK #-} !A.Array -- payload (Word16 elements) - {-# UNPACK #-} !Int -- offset (units of Word16, not Char) - {-# UNPACK #-} !Int -- length (units of Word16, not Char) - deriving (Typeable) - --- | Smart constructor. -text_ :: A.Array -> Int -> Int -> Text -text_ arr off len = -#if defined(ASSERTS) - let c = A.unsafeIndex arr off - alen = A.length arr - in assert (len >= 0) . - assert (off >= 0) . - assert (alen == 0 || len == 0 || off < alen) . - assert (len == 0 || c < 0xDC00 || c > 0xDFFF) $ -#endif - Text arr off len -{-# INLINE text_ #-} - --- | /O(1)/ The empty 'Text'. -empty :: Text -empty = Text A.empty 0 0 -{-# INLINE [1] empty #-} - --- | A non-inlined version of 'empty'. -empty_ :: Text -empty_ = Text A.empty 0 0 -{-# NOINLINE empty_ #-} - --- | Construct a 'Text' without invisibly pinning its byte array in --- memory if its length has dwindled to zero. -text :: A.Array -> Int -> Int -> Text -text arr off len | len == 0 = empty - | otherwise = text_ arr off len -{-# INLINE text #-} - -textP :: A.Array -> Int -> Int -> Text -{-# DEPRECATED textP "Use text instead" #-} -textP = text - --- | A useful 'show'-like function for debugging purposes. -showText :: Text -> String -showText (Text arr off len) = - "Text " ++ show (A.toList arr off len) ++ ' ' : - show off ++ ' ' : show len - --- | Map a 'Char' to a 'Text'-safe value. 
--- --- UTF-16 surrogate code points are not included in the set of Unicode --- scalar values, but are unfortunately admitted as valid 'Char' --- values by Haskell. They cannot be represented in a 'Text'. This --- function remaps those code points to the Unicode replacement --- character (U+FFFD, \'�\'), and leaves other code points --- unchanged. -safe :: Char -> Char -safe c - | ord c .&. 0x1ff800 /= 0xd800 = c - | otherwise = '\xfffd' -{-# INLINE [0] safe #-} - --- | Apply a function to the first element of an optional pair. -firstf :: (a -> c) -> Maybe (a,b) -> Maybe (c,b) -firstf f (Just (a, b)) = Just (f a, b) -firstf _ Nothing = Nothing - --- | Checked multiplication. Calls 'error' if the result would --- overflow. -mul :: Int -> Int -> Int -#if WORD_SIZE_IN_BITS == 64 -mul a b = fromIntegral $ fromIntegral a `mul64` fromIntegral b -#else -mul a b = fromIntegral $ fromIntegral a `mul32` fromIntegral b -#endif -{-# INLINE mul #-} -infixl 7 `mul` - --- | Checked multiplication. Calls 'error' if the result would --- overflow. -mul64 :: Int64 -> Int64 -> Int64 -mul64 a b - | a >= 0 && b >= 0 = mul64_ a b - | a >= 0 = -mul64_ a (-b) - | b >= 0 = -mul64_ (-a) b - | otherwise = mul64_ (-a) (-b) -{-# INLINE mul64 #-} -infixl 7 `mul64` - -mul64_ :: Int64 -> Int64 -> Int64 -mul64_ a b - | ahi > 0 && bhi > 0 = error "overflow" - | top > 0x7fffffff = error "overflow" - | total < 0 = error "overflow" - | otherwise = total - where (# ahi, alo #) = (# a `shiftR` 32, a .&. 0xffffffff #) - (# bhi, blo #) = (# b `shiftR` 32, b .&. 0xffffffff #) - top = ahi * blo + alo * bhi - total = (top `shiftL` 32) + alo * blo -{-# INLINE mul64_ #-} - --- | Checked multiplication. Calls 'error' if the result would --- overflow. -mul32 :: Int32 -> Int32 -> Int32 -mul32 a b = case fromIntegral a * fromIntegral b of - ab | ab < min32 || ab > max32 -> error "overflow" - | otherwise -> fromIntegral ab - where min32 = -0x80000000 :: Int64 - max32 = 0x7fffffff -{-# INLINE mul32 #-} -infixl 7 `mul32` - --- $internals --- --- Internally, the 'Text' type is represented as an array of 'Word16' --- UTF-16 code units. The offset and length fields in the constructor --- are in these units, /not/ units of 'Char'. --- --- Invariants that all functions must maintain: --- --- * Since the 'Text' type uses UTF-16 internally, it cannot represent --- characters in the reserved surrogate code point range U+D800 to --- U+DFFF. To maintain this invariant, the 'safe' function maps --- 'Char' values in this range to the replacement character (U+FFFD, --- \'�\'). --- --- * A leading (or \"high\") surrogate code unit (0xD800–0xDBFF) must --- always be followed by a trailing (or \"low\") surrogate code unit --- (0xDC00-0xDFFF). A trailing surrogate code unit must always be --- preceded by a leading surrogate code unit. 
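The hunk above removes the vendored copy of Data.Text.Internal, whose haddocks explain that 'Text' stores UTF-16 code units and that the 'safe' function remaps surrogate code points (U+D800..U+DFFF), which are not valid Unicode scalar values, to the replacement character U+FFFD. The following standalone Haskell sketch, which is not part of the patch, merely restates that documented check for reference; the names safeSketch and main are illustrative only.

import Data.Bits ((.&.))
import Data.Char (ord)

-- Keep the character unless it is a UTF-16 surrogate code point;
-- surrogates cannot be represented in a 'Text', so the removed module
-- documents remapping them to the replacement character U+FFFD.
safeSketch :: Char -> Char
safeSketch c
  | ord c .&. 0x1ff800 /= 0xd800 = c        -- not a surrogate: keep as-is
  | otherwise                    = '\xfffd' -- surrogate: replace

main :: IO ()
main = print (map safeSketch ['a', '\xd800'])  -- prints "a\65533"

Masking with 0x1ff800 keeps only bits 11 through 20 of the code point, so the comparison against 0xd800 succeeds exactly for characters in the surrogate block U+D800..U+DFFF and for nothing else.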
diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/IO.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/IO.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/IO.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/IO.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,350 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, RecordWildCards, ScopedTypeVariables #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- | --- Module : Data.Text.IO --- Copyright : (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Simon Marlow --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : GHC --- --- Efficient locale-sensitive support for text I\/O. --- --- Skip past the synopsis for some important notes on performance and --- portability across different versions of GHC. - -module Data.Text.IO - ( - -- * Performance - -- $performance - - -- * Locale support - -- $locale - -- * File-at-a-time operations - readFile - , writeFile - , appendFile - -- * Operations on handles - , hGetContents - , hGetChunk - , hGetLine - , hPutStr - , hPutStrLn - -- * Special cases for standard input and output - , interact - , getContents - , getLine - , putStr - , putStrLn - ) where - -import Data.Text (Text) -import Prelude hiding (appendFile, getContents, getLine, interact, - putStr, putStrLn, readFile, writeFile) -import System.IO (Handle, IOMode(..), hPutChar, openFile, stdin, stdout, - withFile) -import qualified Control.Exception as E -import Control.Monad (liftM2, when) -import Data.IORef (readIORef, writeIORef) -import qualified Data.Text as T -import Data.Text.Internal.Fusion (stream) -import Data.Text.Internal.Fusion.Types (Step(..), Stream(..)) -import Data.Text.Internal.IO (hGetLineWith, readChunk) -import GHC.IO.Buffer (Buffer(..), BufferState(..), CharBufElem, CharBuffer, - RawCharBuffer, emptyBuffer, isEmptyBuffer, newCharBuffer, - writeCharBuf) -import GHC.IO.Exception (IOException(ioe_type), IOErrorType(InappropriateType)) -import GHC.IO.Handle.Internals (augmentIOError, hClose_help, wantReadableHandle, - wantWritableHandle) -import GHC.IO.Handle.Text (commitBuffer') -import GHC.IO.Handle.Types (BufferList(..), BufferMode(..), Handle__(..), - HandleType(..), Newline(..)) -import System.IO (hGetBuffering, hFileSize, hSetBuffering, hTell) -import System.IO.Error (isEOFError) - --- $performance --- #performance# --- --- The functions in this module obey the runtime system's locale, --- character set encoding, and line ending conversion settings. --- --- If you know in advance that you will be working with data that has --- a specific encoding (e.g. UTF-8), and your application is highly --- performance sensitive, you may find that it is faster to perform --- I\/O with bytestrings and to encode and decode yourself than to use --- the functions in this module. --- --- Whether this will hold depends on the version of GHC you are using, --- the platform you are working on, the data you are working with, and --- the encodings you are using, so be sure to test for yourself. - --- | The 'readFile' function reads a file and returns the contents of --- the file as a string. The entire file is read strictly, as with --- 'getContents'. -readFile :: FilePath -> IO Text -readFile name = openFile name ReadMode >>= hGetContents - --- | Write a string to a file. The file is truncated to zero length --- before writing begins. 
-writeFile :: FilePath -> Text -> IO () -writeFile p = withFile p WriteMode . flip hPutStr - --- | Write a string the end of a file. -appendFile :: FilePath -> Text -> IO () -appendFile p = withFile p AppendMode . flip hPutStr - -catchError :: String -> Handle -> Handle__ -> IOError -> IO (Text, Bool) -catchError caller h Handle__{..} err - | isEOFError err = do - buf <- readIORef haCharBuffer - return $ if isEmptyBuffer buf - then (T.empty, True) - else (T.singleton '\r', True) - | otherwise = E.throwIO (augmentIOError err caller h) - --- | Wrap readChunk and return a value indicating if we're reached the EOF. --- This is needed because unpack_nl is unable to discern the difference --- between a buffer with just \r due to EOF or because not enough data was left --- for decoding. e.g. the final character decoded from the byte buffer was \r. -readChunkEof :: Handle__ -> CharBuffer -> IO (Text, Bool) -readChunkEof hh buf = do t <- readChunk hh buf - return (t, False) - --- | /Experimental./ Read a single chunk of strict text from a --- 'Handle'. The size of the chunk depends on the amount of input --- currently buffered. --- --- This function blocks only if there is no data available, and EOF --- has not yet been reached. Once EOF is reached, this function --- returns an empty string instead of throwing an exception. -hGetChunk :: Handle -> IO Text -hGetChunk h = wantReadableHandle "hGetChunk" h readSingleChunk - where - readSingleChunk hh@Handle__{..} = do - buf <- readIORef haCharBuffer - (t, _) <- readChunkEof hh buf `E.catch` catchError "hGetChunk" h hh - return (hh, t) - --- | Read the remaining contents of a 'Handle' as a string. The --- 'Handle' is closed once the contents have been read, or if an --- exception is thrown. --- --- Internally, this function reads a chunk at a time from the --- lower-level buffering abstraction, and concatenates the chunks into --- a single string once the entire file has been read. --- --- As a result, it requires approximately twice as much memory as its --- result to construct its result. For files more than a half of --- available RAM in size, this may result in memory exhaustion. -hGetContents :: Handle -> IO Text -hGetContents h = do - chooseGoodBuffering h - wantReadableHandle "hGetContents" h readAll - where - readAll hh@Handle__{..} = do - let readChunks = do - buf <- readIORef haCharBuffer - (t, eof) <- readChunkEof hh buf - `E.catch` catchError "hGetContents" h hh - if eof - then return [t] - else (t:) `fmap` readChunks - ts <- readChunks - (hh', _) <- hClose_help hh - return (hh'{haType=ClosedHandle}, T.concat ts) - --- | Use a more efficient buffer size if we're reading in --- block-buffered mode with the default buffer size. When we can --- determine the size of the handle we're reading, set the buffer size --- to that, so that we can read the entire file in one chunk. --- Otherwise, use a buffer size of at least 16KB. -chooseGoodBuffering :: Handle -> IO () -chooseGoodBuffering h = do - bufMode <- hGetBuffering h - case bufMode of - BlockBuffering Nothing -> do - d <- E.catch (liftM2 (-) (hFileSize h) (hTell h)) $ \(e::IOException) -> - if ioe_type e == InappropriateType - then return 16384 -- faster than the 2KB default - else E.throwIO e - when (d > 0) . hSetBuffering h . BlockBuffering . Just . fromIntegral $ d - _ -> return () - --- | Read a single line from a handle. -hGetLine :: Handle -> IO Text -hGetLine = hGetLineWith T.concat - --- | Write a string to a handle. 
-hPutStr :: Handle -> Text -> IO () --- This function is lifted almost verbatim from GHC.IO.Handle.Text. -hPutStr h t = do - (buffer_mode, nl) <- - wantWritableHandle "hPutStr" h $ \h_ -> do - bmode <- getSpareBuffer h_ - return (bmode, haOutputNL h_) - let str = stream t - case buffer_mode of - (NoBuffering, _) -> hPutChars h str - (LineBuffering, buf) -> writeLines h nl buf str - (BlockBuffering _, buf) - | nl == CRLF -> writeBlocksCRLF h buf str - | otherwise -> writeBlocksRaw h buf str - -hPutChars :: Handle -> Stream Char -> IO () -hPutChars h (Stream next0 s0 _len) = loop s0 - where - loop !s = case next0 s of - Done -> return () - Skip s' -> loop s' - Yield x s' -> hPutChar h x >> loop s' - --- The following functions are largely lifted from GHC.IO.Handle.Text, --- but adapted to a coinductive stream of data instead of an inductive --- list. --- --- We have several variations of more or less the same code for --- performance reasons. Splitting the original buffered write --- function into line- and block-oriented versions gave us a 2.1x --- performance improvement. Lifting out the raw/cooked newline --- handling gave a few more percent on top. - -writeLines :: Handle -> Newline -> Buffer CharBufElem -> Stream Char -> IO () -writeLines h nl buf0 (Stream next0 s0 _len) = outer s0 buf0 - where - outer s1 Buffer{bufRaw=raw, bufSize=len} = inner s1 (0::Int) - where - inner !s !n = - case next0 s of - Done -> commit n False{-no flush-} True{-release-} >> return () - Skip s' -> inner s' n - Yield x s' - | n + 1 >= len -> commit n True{-needs flush-} False >>= outer s - | x == '\n' -> do - n' <- if nl == CRLF - then do n1 <- writeCharBuf raw n '\r' - writeCharBuf raw n1 '\n' - else writeCharBuf raw n x - commit n' True{-needs flush-} False >>= outer s' - | otherwise -> writeCharBuf raw n x >>= inner s' - commit = commitBuffer h raw len - -writeBlocksCRLF :: Handle -> Buffer CharBufElem -> Stream Char -> IO () -writeBlocksCRLF h buf0 (Stream next0 s0 _len) = outer s0 buf0 - where - outer s1 Buffer{bufRaw=raw, bufSize=len} = inner s1 (0::Int) - where - inner !s !n = - case next0 s of - Done -> commit n False{-no flush-} True{-release-} >> return () - Skip s' -> inner s' n - Yield x s' - | n + 1 >= len -> commit n True{-needs flush-} False >>= outer s - | x == '\n' -> do n1 <- writeCharBuf raw n '\r' - writeCharBuf raw n1 '\n' >>= inner s' - | otherwise -> writeCharBuf raw n x >>= inner s' - commit = commitBuffer h raw len - -writeBlocksRaw :: Handle -> Buffer CharBufElem -> Stream Char -> IO () -writeBlocksRaw h buf0 (Stream next0 s0 _len) = outer s0 buf0 - where - outer s1 Buffer{bufRaw=raw, bufSize=len} = inner s1 (0::Int) - where - inner !s !n = - case next0 s of - Done -> commit n False{-no flush-} True{-release-} >> return () - Skip s' -> inner s' n - Yield x s' - | n + 1 >= len -> commit n True{-needs flush-} False >>= outer s - | otherwise -> writeCharBuf raw n x >>= inner s' - commit = commitBuffer h raw len - --- This function is completely lifted from GHC.IO.Handle.Text. 
-getSpareBuffer :: Handle__ -> IO (BufferMode, CharBuffer) -getSpareBuffer Handle__{haCharBuffer=ref, - haBuffers=spare_ref, - haBufferMode=mode} - = do - case mode of - NoBuffering -> return (mode, error "no buffer!") - _ -> do - bufs <- readIORef spare_ref - buf <- readIORef ref - case bufs of - BufferListCons b rest -> do - writeIORef spare_ref rest - return ( mode, emptyBuffer b (bufSize buf) WriteBuffer) - BufferListNil -> do - new_buf <- newCharBuffer (bufSize buf) WriteBuffer - return (mode, new_buf) - - --- This function is completely lifted from GHC.IO.Handle.Text. -commitBuffer :: Handle -> RawCharBuffer -> Int -> Int -> Bool -> Bool - -> IO CharBuffer -commitBuffer hdl !raw !sz !count flush release = - wantWritableHandle "commitAndReleaseBuffer" hdl $ - commitBuffer' raw sz count flush release -{-# INLINE commitBuffer #-} - --- | Write a string to a handle, followed by a newline. -hPutStrLn :: Handle -> Text -> IO () -hPutStrLn h t = hPutStr h t >> hPutChar h '\n' - --- | The 'interact' function takes a function of type @Text -> Text@ --- as its argument. The entire input from the standard input device is --- passed to this function as its argument, and the resulting string --- is output on the standard output device. -interact :: (Text -> Text) -> IO () -interact f = putStr . f =<< getContents - --- | Read all user input on 'stdin' as a single string. -getContents :: IO Text -getContents = hGetContents stdin - --- | Read a single line of user input from 'stdin'. -getLine :: IO Text -getLine = hGetLine stdin - --- | Write a string to 'stdout'. -putStr :: Text -> IO () -putStr = hPutStr stdout - --- | Write a string to 'stdout', followed by a newline. -putStrLn :: Text -> IO () -putStrLn = hPutStrLn stdout - --- $locale --- --- /Note/: The behaviour of functions in this module depends on the --- version of GHC you are using. --- --- Beginning with GHC 6.12, text I\/O is performed using the system or --- handle's current locale and line ending conventions. --- --- Under GHC 6.10 and earlier, the system I\/O libraries do not --- support locale-sensitive I\/O or line ending conversion. On these --- versions of GHC, functions in this library all use UTF-8. What --- does this mean in practice? --- --- * All data that is read will be decoded as UTF-8. --- --- * Before data is written, it is first encoded as UTF-8. --- --- * On both reading and writing, the platform's native newline --- conversion is performed. --- --- If you must use a non-UTF-8 locale on an older version of GHC, you --- will have to perform the transcoding yourself, e.g. as follows: --- --- > import qualified Data.ByteString as B --- > import Data.Text (Text) --- > import Data.Text.Encoding (encodeUtf16) --- > --- > putStr_Utf16LE :: Text -> IO () --- > putStr_Utf16LE t = B.putStr (encodeUtf16LE t) --- --- On transcoding errors, an 'IOError' exception is thrown. You can --- use the API in "Data.Text.Encoding" if you need more control over --- error handling or transcoding. 
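The Data.Text.IO hunk above drops haddocks noting that hGetChunk blocks only when no buffered data is available and returns an empty Text once EOF is reached, and that hGetContents builds its result by concatenating such chunks (using roughly twice the result's size in memory). A hedged sketch of that chunked-reading loop follows, written against the released text package's public API rather than this vendored copy and not part of the patch; readAllChunks is an illustrative name and "input.txt" is a placeholder path.

import qualified Data.Text as T
import qualified Data.Text.IO as TIO   -- assumes the text package is available as a dependency
import System.IO (Handle, IOMode(ReadMode), withFile)

-- Accumulate chunks until hGetChunk reports EOF by returning an empty
-- Text, then concatenate them, mirroring the strategy described for
-- the removed hGetContents.
readAllChunks :: Handle -> IO T.Text
readAllChunks h = go []
  where
    go acc = do
      t <- TIO.hGetChunk h
      if T.null t
        then return (T.concat (reverse acc))  -- empty chunk signals EOF
        else go (t : acc)

main :: IO ()
main = withFile "input.txt" ReadMode readAllChunks >>= TIO.putStr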
diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Builder/Int.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Builder/Int.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Builder/Int.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Builder/Int.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,264 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, MagicHash, RankNTypes, ScopedTypeVariables, - UnboxedTuples #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - --- Module: Data.Text.Lazy.Builder.Int --- Copyright: (c) 2013 Bryan O'Sullivan --- (c) 2011 MailRank, Inc. --- License: BSD-style --- Maintainer: Bryan O'Sullivan --- Portability: portable --- --- Efficiently write an integral value to a 'Builder'. - -module Data.Text.Lazy.Builder.Int - ( - decimal - , hexadecimal - ) where - -import Data.Int (Int8, Int16, Int32, Int64) -import Data.Monoid (mempty) -import qualified Data.ByteString.Unsafe as B -import Data.Text.Internal.Builder.Functions ((<>), i2d) -import Data.Text.Internal.Builder -import Data.Text.Internal.Builder.Int.Digits (digits) -import Data.Text.Array -import Data.Word (Word, Word8, Word16, Word32, Word64) -import GHC.Base (quotInt, remInt) -import GHC.Num (quotRemInteger) -import GHC.Types (Int(..)) -import Control.Monad.ST -#if MIN_VERSION_base(4,11,0) -import Prelude hiding ((<>)) -#endif - -#ifdef __GLASGOW_HASKELL__ -# if defined(INTEGER_GMP) -import GHC.Integer.GMP.Internals (Integer(S#)) -# elif defined(INTEGER_SIMPLE) -import GHC.Integer -# else -# error "You need to use either GMP or integer-simple." -# endif -#endif - -#if defined(INTEGER_GMP) || defined(INTEGER_SIMPLE) -# define PAIR(a,b) (# a,b #) -#else -# define PAIR(a,b) (a,b) -#endif - -decimal :: Integral a => a -> Builder -{-# RULES "decimal/Int8" decimal = boundedDecimal :: Int8 -> Builder #-} -{-# RULES "decimal/Int" decimal = boundedDecimal :: Int -> Builder #-} -{-# RULES "decimal/Int16" decimal = boundedDecimal :: Int16 -> Builder #-} -{-# RULES "decimal/Int32" decimal = boundedDecimal :: Int32 -> Builder #-} -{-# RULES "decimal/Int64" decimal = boundedDecimal :: Int64 -> Builder #-} -{-# RULES "decimal/Word" decimal = positive :: Data.Word.Word -> Builder #-} -{-# RULES "decimal/Word8" decimal = positive :: Word8 -> Builder #-} -{-# RULES "decimal/Word16" decimal = positive :: Word16 -> Builder #-} -{-# RULES "decimal/Word32" decimal = positive :: Word32 -> Builder #-} -{-# RULES "decimal/Word64" decimal = positive :: Word64 -> Builder #-} -{-# RULES "decimal/Integer" decimal = integer 10 :: Integer -> Builder #-} -decimal i = decimal' (<= -128) i -{-# NOINLINE decimal #-} - -boundedDecimal :: (Integral a, Bounded a) => a -> Builder -{-# SPECIALIZE boundedDecimal :: Int -> Builder #-} -{-# SPECIALIZE boundedDecimal :: Int8 -> Builder #-} -{-# SPECIALIZE boundedDecimal :: Int16 -> Builder #-} -{-# SPECIALIZE boundedDecimal :: Int32 -> Builder #-} -{-# SPECIALIZE boundedDecimal :: Int64 -> Builder #-} -boundedDecimal i = decimal' (== minBound) i - -decimal' :: (Integral a) => (a -> Bool) -> a -> Builder -{-# INLINE decimal' #-} -decimal' p i - | i < 0 = if p i - then let (q, r) = i `quotRem` 10 - qq = -q - !n = countDigits qq - in writeN (n + 2) $ \marr off -> do - unsafeWrite marr off minus - posDecimal marr (off+1) n qq - unsafeWrite marr (off+n+1) (i2w (-r)) - else let j = -i - !n = countDigits j - in writeN (n + 1) $ \marr 
off -> - unsafeWrite marr off minus >> posDecimal marr (off+1) n j - | otherwise = positive i - -positive :: (Integral a) => a -> Builder -{-# SPECIALIZE positive :: Int -> Builder #-} -{-# SPECIALIZE positive :: Int8 -> Builder #-} -{-# SPECIALIZE positive :: Int16 -> Builder #-} -{-# SPECIALIZE positive :: Int32 -> Builder #-} -{-# SPECIALIZE positive :: Int64 -> Builder #-} -{-# SPECIALIZE positive :: Word -> Builder #-} -{-# SPECIALIZE positive :: Word8 -> Builder #-} -{-# SPECIALIZE positive :: Word16 -> Builder #-} -{-# SPECIALIZE positive :: Word32 -> Builder #-} -{-# SPECIALIZE positive :: Word64 -> Builder #-} -positive i - | i < 10 = writeN 1 $ \marr off -> unsafeWrite marr off (i2w i) - | otherwise = let !n = countDigits i - in writeN n $ \marr off -> posDecimal marr off n i - -posDecimal :: (Integral a) => - forall s. MArray s -> Int -> Int -> a -> ST s () -{-# INLINE posDecimal #-} -posDecimal marr off0 ds v0 = go (off0 + ds - 1) v0 - where go off v - | v >= 100 = do - let (q, r) = v `quotRem` 100 - write2 off r - go (off - 2) q - | v < 10 = unsafeWrite marr off (i2w v) - | otherwise = write2 off v - write2 off i0 = do - let i = fromIntegral i0; j = i + i - unsafeWrite marr off $ get (j + 1) - unsafeWrite marr (off - 1) $ get j - get = fromIntegral . B.unsafeIndex digits - -minus, zero :: Word16 -{-# INLINE minus #-} -{-# INLINE zero #-} -minus = 45 -zero = 48 - -i2w :: (Integral a) => a -> Word16 -{-# INLINE i2w #-} -i2w v = zero + fromIntegral v - -countDigits :: (Integral a) => a -> Int -{-# INLINE countDigits #-} -countDigits v0 - | fromIntegral v64 == v0 = go 1 v64 - | otherwise = goBig 1 (fromIntegral v0) - where v64 = fromIntegral v0 - goBig !k (v :: Integer) - | v > big = goBig (k + 19) (v `quot` big) - | otherwise = go k (fromIntegral v) - big = 10000000000000000000 - go !k (v :: Word64) - | v < 10 = k - | v < 100 = k + 1 - | v < 1000 = k + 2 - | v < 1000000000000 = - k + if v < 100000000 - then if v < 1000000 - then if v < 10000 - then 3 - else 4 + fin v 100000 - else 6 + fin v 10000000 - else if v < 10000000000 - then 8 + fin v 1000000000 - else 10 + fin v 100000000000 - | otherwise = go (k + 12) (v `quot` 1000000000000) - fin v n = if v >= n then 1 else 0 - -hexadecimal :: Integral a => a -> Builder -{-# SPECIALIZE hexadecimal :: Int -> Builder #-} -{-# SPECIALIZE hexadecimal :: Int8 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Int16 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Int32 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Int64 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Word -> Builder #-} -{-# SPECIALIZE hexadecimal :: Word8 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Word16 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Word32 -> Builder #-} -{-# SPECIALIZE hexadecimal :: Word64 -> Builder #-} -{-# RULES "hexadecimal/Integer" - hexadecimal = hexInteger :: Integer -> Builder #-} -hexadecimal i - | i < 0 = error hexErrMsg - | otherwise = go i - where - go n | n < 16 = hexDigit n - | otherwise = go (n `quot` 16) <> hexDigit (n `rem` 16) -{-# NOINLINE[0] hexadecimal #-} - -hexInteger :: Integer -> Builder -hexInteger i - | i < 0 = error hexErrMsg - | otherwise = integer 16 i - -hexErrMsg :: String -hexErrMsg = "Data.Text.Lazy.Builder.Int.hexadecimal: applied to negative number" - -hexDigit :: Integral a => a -> Builder -hexDigit n - | n <= 9 = singleton $! i2d (fromIntegral n) - | otherwise = singleton $! 
toEnum (fromIntegral n + 87) -{-# INLINE hexDigit #-} - -data T = T !Integer !Int - -integer :: Int -> Integer -> Builder -#ifdef INTEGER_GMP -integer 10 (S# i#) = decimal (I# i#) -integer 16 (S# i#) = hexadecimal (I# i#) -#endif -integer base i - | i < 0 = singleton '-' <> go (-i) - | otherwise = go i - where - go n | n < maxInt = int (fromInteger n) - | otherwise = putH (splitf (maxInt * maxInt) n) - - splitf p n - | p > n = [n] - | otherwise = splith p (splitf (p*p) n) - - splith p (n:ns) = case n `quotRemInteger` p of - PAIR(q,r) | q > 0 -> q : r : splitb p ns - | otherwise -> r : splitb p ns - splith _ _ = error "splith: the impossible happened." - - splitb p (n:ns) = case n `quotRemInteger` p of - PAIR(q,r) -> q : r : splitb p ns - splitb _ _ = [] - - T maxInt10 maxDigits10 = - until ((>mi) . (*10) . fstT) (\(T n d) -> T (n*10) (d+1)) (T 10 1) - where mi = fromIntegral (maxBound :: Int) - T maxInt16 maxDigits16 = - until ((>mi) . (*16) . fstT) (\(T n d) -> T (n*16) (d+1)) (T 16 1) - where mi = fromIntegral (maxBound :: Int) - - fstT (T a _) = a - - maxInt | base == 10 = maxInt10 - | otherwise = maxInt16 - maxDigits | base == 10 = maxDigits10 - | otherwise = maxDigits16 - - putH (n:ns) = case n `quotRemInteger` maxInt of - PAIR(x,y) - | q > 0 -> int q <> pblock r <> putB ns - | otherwise -> int r <> putB ns - where q = fromInteger x - r = fromInteger y - putH _ = error "putH: the impossible happened" - - putB (n:ns) = case n `quotRemInteger` maxInt of - PAIR(x,y) -> pblock q <> pblock r <> putB ns - where q = fromInteger x - r = fromInteger y - putB _ = Data.Monoid.mempty - - int :: Int -> Builder - int x | base == 10 = decimal x - | otherwise = hexadecimal x - - pblock = loop maxDigits - where - loop !d !n - | d == 1 = hexDigit n - | otherwise = loop (d-1) q <> hexDigit r - where q = n `quotInt` base - r = n `remInt` base diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Builder/RealFloat.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Builder/RealFloat.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Builder/RealFloat.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Builder/RealFloat.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,253 +0,0 @@ -{-# LANGUAGE CPP, OverloadedStrings #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - --- | --- Module: Data.Text.Lazy.Builder.RealFloat --- Copyright: (c) The University of Glasgow 1994-2002 --- License: see libraries/base/LICENSE --- --- Write a floating point value to a 'Builder'. - -module Data.Text.Lazy.Builder.RealFloat - ( - FPFormat(..) - , realFloat - , formatRealFloat - ) where - -import Data.Array.Base (unsafeAt) -import Data.Array.IArray -import Data.Text.Internal.Builder.Functions ((<>), i2d) -import Data.Text.Lazy.Builder.Int (decimal) -import Data.Text.Internal.Builder.RealFloat.Functions (roundTo) -import Data.Text.Lazy.Builder -import qualified Data.Text as T -#if MIN_VERSION_base(4,11,0) -import Prelude hiding ((<>)) -#endif - --- | Control the rendering of floating point numbers. -data FPFormat = Exponent - -- ^ Scientific notation (e.g. @2.3e123@). - | Fixed - -- ^ Standard decimal notation. - | Generic - -- ^ Use decimal notation for values between @0.1@ and - -- @9,999,999@, and scientific notation otherwise. 
- deriving (Enum, Read, Show) - --- | Show a signed 'RealFloat' value to full precision, --- using standard decimal notation for arguments whose absolute value lies --- between @0.1@ and @9,999,999@, and scientific notation otherwise. -realFloat :: (RealFloat a) => a -> Builder -{-# SPECIALIZE realFloat :: Float -> Builder #-} -{-# SPECIALIZE realFloat :: Double -> Builder #-} -realFloat x = formatRealFloat Generic Nothing x - --- | Encode a signed 'RealFloat' according to 'FPFormat' and optionally requested precision. --- --- This corresponds to the @show{E,F,G}Float@ operations provided by @base@'s "Numeric" module. --- --- __NOTE__: The functions in @base-4.12@ changed the serialisation in --- case of a @Just 0@ precision; this version of @text@ still provides --- the serialisation as implemented in @base-4.11@. The next major --- version of @text@ will switch to the more correct @base-4.12@ serialisation. -formatRealFloat :: (RealFloat a) => - FPFormat - -> Maybe Int -- ^ Number of decimal places to render. - -> a - -> Builder -{-# SPECIALIZE formatRealFloat :: FPFormat -> Maybe Int -> Float -> Builder #-} -{-# SPECIALIZE formatRealFloat :: FPFormat -> Maybe Int -> Double -> Builder #-} -formatRealFloat fmt decs x - | isNaN x = "NaN" - | isInfinite x = if x < 0 then "-Infinity" else "Infinity" - | x < 0 || isNegativeZero x = singleton '-' <> doFmt fmt (floatToDigits (-x)) - | otherwise = doFmt fmt (floatToDigits x) - where - doFmt format (is, e) = - let ds = map i2d is in - case format of - Generic -> - doFmt (if e < 0 || e > 7 then Exponent else Fixed) - (is,e) - Exponent -> - case decs of - Nothing -> - let show_e' = decimal (e-1) in - case ds of - "0" -> "0.0e0" - [d] -> singleton d <> ".0e" <> show_e' - (d:ds') -> singleton d <> singleton '.' <> fromString ds' <> singleton 'e' <> show_e' - [] -> error "formatRealFloat/doFmt/Exponent/Nothing: []" - Just dec -> - let dec' = max dec 1 in - case is of - [0] -> "0." <> fromText (T.replicate dec' "0") <> "e0" - _ -> - let (ei,is') = roundTo (dec'+1) is - is'' = map i2d (if ei > 0 then init is' else is') - in case is'' of - [] -> error "formatRealFloat/doFmt/Exponent/Just: []" - (d:ds') -> singleton d <> singleton '.' <> fromString ds' <> singleton 'e' <> decimal (e-1+ei) - Fixed -> - let - mk0 ls = case ls of { "" -> "0" ; _ -> fromString ls} - in - case decs of - Nothing - | e <= 0 -> "0." <> fromText (T.replicate (-e) "0") <> fromString ds - | otherwise -> - let - f 0 s rs = mk0 (reverse s) <> singleton '.' <> mk0 rs - f n s "" = f (n-1) ('0':s) "" - f n s (r:rs) = f (n-1) (r:s) rs - in - f e "" ds - Just dec -> - let dec' = max dec 0 in - if e >= 0 then - let - (ei,is') = roundTo (dec' + e) is - (ls,rs) = splitAt (e+ei) (map i2d is') - in - mk0 ls <> (if null rs then "" else singleton '.' <> fromString rs) - else - let (ei,is') = roundTo dec' (replicate (-e) 0 ++ is) - is'' = map i2d (if ei > 0 then is' else 0:is') - in case is'' of - [] -> error "formatRealFloat/doFmt/Fixed: []" - (d:ds') -> singleton d <> (if null ds' then "" else singleton '.' <> fromString ds') - - --- Based on "Printing Floating-Point Numbers Quickly and Accurately" --- by R.G. Burger and R.K. Dybvig in PLDI 96. --- This version uses a much slower logarithm estimator. It should be improved. - --- | 'floatToDigits' takes a base and a non-negative 'RealFloat' number, --- and returns a list of digits and an exponent. 
--- In particular, if @x>=0@, and --- --- > floatToDigits base x = ([d1,d2,...,dn], e) --- --- then --- --- (1) @n >= 1@ --- --- (2) @x = 0.d1d2...dn * (base**e)@ --- --- (3) @0 <= di <= base-1@ - -floatToDigits :: (RealFloat a) => a -> ([Int], Int) -{-# SPECIALIZE floatToDigits :: Float -> ([Int], Int) #-} -{-# SPECIALIZE floatToDigits :: Double -> ([Int], Int) #-} -floatToDigits 0 = ([0], 0) -floatToDigits x = - let - (f0, e0) = decodeFloat x - (minExp0, _) = floatRange x - p = floatDigits x - b = floatRadix x - minExp = minExp0 - p -- the real minimum exponent - -- Haskell requires that f be adjusted so denormalized numbers - -- will have an impossibly low exponent. Adjust for this. - (f, e) = - let n = minExp - e0 in - if n > 0 then (f0 `quot` (expt b n), e0+n) else (f0, e0) - (r, s, mUp, mDn) = - if e >= 0 then - let be = expt b e in - if f == expt b (p-1) then - (f*be*b*2, 2*b, be*b, be) -- according to Burger and Dybvig - else - (f*be*2, 2, be, be) - else - if e > minExp && f == expt b (p-1) then - (f*b*2, expt b (-e+1)*2, b, 1) - else - (f*2, expt b (-e)*2, 1, 1) - k :: Int - k = - let - k0 :: Int - k0 = - if b == 2 then - -- logBase 10 2 is very slightly larger than 8651/28738 - -- (about 5.3558e-10), so if log x >= 0, the approximation - -- k1 is too small, hence we add one and need one fixup step less. - -- If log x < 0, the approximation errs rather on the high side. - -- That is usually more than compensated for by ignoring the - -- fractional part of logBase 2 x, but when x is a power of 1/2 - -- or slightly larger and the exponent is a multiple of the - -- denominator of the rational approximation to logBase 10 2, - -- k1 is larger than logBase 10 x. If k1 > 1 + logBase 10 x, - -- we get a leading zero-digit we don't want. - -- With the approximation 3/10, this happened for - -- 0.5^1030, 0.5^1040, ..., 0.5^1070 and values close above. - -- The approximation 8651/28738 guarantees k1 < 1 + logBase 10 x - -- for IEEE-ish floating point types with exponent fields - -- <= 17 bits and mantissae of several thousand bits, earlier - -- convergents to logBase 10 2 would fail for long double. - -- Using quot instead of div is a little faster and requires - -- fewer fixup steps for negative lx. - let lx = p - 1 + e0 - k1 = (lx * 8651) `quot` 28738 - in if lx >= 0 then k1 + 1 else k1 - else - -- f :: Integer, log :: Float -> Float, - -- ceiling :: Float -> Int - ceiling ((log (fromInteger (f+1) :: Float) + - fromIntegral e * log (fromInteger b)) / - log 10) ---WAS: fromInt e * log (fromInteger b)) - - fixup n = - if n >= 0 then - if r + mUp <= expt 10 n * s then n else fixup (n+1) - else - if expt 10 (-n) * (r + mUp) <= s then n else fixup (n+1) - in - fixup k0 - - gen ds rn sN mUpN mDnN = - let - (dn, rn') = (rn * 10) `quotRem` sN - mUpN' = mUpN * 10 - mDnN' = mDnN * 10 - in - case (rn' < mDnN', rn' + mUpN' > sN) of - (True, False) -> dn : ds - (False, True) -> dn+1 : ds - (True, True) -> if rn' * 2 < sN then dn : ds else dn+1 : ds - (False, False) -> gen (dn:ds) rn' sN mUpN' mDnN' - - rds = - if k >= 0 then - gen [] r (s * expt 10 k) mUp mDn - else - let bk = expt 10 (-k) in - gen [] (r * bk) s (mUp * bk) (mDn * bk) - in - (map fromIntegral (reverse rds), k) - --- Exponentiation with a cache for the most common numbers. 
-minExpt, maxExpt :: Int -minExpt = 0 -maxExpt = 1100 - -expt :: Integer -> Int -> Integer -expt base n - | base == 2 && n >= minExpt && n <= maxExpt = expts `unsafeAt` n - | base == 10 && n <= maxExpt10 = expts10 `unsafeAt` n - | otherwise = base^n - -expts :: Array Int Integer -expts = array (minExpt,maxExpt) [(n,2^n) | n <- [minExpt .. maxExpt]] - -maxExpt10 :: Int -maxExpt10 = 324 - -expts10 :: Array Int Integer -expts10 = array (minExpt,maxExpt10) [(n,10^n) | n <- [minExpt .. maxExpt10]] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Builder.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Builder.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Builder.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Builder.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, Rank2Types #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - ------------------------------------------------------------------------------ --- | --- Module : Data.Text.Lazy.Builder --- Copyright : (c) 2013 Bryan O'Sullivan --- (c) 2010 Johan Tibell --- License : BSD-style (see LICENSE) --- --- Maintainer : Johan Tibell --- Portability : portable to Hugs and GHC --- --- Efficient construction of lazy @Text@ values. The principal --- operations on a @Builder@ are @singleton@, @fromText@, and --- @fromLazyText@, which construct new builders, and 'mappend', which --- concatenates two builders. --- --- To get maximum performance when building lazy @Text@ values using a --- builder, associate @mappend@ calls to the right. For example, --- prefer --- --- > singleton 'a' `mappend` (singleton 'b' `mappend` singleton 'c') --- --- to --- --- > singleton 'a' `mappend` singleton 'b' `mappend` singleton 'c' --- --- as the latter associates @mappend@ to the left. Or, equivalently, --- prefer --- --- > singleton 'a' <> singleton 'b' <> singleton 'c' --- --- since the '<>' from recent versions of 'Data.Monoid' associates --- to the right. - ------------------------------------------------------------------------------ - -module Data.Text.Lazy.Builder - ( -- * The Builder type - Builder - , toLazyText - , toLazyTextWith - - -- * Constructing Builders - , singleton - , fromText - , fromLazyText - , fromString - - -- * Flushing the buffer state - , flush - ) where - -import Data.Text.Internal.Builder diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Encoding.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Encoding.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Encoding.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Encoding.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,250 +0,0 @@ -{-# LANGUAGE BangPatterns,CPP #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- | --- Module : Data.Text.Lazy.Encoding --- Copyright : (c) 2009, 2010 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : portable --- --- Functions for converting lazy 'Text' values to and from lazy --- 'ByteString', using several standard encodings. --- --- To gain access to a much larger family of encodings, use the --- . 
- -module Data.Text.Lazy.Encoding - ( - -- * Decoding ByteStrings to Text - -- $strict - decodeASCII - , decodeLatin1 - , decodeUtf8 - , decodeUtf16LE - , decodeUtf16BE - , decodeUtf32LE - , decodeUtf32BE - - -- ** Catchable failure - , decodeUtf8' - - -- ** Controllable error handling - , decodeUtf8With - , decodeUtf16LEWith - , decodeUtf16BEWith - , decodeUtf32LEWith - , decodeUtf32BEWith - - -- * Encoding Text to ByteStrings - , encodeUtf8 - , encodeUtf16LE - , encodeUtf16BE - , encodeUtf32LE - , encodeUtf32BE - - -- * Encoding Text using ByteString Builders - , encodeUtf8Builder - , encodeUtf8BuilderEscaped - ) where - -import Control.Exception (evaluate, try) -import Data.Monoid (Monoid(..)) -import Data.Text.Encoding.Error (OnDecodeError, UnicodeException, strictDecode) -import Data.Text.Internal.Lazy (Text(..), chunk, empty, foldrChunks) -import Data.Word (Word8) -import qualified Data.ByteString as S -import qualified Data.ByteString.Builder as B -import qualified Data.ByteString.Builder.Extra as B (safeStrategy, toLazyByteStringWith) -import qualified Data.ByteString.Builder.Prim as BP -import qualified Data.ByteString.Lazy as B -import qualified Data.ByteString.Lazy.Internal as B -import qualified Data.ByteString.Unsafe as B -import qualified Data.Text as T -import qualified Data.Text.Encoding as TE -import qualified Data.Text.Internal.Lazy.Encoding.Fusion as E -import qualified Data.Text.Internal.Lazy.Fusion as F -import Data.Text.Unsafe (unsafeDupablePerformIO) - --- $strict --- --- All of the single-parameter functions for decoding bytestrings --- encoded in one of the Unicode Transformation Formats (UTF) operate --- in a /strict/ mode: each will throw an exception if given invalid --- input. --- --- Each function has a variant, whose name is suffixed with -'With', --- that gives greater control over the handling of decoding errors. --- For instance, 'decodeUtf8' will throw an exception, but --- 'decodeUtf8With' allows the programmer to determine what to do on a --- decoding error. - --- | /Deprecated/. Decode a 'ByteString' containing 7-bit ASCII --- encoded text. -decodeASCII :: B.ByteString -> Text -decodeASCII = decodeUtf8 -{-# DEPRECATED decodeASCII "Use decodeUtf8 instead" #-} - --- | Decode a 'ByteString' containing Latin-1 (aka ISO-8859-1) encoded text. -decodeLatin1 :: B.ByteString -> Text -decodeLatin1 = foldr (chunk . TE.decodeLatin1) empty . B.toChunks - --- | Decode a 'ByteString' containing UTF-8 encoded text. -decodeUtf8With :: OnDecodeError -> B.ByteString -> Text -decodeUtf8With onErr (B.Chunk b0 bs0) = - case TE.streamDecodeUtf8With onErr b0 of - TE.Some t l f -> chunk t (go f l bs0) - where - go f0 _ (B.Chunk b bs) = - case f0 b of - TE.Some t l f -> chunk t (go f l bs) - go _ l _ - | S.null l = empty - | otherwise = case onErr desc (Just (B.unsafeHead l)) of - Nothing -> empty - Just c -> Chunk (T.singleton c) Empty - desc = "Data.Text.Lazy.Encoding.decodeUtf8With: Invalid UTF-8 stream" -decodeUtf8With _ _ = empty - --- | Decode a 'ByteString' containing UTF-8 encoded text that is known --- to be valid. --- --- If the input contains any invalid UTF-8 data, an exception will be --- thrown that cannot be caught in pure code. For more control over --- the handling of invalid data, use 'decodeUtf8'' or --- 'decodeUtf8With'. -decodeUtf8 :: B.ByteString -> Text -decodeUtf8 = decodeUtf8With strictDecode -{-# INLINE[0] decodeUtf8 #-} - --- This rule seems to cause performance loss. -{- RULES "LAZY STREAM stream/decodeUtf8' fusion" [1] - forall bs. 
F.stream (decodeUtf8' bs) = E.streamUtf8 strictDecode bs #-} - --- | Decode a 'ByteString' containing UTF-8 encoded text.. --- --- If the input contains any invalid UTF-8 data, the relevant --- exception will be returned, otherwise the decoded text. --- --- /Note/: this function is /not/ lazy, as it must decode its entire --- input before it can return a result. If you need lazy (streaming) --- decoding, use 'decodeUtf8With' in lenient mode. -decodeUtf8' :: B.ByteString -> Either UnicodeException Text -decodeUtf8' bs = unsafeDupablePerformIO $ do - let t = decodeUtf8 bs - try (evaluate (rnf t `seq` t)) - where - rnf Empty = () - rnf (Chunk _ ts) = rnf ts -{-# INLINE decodeUtf8' #-} - --- | Encode text using UTF-8 encoding. -encodeUtf8 :: Text -> B.ByteString -encodeUtf8 Empty = B.empty -encodeUtf8 lt@(Chunk t _) = - B.toLazyByteStringWith strategy B.empty $ encodeUtf8Builder lt - where - -- To improve our small string performance, we use a strategy that - -- allocates a buffer that is guaranteed to be large enough for the - -- encoding of the first chunk, but not larger than the default - -- B.smallChunkSize. We clamp the firstChunkSize to ensure that we don't - -- generate too large buffers which hamper streaming. - firstChunkSize = min B.smallChunkSize (4 * (T.length t + 1)) - strategy = B.safeStrategy firstChunkSize B.defaultChunkSize - --- | Encode text to a ByteString 'B.Builder' using UTF-8 encoding. --- --- @since 1.1.0.0 -encodeUtf8Builder :: Text -> B.Builder -encodeUtf8Builder = - foldrChunks (\c b -> TE.encodeUtf8Builder c `mappend` b) Data.Monoid.mempty - --- | Encode text using UTF-8 encoding and escape the ASCII characters using --- a 'BP.BoundedPrim'. --- --- Use this function is to implement efficient encoders for text-based formats --- like JSON or HTML. --- --- @since 1.1.0.0 -{-# INLINE encodeUtf8BuilderEscaped #-} -encodeUtf8BuilderEscaped :: BP.BoundedPrim Word8 -> Text -> B.Builder -encodeUtf8BuilderEscaped prim = - foldrChunks (\c b -> TE.encodeUtf8BuilderEscaped prim c `mappend` b) mempty - --- | Decode text from little endian UTF-16 encoding. -decodeUtf16LEWith :: OnDecodeError -> B.ByteString -> Text -decodeUtf16LEWith onErr bs = F.unstream (E.streamUtf16LE onErr bs) -{-# INLINE decodeUtf16LEWith #-} - --- | Decode text from little endian UTF-16 encoding. --- --- If the input contains any invalid little endian UTF-16 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf16LEWith'. -decodeUtf16LE :: B.ByteString -> Text -decodeUtf16LE = decodeUtf16LEWith strictDecode -{-# INLINE decodeUtf16LE #-} - --- | Decode text from big endian UTF-16 encoding. -decodeUtf16BEWith :: OnDecodeError -> B.ByteString -> Text -decodeUtf16BEWith onErr bs = F.unstream (E.streamUtf16BE onErr bs) -{-# INLINE decodeUtf16BEWith #-} - --- | Decode text from big endian UTF-16 encoding. --- --- If the input contains any invalid big endian UTF-16 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf16BEWith'. -decodeUtf16BE :: B.ByteString -> Text -decodeUtf16BE = decodeUtf16BEWith strictDecode -{-# INLINE decodeUtf16BE #-} - --- | Encode text using little endian UTF-16 encoding. -encodeUtf16LE :: Text -> B.ByteString -encodeUtf16LE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf16LE) [] txt) -{-# INLINE encodeUtf16LE #-} - --- | Encode text using big endian UTF-16 encoding. -encodeUtf16BE :: Text -> B.ByteString -encodeUtf16BE txt = B.fromChunks (foldrChunks ((:) . 
TE.encodeUtf16BE) [] txt) -{-# INLINE encodeUtf16BE #-} - --- | Decode text from little endian UTF-32 encoding. -decodeUtf32LEWith :: OnDecodeError -> B.ByteString -> Text -decodeUtf32LEWith onErr bs = F.unstream (E.streamUtf32LE onErr bs) -{-# INLINE decodeUtf32LEWith #-} - --- | Decode text from little endian UTF-32 encoding. --- --- If the input contains any invalid little endian UTF-32 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf32LEWith'. -decodeUtf32LE :: B.ByteString -> Text -decodeUtf32LE = decodeUtf32LEWith strictDecode -{-# INLINE decodeUtf32LE #-} - --- | Decode text from big endian UTF-32 encoding. -decodeUtf32BEWith :: OnDecodeError -> B.ByteString -> Text -decodeUtf32BEWith onErr bs = F.unstream (E.streamUtf32BE onErr bs) -{-# INLINE decodeUtf32BEWith #-} - --- | Decode text from big endian UTF-32 encoding. --- --- If the input contains any invalid big endian UTF-32 data, an --- exception will be thrown. For more control over the handling of --- invalid data, use 'decodeUtf32BEWith'. -decodeUtf32BE :: B.ByteString -> Text -decodeUtf32BE = decodeUtf32BEWith strictDecode -{-# INLINE decodeUtf32BE #-} - --- | Encode text using little endian UTF-32 encoding. -encodeUtf32LE :: Text -> B.ByteString -encodeUtf32LE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf32LE) [] txt) -{-# INLINE encodeUtf32LE #-} - --- | Encode text using big endian UTF-32 encoding. -encodeUtf32BE :: Text -> B.ByteString -encodeUtf32BE txt = B.fromChunks (foldrChunks ((:) . TE.encodeUtf32BE) [] txt) -{-# INLINE encodeUtf32BE #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Internal.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Internal.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Internal.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Internal.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -{-# LANGUAGE BangPatterns, DeriveDataTypeable #-} --- | --- Module : Data.Text.Lazy.Internal --- Copyright : (c) 2013 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC --- --- This module has been renamed to 'Data.Text.Internal.Lazy'. This --- name for the module will be removed in the next major release. - -module Data.Text.Lazy.Internal - {-# DEPRECATED "Use Data.Text.Internal.Lazy instead" #-} - ( - module Data.Text.Internal.Lazy - ) where - -import Data.Text.Internal.Lazy diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/IO.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/IO.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/IO.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/IO.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,195 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, RecordWildCards #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- | --- Module : Data.Text.Lazy.IO --- Copyright : (c) 2009, 2010 Bryan O'Sullivan, --- (c) 2009 Simon Marlow --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : GHC --- --- Efficient locale-sensitive support for lazy text I\/O. 
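As a usage sketch for the lazy decoding functions removed above (the names safeDecode and lenient are invented, and the error handling shown is one reasonable choice, not the package's own example):

> import qualified Data.ByteString.Lazy as BL
> import qualified Data.Text.Lazy as TL
> import Data.Text.Lazy.Encoding (decodeUtf8', decodeUtf8With)
> import Data.Text.Encoding.Error (lenientDecode)
>
> -- Total decoding: an invalid byte sequence is reported, not thrown.
> safeDecode :: BL.ByteString -> Either String TL.Text
> safeDecode = either (Left . show) Right . decodeUtf8'
>
> -- Streaming-friendly decoding: invalid bytes become U+FFFD.
> lenient :: BL.ByteString -> TL.Text
> lenient = decodeUtf8With lenientDecode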
--- --- Skip past the synopsis for some important notes on performance and --- portability across different versions of GHC. - -module Data.Text.Lazy.IO - ( - -- * Performance - -- $performance - - -- * Locale support - -- $locale - -- * File-at-a-time operations - readFile - , writeFile - , appendFile - -- * Operations on handles - , hGetContents - , hGetLine - , hPutStr - , hPutStrLn - -- * Special cases for standard input and output - , interact - , getContents - , getLine - , putStr - , putStrLn - ) where - -import Data.Text.Lazy (Text) -import Prelude hiding (appendFile, getContents, getLine, interact, - putStr, putStrLn, readFile, writeFile) -import System.IO (Handle, IOMode(..), hPutChar, openFile, stdin, stdout, - withFile) -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as L -import qualified Control.Exception as E -import Control.Monad (when) -import Data.IORef (readIORef) -import Data.Text.Internal.IO (hGetLineWith, readChunk) -import Data.Text.Internal.Lazy (chunk, empty) -import GHC.IO.Buffer (isEmptyBuffer) -import GHC.IO.Exception (IOException(..), IOErrorType(..), ioException) -import GHC.IO.Handle.Internals (augmentIOError, hClose_help, - wantReadableHandle, withHandle) -import GHC.IO.Handle.Types (Handle__(..), HandleType(..)) -import System.IO (BufferMode(..), hGetBuffering, hSetBuffering) -import System.IO.Error (isEOFError) -import System.IO.Unsafe (unsafeInterleaveIO) - --- $performance --- --- The functions in this module obey the runtime system's locale, --- character set encoding, and line ending conversion settings. --- --- If you know in advance that you will be working with data that has --- a specific encoding (e.g. UTF-8), and your application is highly --- performance sensitive, you may find that it is faster to perform --- I\/O with bytestrings and to encode and decode yourself than to use --- the functions in this module. --- --- Whether this will hold depends on the version of GHC you are using, --- the platform you are working on, the data you are working with, and --- the encodings you are using, so be sure to test for yourself. - --- | Read a file and return its contents as a string. The file is --- read lazily, as with 'getContents'. -readFile :: FilePath -> IO Text -readFile name = openFile name ReadMode >>= hGetContents - --- | Write a string to a file. The file is truncated to zero length --- before writing begins. -writeFile :: FilePath -> Text -> IO () -writeFile p = withFile p WriteMode . flip hPutStr - --- | Write a string the end of a file. -appendFile :: FilePath -> Text -> IO () -appendFile p = withFile p AppendMode . flip hPutStr - --- | Lazily read the remaining contents of a 'Handle'. The 'Handle' --- will be closed after the read completes, or on error. -hGetContents :: Handle -> IO Text -hGetContents h = do - chooseGoodBuffering h - wantReadableHandle "hGetContents" h $ \hh -> do - ts <- lazyRead h - return (hh{haType=SemiClosedHandle}, ts) - --- | Use a more efficient buffer size if we're reading in --- block-buffered mode with the default buffer size. 
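A small sketch of the lazy file I/O functions described above (shout is an invented helper; it relies on readFile's laziness, so only a chunk at a time is resident, and the output path must differ from the input path):

> import qualified Data.Text.Lazy as TL
> import qualified Data.Text.Lazy.IO as TLIO
>
> -- Copy a file while upper-casing it; the input is read lazily, chunk by chunk.
> shout :: FilePath -> FilePath -> IO ()
> shout inp out = TLIO.readFile inp >>= TLIO.writeFile out . TL.toUpper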
-chooseGoodBuffering :: Handle -> IO () -chooseGoodBuffering h = do - bufMode <- hGetBuffering h - when (bufMode == BlockBuffering Nothing) $ - hSetBuffering h (BlockBuffering (Just 16384)) - -lazyRead :: Handle -> IO Text -lazyRead h = unsafeInterleaveIO $ - withHandle "hGetContents" h $ \hh -> do - case haType hh of - ClosedHandle -> return (hh, L.empty) - SemiClosedHandle -> lazyReadBuffered h hh - _ -> ioException - (IOError (Just h) IllegalOperation "hGetContents" - "illegal handle type" Nothing Nothing) - -lazyReadBuffered :: Handle -> Handle__ -> IO (Handle__, Text) -lazyReadBuffered h hh@Handle__{..} = do - buf <- readIORef haCharBuffer - (do t <- readChunk hh buf - ts <- lazyRead h - return (hh, chunk t ts)) `E.catch` \e -> do - (hh', _) <- hClose_help hh - if isEOFError e - then return $ if isEmptyBuffer buf - then (hh', empty) - else (hh', L.singleton '\r') - else E.throwIO (augmentIOError e "hGetContents" h) - --- | Read a single line from a handle. -hGetLine :: Handle -> IO Text -hGetLine = hGetLineWith L.fromChunks - --- | Write a string to a handle. -hPutStr :: Handle -> Text -> IO () -hPutStr h = mapM_ (T.hPutStr h) . L.toChunks - --- | Write a string to a handle, followed by a newline. -hPutStrLn :: Handle -> Text -> IO () -hPutStrLn h t = hPutStr h t >> hPutChar h '\n' - --- | The 'interact' function takes a function of type @Text -> Text@ --- as its argument. The entire input from the standard input device is --- passed (lazily) to this function as its argument, and the resulting --- string is output on the standard output device. -interact :: (Text -> Text) -> IO () -interact f = putStr . f =<< getContents - --- | Lazily read all user input on 'stdin' as a single string. -getContents :: IO Text -getContents = hGetContents stdin - --- | Read a single line of user input from 'stdin'. -getLine :: IO Text -getLine = hGetLine stdin - --- | Write a string to 'stdout'. -putStr :: Text -> IO () -putStr = hPutStr stdout - --- | Write a string to 'stdout', followed by a newline. -putStrLn :: Text -> IO () -putStrLn = hPutStrLn stdout - --- $locale --- --- /Note/: The behaviour of functions in this module depends on the --- version of GHC you are using. --- --- Beginning with GHC 6.12, text I\/O is performed using the system or --- handle's current locale and line ending conventions. --- --- Under GHC 6.10 and earlier, the system I\/O libraries /do not --- support/ locale-sensitive I\/O or line ending conversion. On these --- versions of GHC, functions in this library all use UTF-8. What --- does this mean in practice? --- --- * All data that is read will be decoded as UTF-8. --- --- * Before data is written, it is first encoded as UTF-8. --- --- * On both reading and writing, the platform's native newline --- conversion is performed. --- --- If you must use a non-UTF-8 locale on an older version of GHC, you --- will have to perform the transcoding yourself, e.g. 
as follows: --- --- > import qualified Data.ByteString.Lazy as B --- > import Data.Text.Lazy (Text) --- > import Data.Text.Lazy.Encoding (encodeUtf16) --- > --- > putStr_Utf16LE :: Text -> IO () --- > putStr_Utf16LE t = B.putStr (encodeUtf16LE t) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Read.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Read.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy/Read.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy/Read.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,192 +0,0 @@ -{-# LANGUAGE OverloadedStrings, CPP #-} -#if __GLASGOW_HASKELL__ >= 704 -{-# LANGUAGE Safe #-} -#elif __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - --- | --- Module : Data.Text.Lazy.Read --- Copyright : (c) 2010, 2011 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : GHC --- --- Functions used frequently when reading textual data. -module Data.Text.Lazy.Read - ( - Reader - , decimal - , hexadecimal - , signed - , rational - , double - ) where - -import Control.Monad (liftM) -import Data.Char (isDigit, isHexDigit) -import Data.Int (Int8, Int16, Int32, Int64) -import Data.Ratio ((%)) -import Data.Text.Internal.Read -import Data.Text.Lazy as T -import Data.Word (Word, Word8, Word16, Word32, Word64) - --- | Read some text. If the read succeeds, return its value and the --- remaining text, otherwise an error message. -type Reader a = IReader Text a -type Parser = IParser Text - --- | Read a decimal integer. The input must begin with at least one --- decimal digit, and is consumed until a non-digit or end of string --- is reached. --- --- This function does not handle leading sign characters. If you need --- to handle signed input, use @'signed' 'decimal'@. --- --- /Note/: For fixed-width integer types, this function does not --- attempt to detect overflow, so a sufficiently long input may give --- incorrect results. If you are worried about overflow, use --- 'Integer' for your result type. -decimal :: Integral a => Reader a -{-# SPECIALIZE decimal :: Reader Int #-} -{-# SPECIALIZE decimal :: Reader Int8 #-} -{-# SPECIALIZE decimal :: Reader Int16 #-} -{-# SPECIALIZE decimal :: Reader Int32 #-} -{-# SPECIALIZE decimal :: Reader Int64 #-} -{-# SPECIALIZE decimal :: Reader Integer #-} -{-# SPECIALIZE decimal :: Reader Data.Word.Word #-} -{-# SPECIALIZE decimal :: Reader Word8 #-} -{-# SPECIALIZE decimal :: Reader Word16 #-} -{-# SPECIALIZE decimal :: Reader Word32 #-} -{-# SPECIALIZE decimal :: Reader Word64 #-} -decimal txt - | T.null h = Left "input does not start with a digit" - | otherwise = Right (T.foldl' go 0 h, t) - where (h,t) = T.span isDigit txt - go n d = (n * 10 + fromIntegral (digitToInt d)) - --- | Read a hexadecimal integer, consisting of an optional leading --- @\"0x\"@ followed by at least one hexadecimal digit. Input is --- consumed until a non-hex-digit or end of string is reached. --- This function is case insensitive. --- --- This function does not handle leading sign characters. If you need --- to handle signed input, use @'signed' 'hexadecimal'@. --- --- /Note/: For fixed-width integer types, this function does not --- attempt to detect overflow, so a sufficiently long input may give --- incorrect results. If you are worried about overflow, use --- 'Integer' for your result type. 
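To illustrate the Reader interface documented above (parsePort is an invented helper, not part of the module):

> import qualified Data.Text.Lazy as TL
> import qualified Data.Text.Lazy.Read as R
>
> -- A Reader returns the parsed value together with the unconsumed input.
> parsePort :: TL.Text -> Either String Int
> parsePort t = case R.decimal t of
>   Right (n, rest) | TL.null rest -> Right n
>                   | otherwise    -> Left "trailing input after number"
>   Left err                       -> Left err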
-hexadecimal :: Integral a => Reader a -{-# SPECIALIZE hexadecimal :: Reader Int #-} -{-# SPECIALIZE hexadecimal :: Reader Integer #-} -hexadecimal txt - | h == "0x" || h == "0X" = hex t - | otherwise = hex txt - where (h,t) = T.splitAt 2 txt - -hex :: Integral a => Reader a -{-# SPECIALIZE hexadecimal :: Reader Int #-} -{-# SPECIALIZE hexadecimal :: Reader Int8 #-} -{-# SPECIALIZE hexadecimal :: Reader Int16 #-} -{-# SPECIALIZE hexadecimal :: Reader Int32 #-} -{-# SPECIALIZE hexadecimal :: Reader Int64 #-} -{-# SPECIALIZE hexadecimal :: Reader Integer #-} -{-# SPECIALIZE hexadecimal :: Reader Word #-} -{-# SPECIALIZE hexadecimal :: Reader Word8 #-} -{-# SPECIALIZE hexadecimal :: Reader Word16 #-} -{-# SPECIALIZE hexadecimal :: Reader Word32 #-} -{-# SPECIALIZE hexadecimal :: Reader Word64 #-} -hex txt - | T.null h = Left "input does not start with a hexadecimal digit" - | otherwise = Right (T.foldl' go 0 h, t) - where (h,t) = T.span isHexDigit txt - go n d = (n * 16 + fromIntegral (hexDigitToInt d)) - --- | Read an optional leading sign character (@\'-\'@ or @\'+\'@) and --- apply it to the result of applying the given reader. -signed :: Num a => Reader a -> Reader a -{-# INLINE signed #-} -signed f = runP (signa (P f)) - --- | Read a rational number. --- --- This function accepts an optional leading sign character, followed --- by at least one decimal digit. The syntax similar to that accepted --- by the 'read' function, with the exception that a trailing @\'.\'@ --- or @\'e\'@ /not/ followed by a number is not consumed. --- --- Examples: --- --- >rational "3" == Right (3.0, "") --- >rational "3.1" == Right (3.1, "") --- >rational "3e4" == Right (30000.0, "") --- >rational "3.1e4" == Right (31000.0, "") --- >rational ".3" == Left "input does not start with a digit" --- >rational "e3" == Left "input does not start with a digit" --- --- Examples of differences from 'read': --- --- >rational "3.foo" == Right (3.0, ".foo") --- >rational "3e" == Right (3.0, "e") -rational :: Fractional a => Reader a -{-# SPECIALIZE rational :: Reader Double #-} -rational = floaty $ \real frac fracDenom -> fromRational $ - real % 1 + frac % fracDenom - --- | Read a rational number. --- --- The syntax accepted by this function is the same as for 'rational'. --- --- /Note/: This function is almost ten times faster than 'rational', --- but is slightly less accurate. --- --- The 'Double' type supports about 16 decimal places of accuracy. --- For 94.2% of numbers, this function and 'rational' give identical --- results, but for the remaining 5.8%, this function loses precision --- around the 15th decimal place. For 0.001% of numbers, this --- function will lose precision at the 13th or 14th decimal place. 
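And a one-liner combining the sign and floating-point readers above (readTemp is an invented name):

> import qualified Data.Text.Lazy as TL
> import qualified Data.Text.Lazy.Read as R
>
> -- Accept an optional leading '+' or '-', then a double, e.g. "-3.5degC".
> readTemp :: TL.Text -> Either String (Double, TL.Text)
> readTemp = R.signed R.double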
-double :: Reader Double -double = floaty $ \real frac fracDenom -> - fromIntegral real + - fromIntegral frac / fromIntegral fracDenom - -signa :: Num a => Parser a -> Parser a -{-# SPECIALIZE signa :: Parser Int -> Parser Int #-} -{-# SPECIALIZE signa :: Parser Int8 -> Parser Int8 #-} -{-# SPECIALIZE signa :: Parser Int16 -> Parser Int16 #-} -{-# SPECIALIZE signa :: Parser Int32 -> Parser Int32 #-} -{-# SPECIALIZE signa :: Parser Int64 -> Parser Int64 #-} -{-# SPECIALIZE signa :: Parser Integer -> Parser Integer #-} -signa p = do - sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') - if sign == '+' then p else negate `liftM` p - -char :: (Char -> Bool) -> Parser Char -char p = P $ \t -> case T.uncons t of - Just (c,t') | p c -> Right (c,t') - _ -> Left "character does not match" - -floaty :: Fractional a => (Integer -> Integer -> Integer -> a) -> Reader a -{-# INLINE floaty #-} -floaty f = runP $ do - sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') - real <- P decimal - T fraction fracDigits <- perhaps (T 0 0) $ do - _ <- char (=='.') - digits <- P $ \t -> Right (fromIntegral . T.length $ T.takeWhile isDigit t, t) - n <- P decimal - return $ T n digits - let e c = c == 'e' || c == 'E' - power <- perhaps 0 (char e >> signa (P decimal) :: Parser Int) - let n = if fracDigits == 0 - then if power == 0 - then fromIntegral real - else fromIntegral real * (10 ^^ power) - else if power == 0 - then f real fraction (10 ^ fracDigits) - else f real fraction (10 ^ fracDigits) * (10 ^^ power) - return $! if sign == '+' - then n - else -n diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Lazy.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Lazy.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1746 +0,0 @@ -{-# OPTIONS_GHC -fno-warn-orphans #-} -{-# LANGUAGE BangPatterns, MagicHash, CPP, TypeFamilies #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- Using TemplateHaskell in text unconditionally is unacceptable, as --- it's a GHC boot library. TemplateHaskellQuotes was added in 8.0, so --- this would seem to be a problem. However, GHC's policy of only --- needing to be able to compile itself from the last few releases --- allows us to use full-fat TH on older versions, while using THQ for --- GHC versions that may be used for bootstrapping. -#if __GLASGOW_HASKELL__ >= 800 -{-# LANGUAGE TemplateHaskellQuotes #-} -#else -{-# LANGUAGE TemplateHaskell #-} -#endif - --- | --- Module : Data.Text.Lazy --- Copyright : (c) 2009, 2010, 2012 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : GHC --- --- A time and space-efficient implementation of Unicode text using --- lists of packed arrays. --- --- /Note/: Read below the synopsis for important notes on the use of --- this module. --- --- The representation used by this module is suitable for high --- performance use and for streaming large quantities of data. It --- provides a means to manipulate a large body of text without --- requiring that the entire content be resident in memory. --- --- Some operations, such as 'concat', 'append', 'reverse' and 'cons', --- have better time complexity than their "Data.Text" equivalents, due --- to the underlying representation being a list of chunks. 
For other --- operations, lazy 'Text's are usually within a few percent of strict --- ones, but often with better heap usage if used in a streaming --- fashion. For data larger than available memory, or if you have --- tight memory constraints, this module will be the only option. --- --- This module is intended to be imported @qualified@, to avoid name --- clashes with "Prelude" functions. eg. --- --- > import qualified Data.Text.Lazy as L - -module Data.Text.Lazy - ( - -- * Fusion - -- $fusion - - -- * Acceptable data - -- $replacement - - -- * Types - Text - - -- * Creation and elimination - , pack - , unpack - , singleton - , empty - , fromChunks - , toChunks - , toStrict - , fromStrict - , foldrChunks - , foldlChunks - - -- * Basic interface - , cons - , snoc - , append - , uncons - , unsnoc - , head - , last - , tail - , init - , null - , length - , compareLength - - -- * Transformations - , map - , intercalate - , intersperse - , transpose - , reverse - , replace - - -- ** Case conversion - -- $case - , toCaseFold - , toLower - , toUpper - , toTitle - - -- ** Justification - , justifyLeft - , justifyRight - , center - - -- * Folds - , foldl - , foldl' - , foldl1 - , foldl1' - , foldr - , foldr1 - - -- ** Special folds - , concat - , concatMap - , any - , all - , maximum - , minimum - - -- * Construction - - -- ** Scans - , scanl - , scanl1 - , scanr - , scanr1 - - -- ** Accumulating maps - , mapAccumL - , mapAccumR - - -- ** Generation and unfolding - , repeat - , replicate - , cycle - , iterate - , unfoldr - , unfoldrN - - -- * Substrings - - -- ** Breaking strings - , take - , takeEnd - , drop - , dropEnd - , takeWhile - , takeWhileEnd - , dropWhile - , dropWhileEnd - , dropAround - , strip - , stripStart - , stripEnd - , splitAt - , span - , breakOn - , breakOnEnd - , break - , group - , groupBy - , inits - , tails - - -- ** Breaking into many substrings - -- $split - , splitOn - , split - , chunksOf - -- , breakSubstring - - -- ** Breaking into lines and words - , lines - , words - , unlines - , unwords - - -- * Predicates - , isPrefixOf - , isSuffixOf - , isInfixOf - - -- ** View patterns - , stripPrefix - , stripSuffix - , commonPrefixes - - -- * Searching - , filter - , find - , breakOnAll - , partition - - -- , findSubstring - - -- * Indexing - , index - , count - - -- * Zipping and unzipping - , zip - , zipWith - - -- -* Ordered text - -- , sort - ) where - -import Prelude (Char, Bool(..), Maybe(..), String, - Eq(..), Ord(..), Ordering(..), Read(..), Show(..), - (&&), (||), (+), (-), (.), ($), (++), - error, flip, fmap, fromIntegral, not, otherwise, quot) -import qualified Prelude as P -import Control.DeepSeq (NFData(..)) -import Data.Int (Int64) -import qualified Data.List as L -import Data.Char (isSpace) -import Data.Data (Data(gfoldl, toConstr, gunfold, dataTypeOf), constrIndex, - Constr, mkConstr, DataType, mkDataType, Fixity(Prefix)) -import Data.Binary (Binary(get, put)) -import Data.Monoid (Monoid(..)) -#if MIN_VERSION_base(4,9,0) -import Data.Semigroup (Semigroup(..)) -#endif -import Data.String (IsString(..)) -import qualified Data.Text as T -import qualified Data.Text.Internal as T -import qualified Data.Text.Internal.Fusion.Common as S -import qualified Data.Text.Unsafe as T -import qualified Data.Text.Internal.Lazy.Fusion as S -import Data.Text.Internal.Fusion.Types (PairS(..)) -import Data.Text.Internal.Lazy.Fusion (stream, unstream) -import Data.Text.Internal.Lazy (Text(..), chunk, empty, foldlChunks, - foldrChunks, smallChunkSize) -import Data.Text.Internal 
(firstf, safe, text) -import Data.Text.Lazy.Encoding (decodeUtf8', encodeUtf8) -import qualified Data.Text.Internal.Functions as F -import Data.Text.Internal.Lazy.Search (indices) -#if __GLASGOW_HASKELL__ >= 702 -import qualified GHC.CString as GHC -#else -import qualified GHC.Base as GHC -#endif -#if MIN_VERSION_base(4,7,0) -import qualified GHC.Exts as Exts -#endif -import GHC.Prim (Addr#) -import qualified Language.Haskell.TH.Lib as TH -import Language.Haskell.TH.Syntax (Lift, lift) -#if MIN_VERSION_base(4,7,0) -import Text.Printf (PrintfArg, formatArg, formatString) -#endif - --- $fusion --- --- Most of the functions in this module are subject to /fusion/, --- meaning that a pipeline of such functions will usually allocate at --- most one 'Text' value. --- --- As an example, consider the following pipeline: --- --- > import Data.Text.Lazy as T --- > import Data.Text.Lazy.Encoding as E --- > import Data.ByteString.Lazy (ByteString) --- > --- > countChars :: ByteString -> Int --- > countChars = T.length . T.toUpper . E.decodeUtf8 --- --- From the type signatures involved, this looks like it should --- allocate one 'ByteString' value, and two 'Text' values. However, --- when a module is compiled with optimisation enabled under GHC, the --- two intermediate 'Text' values will be optimised away, and the --- function will be compiled down to a single loop over the source --- 'ByteString'. --- --- Functions that can be fused by the compiler are documented with the --- phrase \"Subject to fusion\". - --- $replacement --- --- A 'Text' value is a sequence of Unicode scalar values, as defined --- in --- . --- As such, a 'Text' cannot contain values in the range U+D800 to --- U+DFFF inclusive. Haskell implementations admit all Unicode code --- points --- () --- as 'Char' values, including code points from this invalid range. --- This means that there are some 'Char' values that are not valid --- Unicode scalar values, and the functions in this module must handle --- those cases. --- --- Within this module, many functions construct a 'Text' from one or --- more 'Char' values. Those functions will substitute 'Char' values --- that are not valid Unicode scalar values with the replacement --- character \"�\" (U+FFFD). Functions that perform this --- inspection and replacement are documented with the phrase --- \"Performs replacement on invalid scalar values\". --- --- (One reason for this policy of replacement is that internally, a --- 'Text' value is represented as packed UTF-16 data. Values in the --- range U+D800 through U+DFFF are used by UTF-16 to denote surrogate --- code points, and so cannot be represented. The functions replace --- invalid scalar values, instead of dropping them, as a security --- measure. For details, see --- .) 
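A concrete illustration of the replacement behaviour described above (a sketch; demo is an invented name):

> import qualified Data.Text.Lazy as TL
>
> -- '\xD800' is a lone surrogate, not a Unicode scalar value, so pack
> -- substitutes the replacement character U+FFFD for it.
> demo :: Bool
> demo = TL.pack ['a', '\xD800', 'b'] == TL.pack ['a', '\xFFFD', 'b']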
- -equal :: Text -> Text -> Bool -equal Empty Empty = True -equal Empty _ = False -equal _ Empty = False -equal (Chunk a as) (Chunk b bs) = - case compare lenA lenB of - LT -> a == (T.takeWord16 lenA b) && - as `equal` Chunk (T.dropWord16 lenA b) bs - EQ -> a == b && as `equal` bs - GT -> T.takeWord16 lenB a == b && - Chunk (T.dropWord16 lenB a) as `equal` bs - where lenA = T.lengthWord16 a - lenB = T.lengthWord16 b - -instance Eq Text where - (==) = equal - {-# INLINE (==) #-} - -instance Ord Text where - compare = compareText - -compareText :: Text -> Text -> Ordering -compareText Empty Empty = EQ -compareText Empty _ = LT -compareText _ Empty = GT -compareText (Chunk a0 as) (Chunk b0 bs) = outer a0 b0 - where - outer ta@(T.Text arrA offA lenA) tb@(T.Text arrB offB lenB) = go 0 0 - where - go !i !j - | i >= lenA = compareText as (chunk (T.Text arrB (offB+j) (lenB-j)) bs) - | j >= lenB = compareText (chunk (T.Text arrA (offA+i) (lenA-i)) as) bs - | a < b = LT - | a > b = GT - | otherwise = go (i+di) (j+dj) - where T.Iter a di = T.iter ta i - T.Iter b dj = T.iter tb j - -instance Show Text where - showsPrec p ps r = showsPrec p (unpack ps) r - -instance Read Text where - readsPrec p str = [(pack x,y) | (x,y) <- readsPrec p str] - -#if MIN_VERSION_base(4,9,0) --- | Non-orphan 'Semigroup' instance only defined for --- @base-4.9.0.0@ and later; orphan instances for older GHCs are --- provided by --- the [semigroups](http://hackage.haskell.org/package/semigroups) --- package --- --- @since 1.2.2.0 -instance Semigroup Text where - (<>) = append -#endif - -instance Monoid Text where - mempty = empty -#if MIN_VERSION_base(4,9,0) - mappend = (<>) -- future-proof definition -#else - mappend = append -#endif - mconcat = concat - -instance IsString Text where - fromString = pack - -#if MIN_VERSION_base(4,7,0) --- | @since 1.2.0.0 -instance Exts.IsList Text where - type Item Text = Char - fromList = pack - toList = unpack -#endif - -instance NFData Text where - rnf Empty = () - rnf (Chunk _ ts) = rnf ts - --- | @since 1.2.1.0 -instance Binary Text where - put t = put (encodeUtf8 t) - get = do - bs <- get - case decodeUtf8' bs of - P.Left exn -> P.fail (P.show exn) - P.Right a -> P.return a - --- | This instance preserves data abstraction at the cost of inefficiency. --- We omit reflection services for the sake of data abstraction. --- --- This instance was created by copying the updated behavior of --- @"Data.Text".@'Data.Text.Text' -instance Data Text where - gfoldl f z txt = z pack `f` (unpack txt) - toConstr _ = packConstr - gunfold k z c = case constrIndex c of - 1 -> k (z pack) - _ -> error "Data.Text.Lazy.Text.gunfold" - dataTypeOf _ = textDataType - --- | This instance has similar considerations to the 'Data' instance: --- it preserves abstraction at the cost of inefficiency. --- --- @since 1.2.4.0 -instance Lift Text where - lift = TH.appE (TH.varE 'pack) . TH.stringE . unpack - -#if MIN_VERSION_base(4,7,0) --- | Only defined for @base-4.7.0.0@ and later --- --- @since 1.2.2.0 -instance PrintfArg Text where - formatArg txt = formatString $ unpack txt -#endif - -packConstr :: Constr -packConstr = mkConstr textDataType "pack" [] Prefix - -textDataType :: DataType -textDataType = mkDataType "Data.Text.Lazy.Text" [packConstr] - --- | /O(n)/ Convert a 'String' into a 'Text'. --- --- Subject to fusion. Performs replacement on invalid scalar values. -pack :: String -> Text -pack = unstream . S.streamList . L.map safe -{-# INLINE [1] pack #-} - --- | /O(n)/ Convert a 'Text' into a 'String'. 
--- Subject to fusion. -unpack :: Text -> String -unpack t = S.unstreamList (stream t) -{-# INLINE [1] unpack #-} - --- | /O(n)/ Convert a literal string into a Text. -unpackCString# :: Addr# -> Text -unpackCString# addr# = unstream (S.streamCString# addr#) -{-# NOINLINE unpackCString# #-} - -{-# RULES "TEXT literal" forall a. - unstream (S.streamList (L.map safe (GHC.unpackCString# a))) - = unpackCString# a #-} - -{-# RULES "TEXT literal UTF8" forall a. - unstream (S.streamList (L.map safe (GHC.unpackCStringUtf8# a))) - = unpackCString# a #-} - -{-# RULES "LAZY TEXT empty literal" - unstream (S.streamList (L.map safe [])) - = Empty #-} - -{-# RULES "LAZY TEXT empty literal" forall a. - unstream (S.streamList (L.map safe [a])) - = Chunk (T.singleton a) Empty #-} - --- | /O(1)/ Convert a character into a Text. Subject to fusion. --- Performs replacement on invalid scalar values. -singleton :: Char -> Text -singleton c = Chunk (T.singleton c) Empty -{-# INLINE [1] singleton #-} - -{-# RULES -"LAZY TEXT singleton -> fused" [~1] forall c. - singleton c = unstream (S.singleton c) -"LAZY TEXT singleton -> unfused" [1] forall c. - unstream (S.singleton c) = singleton c - #-} - --- | /O(c)/ Convert a list of strict 'T.Text's into a lazy 'Text'. -fromChunks :: [T.Text] -> Text -fromChunks cs = L.foldr chunk Empty cs - --- | /O(n)/ Convert a lazy 'Text' into a list of strict 'T.Text's. -toChunks :: Text -> [T.Text] -toChunks cs = foldrChunks (:) [] cs - --- | /O(n)/ Convert a lazy 'Text' into a strict 'T.Text'. -toStrict :: Text -> T.Text -toStrict t = T.concat (toChunks t) -{-# INLINE [1] toStrict #-} - --- | /O(c)/ Convert a strict 'T.Text' into a lazy 'Text'. -fromStrict :: T.Text -> Text -fromStrict t = chunk t Empty -{-# INLINE [1] fromStrict #-} - --- ----------------------------------------------------------------------------- --- * Basic functions - --- | /O(1)/ Adds a character to the front of a 'Text'. Subject to fusion. -cons :: Char -> Text -> Text -cons c t = Chunk (T.singleton c) t -{-# INLINE [1] cons #-} - -infixr 5 `cons` - -{-# RULES -"LAZY TEXT cons -> fused" [~1] forall c t. - cons c t = unstream (S.cons c (stream t)) -"LAZY TEXT cons -> unfused" [1] forall c t. - unstream (S.cons c (stream t)) = cons c t - #-} - --- | /O(n)/ Adds a character to the end of a 'Text'. This copies the --- entire array in the process, unless fused. Subject to fusion. -snoc :: Text -> Char -> Text -snoc t c = foldrChunks Chunk (singleton c) t -{-# INLINE [1] snoc #-} - -{-# RULES -"LAZY TEXT snoc -> fused" [~1] forall t c. - snoc t c = unstream (S.snoc (stream t) c) -"LAZY TEXT snoc -> unfused" [1] forall t c. - unstream (S.snoc (stream t) c) = snoc t c - #-} - --- | /O(n\/c)/ Appends one 'Text' to another. Subject to fusion. -append :: Text -> Text -> Text -append xs ys = foldrChunks Chunk ys xs -{-# INLINE [1] append #-} - -{-# RULES -"LAZY TEXT append -> fused" [~1] forall t1 t2. - append t1 t2 = unstream (S.append (stream t1) (stream t2)) -"LAZY TEXT append -> unfused" [1] forall t1 t2. - unstream (S.append (stream t1) (stream t2)) = append t1 t2 - #-} - --- | /O(1)/ Returns the first character and rest of a 'Text', or --- 'Nothing' if empty. Subject to fusion. -uncons :: Text -> Maybe (Char, Text) -uncons Empty = Nothing -uncons (Chunk t ts) = Just (T.unsafeHead t, ts') - where ts' | T.compareLength t 1 == EQ = ts - | otherwise = Chunk (T.unsafeTail t) ts -{-# INLINE uncons #-} - --- | /O(1)/ Returns the first character of a 'Text', which must be --- non-empty. Subject to fusion. 
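For the basic interface above, a total alternative to the partial head (firstChar is an invented name):

> import qualified Data.Text.Lazy as TL
>
> -- uncons is total, unlike head and tail, which error on empty input.
> firstChar :: TL.Text -> Maybe Char
> firstChar = fmap fst . TL.uncons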
-head :: Text -> Char -head t = S.head (stream t) -{-# INLINE head #-} - --- | /O(1)/ Returns all characters after the head of a 'Text', which --- must be non-empty. Subject to fusion. -tail :: Text -> Text -tail (Chunk t ts) = chunk (T.tail t) ts -tail Empty = emptyError "tail" -{-# INLINE [1] tail #-} - -{-# RULES -"LAZY TEXT tail -> fused" [~1] forall t. - tail t = unstream (S.tail (stream t)) -"LAZY TEXT tail -> unfused" [1] forall t. - unstream (S.tail (stream t)) = tail t - #-} - --- | /O(n\/c)/ Returns all but the last character of a 'Text', which must --- be non-empty. Subject to fusion. -init :: Text -> Text -init (Chunk t0 ts0) = go t0 ts0 - where go t (Chunk t' ts) = Chunk t (go t' ts) - go t Empty = chunk (T.init t) Empty -init Empty = emptyError "init" -{-# INLINE [1] init #-} - -{-# RULES -"LAZY TEXT init -> fused" [~1] forall t. - init t = unstream (S.init (stream t)) -"LAZY TEXT init -> unfused" [1] forall t. - unstream (S.init (stream t)) = init t - #-} - --- | /O(n\/c)/ Returns the 'init' and 'last' of a 'Text', or 'Nothing' if --- empty. --- --- * It is no faster than using 'init' and 'last'. --- --- @since 1.2.3.0 -unsnoc :: Text -> Maybe (Text, Char) -unsnoc Empty = Nothing -unsnoc ts@(Chunk _ _) = Just (init ts, last ts) -{-# INLINE unsnoc #-} - --- | /O(1)/ Tests whether a 'Text' is empty or not. Subject to --- fusion. -null :: Text -> Bool -null Empty = True -null _ = False -{-# INLINE [1] null #-} - -{-# RULES -"LAZY TEXT null -> fused" [~1] forall t. - null t = S.null (stream t) -"LAZY TEXT null -> unfused" [1] forall t. - S.null (stream t) = null t - #-} - --- | /O(1)/ Tests whether a 'Text' contains exactly one character. --- Subject to fusion. -isSingleton :: Text -> Bool -isSingleton = S.isSingleton . stream -{-# INLINE isSingleton #-} - --- | /O(n\/c)/ Returns the last character of a 'Text', which must be --- non-empty. Subject to fusion. -last :: Text -> Char -last Empty = emptyError "last" -last (Chunk t ts) = go t ts - where go _ (Chunk t' ts') = go t' ts' - go t' Empty = T.last t' -{-# INLINE [1] last #-} - -{-# RULES -"LAZY TEXT last -> fused" [~1] forall t. - last t = S.last (stream t) -"LAZY TEXT last -> unfused" [1] forall t. - S.last (stream t) = last t - #-} - --- | /O(n)/ Returns the number of characters in a 'Text'. --- Subject to fusion. -length :: Text -> Int64 -length = foldlChunks go 0 - where go l t = l + fromIntegral (T.length t) -{-# INLINE [1] length #-} - -{-# RULES -"LAZY TEXT length -> fused" [~1] forall t. - length t = S.length (stream t) -"LAZY TEXT length -> unfused" [1] forall t. - S.length (stream t) = length t - #-} - --- | /O(n)/ Compare the count of characters in a 'Text' to a number. --- Subject to fusion. --- --- This function gives the same answer as comparing against the result --- of 'length', but can short circuit if the count of characters is --- greater than the number, and hence be more efficient. -compareLength :: Text -> Int64 -> Ordering -compareLength t n = S.compareLengthI (stream t) n -{-# INLINE [1] compareLength #-} - --- We don't apply those otherwise appealing length-to-compareLength --- rewrite rules here, because they can change the strictness --- properties of code. - --- | /O(n)/ 'map' @f@ @t@ is the 'Text' obtained by applying @f@ to --- each element of @t@. Subject to fusion. Performs replacement on --- invalid scalar values. -map :: (Char -> Char) -> Text -> Text -map f t = unstream (S.map (safe . 
f) (stream t)) -{-# INLINE [1] map #-} - --- | /O(n)/ The 'intercalate' function takes a 'Text' and a list of --- 'Text's and concatenates the list after interspersing the first --- argument between each element of the list. -intercalate :: Text -> [Text] -> Text -intercalate t = concat . (F.intersperse t) -{-# INLINE intercalate #-} - --- | /O(n)/ The 'intersperse' function takes a character and places it --- between the characters of a 'Text'. Subject to fusion. Performs --- replacement on invalid scalar values. -intersperse :: Char -> Text -> Text -intersperse c t = unstream (S.intersperse (safe c) (stream t)) -{-# INLINE intersperse #-} - --- | /O(n)/ Left-justify a string to the given length, using the --- specified fill character on the right. Subject to fusion. Performs --- replacement on invalid scalar values. --- --- Examples: --- --- > justifyLeft 7 'x' "foo" == "fooxxxx" --- > justifyLeft 3 'x' "foobar" == "foobar" -justifyLeft :: Int64 -> Char -> Text -> Text -justifyLeft k c t - | len >= k = t - | otherwise = t `append` replicateChar (k-len) c - where len = length t -{-# INLINE [1] justifyLeft #-} - -{-# RULES -"LAZY TEXT justifyLeft -> fused" [~1] forall k c t. - justifyLeft k c t = unstream (S.justifyLeftI k c (stream t)) -"LAZY TEXT justifyLeft -> unfused" [1] forall k c t. - unstream (S.justifyLeftI k c (stream t)) = justifyLeft k c t - #-} - --- | /O(n)/ Right-justify a string to the given length, using the --- specified fill character on the left. Performs replacement on --- invalid scalar values. --- --- Examples: --- --- > justifyRight 7 'x' "bar" == "xxxxbar" --- > justifyRight 3 'x' "foobar" == "foobar" -justifyRight :: Int64 -> Char -> Text -> Text -justifyRight k c t - | len >= k = t - | otherwise = replicateChar (k-len) c `append` t - where len = length t -{-# INLINE justifyRight #-} - --- | /O(n)/ Center a string to the given length, using the specified --- fill character on either side. Performs replacement on invalid --- scalar values. --- --- Examples: --- --- > center 8 'x' "HS" = "xxxHSxxx" -center :: Int64 -> Char -> Text -> Text -center k c t - | len >= k = t - | otherwise = replicateChar l c `append` t `append` replicateChar r c - where len = length t - d = k - len - r = d `quot` 2 - l = d - r -{-# INLINE center #-} - --- | /O(n)/ The 'transpose' function transposes the rows and columns --- of its 'Text' argument. Note that this function uses 'pack', --- 'unpack', and the list version of transpose, and is thus not very --- efficient. -transpose :: [Text] -> [Text] -transpose ts = L.map (\ss -> Chunk (T.pack ss) Empty) - (L.transpose (L.map unpack ts)) --- TODO: make this fast - --- | /O(n)/ 'reverse' @t@ returns the elements of @t@ in reverse order. -reverse :: Text -> Text -reverse = rev Empty - where rev a Empty = a - rev a (Chunk t ts) = rev (Chunk (T.reverse t) a) ts - --- | /O(m+n)/ Replace every non-overlapping occurrence of @needle@ in --- @haystack@ with @replacement@. --- --- This function behaves as though it was defined as follows: --- --- @ --- replace needle replacement haystack = --- 'intercalate' replacement ('splitOn' needle haystack) --- @ --- --- As this suggests, each occurrence is replaced exactly once. 
So if --- @needle@ occurs in @replacement@, that occurrence will /not/ itself --- be replaced recursively: --- --- > replace "oo" "foo" "oo" == "foo" --- --- In cases where several instances of @needle@ overlap, only the --- first one will be replaced: --- --- > replace "ofo" "bar" "ofofo" == "barfo" --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -replace :: Text - -- ^ @needle@ to search for. If this string is empty, an - -- error will occur. - -> Text - -- ^ @replacement@ to replace @needle@ with. - -> Text - -- ^ @haystack@ in which to search. - -> Text -replace s d = intercalate d . splitOn s -{-# INLINE replace #-} - --- ---------------------------------------------------------------------------- --- ** Case conversions (folds) - --- $case --- --- With Unicode text, it is incorrect to use combinators like @map --- toUpper@ to case convert each character of a string individually. --- Instead, use the whole-string case conversion functions from this --- module. For correctness in different writing systems, these --- functions may map one input character to two or three output --- characters. - --- | /O(n)/ Convert a string to folded case. Subject to fusion. --- --- This function is mainly useful for performing caseless (or case --- insensitive) string comparisons. --- --- A string @x@ is a caseless match for a string @y@ if and only if: --- --- @toCaseFold x == toCaseFold y@ --- --- The result string may be longer than the input string, and may --- differ from applying 'toLower' to the input string. For instance, --- the Armenian small ligature men now (U+FB13) is case folded to the --- bigram men now (U+0574 U+0576), while the micro sign (U+00B5) is --- case folded to the Greek small letter letter mu (U+03BC) instead of --- itself. -toCaseFold :: Text -> Text -toCaseFold t = unstream (S.toCaseFold (stream t)) -{-# INLINE toCaseFold #-} - --- | /O(n)/ Convert a string to lower case, using simple case --- conversion. Subject to fusion. --- --- The result string may be longer than the input string. For --- instance, the Latin capital letter I with dot above (U+0130) maps --- to the sequence Latin small letter i (U+0069) followed by combining --- dot above (U+0307). -toLower :: Text -> Text -toLower t = unstream (S.toLower (stream t)) -{-# INLINE toLower #-} - --- | /O(n)/ Convert a string to upper case, using simple case --- conversion. Subject to fusion. --- --- The result string may be longer than the input string. For --- instance, the German eszett (U+00DF) maps to the two-letter --- sequence SS. -toUpper :: Text -> Text -toUpper t = unstream (S.toUpper (stream t)) -{-# INLINE toUpper #-} - - --- | /O(n)/ Convert a string to title case, using simple case --- conversion. Subject to fusion. --- --- The first letter of the input is converted to title case, as is --- every subsequent letter that immediately follows a non-letter. --- Every letter that immediately follows another letter is converted --- to lower case. --- --- The result string may be longer than the input string. For example, --- the Latin small ligature fl (U+FB02) is converted to the --- sequence Latin capital letter F (U+0046) followed by Latin small --- letter l (U+006C). --- --- /Note/: this function does not take language or culture specific --- rules into account. For instance, in English, different style --- guides disagree on whether the book name \"The Hill of the Red --- Fox\" is correctly title cased—but this function will --- capitalize /every/ word. 
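Following the case-conversion advice above, a caseless comparison might look like this (sameKey is an invented name):

> import qualified Data.Text.Lazy as TL
>
> -- Compare case-insensitively by folding both sides, per toCaseFold's docs.
> sameKey :: TL.Text -> TL.Text -> Bool
> sameKey a b = TL.toCaseFold a == TL.toCaseFold b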
--- --- @since 1.0.0.0 -toTitle :: Text -> Text -toTitle t = unstream (S.toTitle (stream t)) -{-# INLINE toTitle #-} - --- | /O(n)/ 'foldl', applied to a binary operator, a starting value --- (typically the left-identity of the operator), and a 'Text', --- reduces the 'Text' using the binary operator, from left to right. --- Subject to fusion. -foldl :: (a -> Char -> a) -> a -> Text -> a -foldl f z t = S.foldl f z (stream t) -{-# INLINE foldl #-} - --- | /O(n)/ A strict version of 'foldl'. --- Subject to fusion. -foldl' :: (a -> Char -> a) -> a -> Text -> a -foldl' f z t = S.foldl' f z (stream t) -{-# INLINE foldl' #-} - --- | /O(n)/ A variant of 'foldl' that has no starting value argument, --- and thus must be applied to a non-empty 'Text'. Subject to fusion. -foldl1 :: (Char -> Char -> Char) -> Text -> Char -foldl1 f t = S.foldl1 f (stream t) -{-# INLINE foldl1 #-} - --- | /O(n)/ A strict version of 'foldl1'. Subject to fusion. -foldl1' :: (Char -> Char -> Char) -> Text -> Char -foldl1' f t = S.foldl1' f (stream t) -{-# INLINE foldl1' #-} - --- | /O(n)/ 'foldr', applied to a binary operator, a starting value --- (typically the right-identity of the operator), and a 'Text', --- reduces the 'Text' using the binary operator, from right to left. --- Subject to fusion. -foldr :: (Char -> a -> a) -> a -> Text -> a -foldr f z t = S.foldr f z (stream t) -{-# INLINE foldr #-} - --- | /O(n)/ A variant of 'foldr' that has no starting value argument, --- and thus must be applied to a non-empty 'Text'. Subject to --- fusion. -foldr1 :: (Char -> Char -> Char) -> Text -> Char -foldr1 f t = S.foldr1 f (stream t) -{-# INLINE foldr1 #-} - --- | /O(n)/ Concatenate a list of 'Text's. -concat :: [Text] -> Text -concat = to - where - go Empty css = to css - go (Chunk c cs) css = Chunk c (go cs css) - to [] = Empty - to (cs:css) = go cs css -{-# INLINE concat #-} - --- | /O(n)/ Map a function over a 'Text' that results in a 'Text', and --- concatenate the results. -concatMap :: (Char -> Text) -> Text -> Text -concatMap f = concat . foldr ((:) . f) [] -{-# INLINE concatMap #-} - --- | /O(n)/ 'any' @p@ @t@ determines whether any character in the --- 'Text' @t@ satisfies the predicate @p@. Subject to fusion. -any :: (Char -> Bool) -> Text -> Bool -any p t = S.any p (stream t) -{-# INLINE any #-} - --- | /O(n)/ 'all' @p@ @t@ determines whether all characters in the --- 'Text' @t@ satisfy the predicate @p@. Subject to fusion. -all :: (Char -> Bool) -> Text -> Bool -all p t = S.all p (stream t) -{-# INLINE all #-} - --- | /O(n)/ 'maximum' returns the maximum value from a 'Text', which --- must be non-empty. Subject to fusion. -maximum :: Text -> Char -maximum t = S.maximum (stream t) -{-# INLINE maximum #-} - --- | /O(n)/ 'minimum' returns the minimum value from a 'Text', which --- must be non-empty. Subject to fusion. -minimum :: Text -> Char -minimum t = S.minimum (stream t) -{-# INLINE minimum #-} - --- | /O(n)/ 'scanl' is similar to 'foldl', but returns a list of --- successive reduced values from the left. Subject to fusion. --- Performs replacement on invalid scalar values. --- --- > scanl f z [x1, x2, ...] == [z, z `f` x1, (z `f` x1) `f` x2, ...] --- --- Note that --- --- > last (scanl f z xs) == foldl f z xs. -scanl :: (Char -> Char -> Char) -> Char -> Text -> Text -scanl f z t = unstream (S.scanl g z (stream t)) - where g a b = safe (f a b) -{-# INLINE scanl #-} - --- | /O(n)/ 'scanl1' is a variant of 'scanl' that has no starting --- value argument. Performs replacement on invalid scalar values. 
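A small example of the fold interface above (countDigits is an invented name):

> import Data.Char (isDigit)
> import qualified Data.Text.Lazy as TL
>
> -- Count digits with a strict left fold to avoid building up thunks.
> countDigits :: TL.Text -> Int
> countDigits = TL.foldl' (\n c -> if isDigit c then n + 1 else n) 0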
--- --- > scanl1 f [x1, x2, ...] == [x1, x1 `f` x2, ...] -scanl1 :: (Char -> Char -> Char) -> Text -> Text -scanl1 f t0 = case uncons t0 of - Nothing -> empty - Just (t,ts) -> scanl f t ts -{-# INLINE scanl1 #-} - --- | /O(n)/ 'scanr' is the right-to-left dual of 'scanl'. Performs --- replacement on invalid scalar values. --- --- > scanr f v == reverse . scanl (flip f) v . reverse -scanr :: (Char -> Char -> Char) -> Char -> Text -> Text -scanr f v = reverse . scanl g v . reverse - where g a b = safe (f b a) - --- | /O(n)/ 'scanr1' is a variant of 'scanr' that has no starting --- value argument. Performs replacement on invalid scalar values. -scanr1 :: (Char -> Char -> Char) -> Text -> Text -scanr1 f t | null t = empty - | otherwise = scanr f (last t) (init t) - --- | /O(n)/ Like a combination of 'map' and 'foldl''. Applies a --- function to each element of a 'Text', passing an accumulating --- parameter from left to right, and returns a final 'Text'. Performs --- replacement on invalid scalar values. -mapAccumL :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) -mapAccumL f = go - where - go z (Chunk c cs) = (z'', Chunk c' cs') - where (z', c') = T.mapAccumL f z c - (z'', cs') = go z' cs - go z Empty = (z, Empty) -{-# INLINE mapAccumL #-} - --- | The 'mapAccumR' function behaves like a combination of 'map' and --- a strict 'foldr'; it applies a function to each element of a --- 'Text', passing an accumulating parameter from right to left, and --- returning a final value of this accumulator together with the new --- 'Text'. Performs replacement on invalid scalar values. -mapAccumR :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) -mapAccumR f = go - where - go z (Chunk c cs) = (z'', Chunk c' cs') - where (z'', c') = T.mapAccumR f z' c - (z', cs') = go z cs - go z Empty = (z, Empty) -{-# INLINE mapAccumR #-} - --- | @'repeat' x@ is an infinite 'Text', with @x@ the value of every --- element. --- --- @since 1.2.0.5 -repeat :: Char -> Text -repeat c = let t = Chunk (T.replicate smallChunkSize (T.singleton c)) t - in t - --- | /O(n*m)/ 'replicate' @n@ @t@ is a 'Text' consisting of the input --- @t@ repeated @n@ times. -replicate :: Int64 -> Text -> Text -replicate n t - | null t || n <= 0 = empty - | isSingleton t = replicateChar n (head t) - | otherwise = concat (rep 0) - where rep !i | i >= n = [] - | otherwise = t : rep (i+1) -{-# INLINE [1] replicate #-} - --- | 'cycle' ties a finite, non-empty 'Text' into a circular one, or --- equivalently, the infinite repetition of the original 'Text'. --- --- @since 1.2.0.5 -cycle :: Text -> Text -cycle Empty = emptyError "cycle" -cycle t = let t' = foldrChunks Chunk t' t - in t' - --- | @'iterate' f x@ returns an infinite 'Text' of repeated applications --- of @f@ to @x@: --- --- > iterate f x == [x, f x, f (f x), ...] --- --- @since 1.2.0.5 -iterate :: (Char -> Char) -> Char -> Text -iterate f c = let t c' = Chunk (T.singleton c') (t (f c')) - in t c - --- | /O(n)/ 'replicateChar' @n@ @c@ is a 'Text' of length @n@ with @c@ the --- value of every element. Subject to fusion. -replicateChar :: Int64 -> Char -> Text -replicateChar n c = unstream (S.replicateCharI n (safe c)) -{-# INLINE replicateChar #-} - -{-# RULES -"LAZY TEXT replicate/singleton -> replicateChar" [~1] forall n c. - replicate n (singleton c) = replicateChar n c -"LAZY TEXT replicate/unstream/singleton -> replicateChar" [~1] forall n c. - replicate n (unstream (S.singleton c)) = replicateChar n c - #-} - --- | /O(n)/, where @n@ is the length of the result. 
The 'unfoldr' --- function is analogous to the List 'L.unfoldr'. 'unfoldr' builds a --- 'Text' from a seed value. The function takes the element and --- returns 'Nothing' if it is done producing the 'Text', otherwise --- 'Just' @(a,b)@. In this case, @a@ is the next 'Char' in the --- string, and @b@ is the seed value for further production. --- Subject to fusion. --- Performs replacement on invalid scalar values. -unfoldr :: (a -> Maybe (Char,a)) -> a -> Text -unfoldr f s = unstream (S.unfoldr (firstf safe . f) s) -{-# INLINE unfoldr #-} - --- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a 'Text' from a seed --- value. However, the length of the result should be limited by the --- first argument to 'unfoldrN'. This function is more efficient than --- 'unfoldr' when the maximum length of the result is known and --- correct, otherwise its performance is similar to 'unfoldr'. --- Subject to fusion. --- Performs replacement on invalid scalar values. -unfoldrN :: Int64 -> (a -> Maybe (Char,a)) -> a -> Text -unfoldrN n f s = unstream (S.unfoldrN n (firstf safe . f) s) -{-# INLINE unfoldrN #-} - --- | /O(n)/ 'take' @n@, applied to a 'Text', returns the prefix of the --- 'Text' of length @n@, or the 'Text' itself if @n@ is greater than --- the length of the Text. Subject to fusion. -take :: Int64 -> Text -> Text -take i _ | i <= 0 = Empty -take i t0 = take' i t0 - where take' 0 _ = Empty - take' _ Empty = Empty - take' n (Chunk t ts) - | n < len = Chunk (T.take (fromIntegral n) t) Empty - | otherwise = Chunk t (take' (n - len) ts) - where len = fromIntegral (T.length t) -{-# INLINE [1] take #-} - -{-# RULES -"LAZY TEXT take -> fused" [~1] forall n t. - take n t = unstream (S.take n (stream t)) -"LAZY TEXT take -> unfused" [1] forall n t. - unstream (S.take n (stream t)) = take n t - #-} - --- | /O(n)/ 'takeEnd' @n@ @t@ returns the suffix remaining after --- taking @n@ characters from the end of @t@. --- --- Examples: --- --- > takeEnd 3 "foobar" == "bar" --- --- @since 1.1.1.0 -takeEnd :: Int64 -> Text -> Text -takeEnd n t0 - | n <= 0 = empty - | otherwise = takeChunk n empty . L.reverse . toChunks $ t0 - where takeChunk _ acc [] = acc - takeChunk i acc (t:ts) - | i <= l = chunk (T.takeEnd (fromIntegral i) t) acc - | otherwise = takeChunk (i-l) (Chunk t acc) ts - where l = fromIntegral (T.length t) - --- | /O(n)/ 'drop' @n@, applied to a 'Text', returns the suffix of the --- 'Text' after the first @n@ characters, or the empty 'Text' if @n@ --- is greater than the length of the 'Text'. Subject to fusion. -drop :: Int64 -> Text -> Text -drop i t0 - | i <= 0 = t0 - | otherwise = drop' i t0 - where drop' 0 ts = ts - drop' _ Empty = Empty - drop' n (Chunk t ts) - | n < len = Chunk (T.drop (fromIntegral n) t) ts - | otherwise = drop' (n - len) ts - where len = fromIntegral (T.length t) -{-# INLINE [1] drop #-} - -{-# RULES -"LAZY TEXT drop -> fused" [~1] forall n t. - drop n t = unstream (S.drop n (stream t)) -"LAZY TEXT drop -> unfused" [1] forall n t. - unstream (S.drop n (stream t)) = drop n t - #-} - --- | /O(n)/ 'dropEnd' @n@ @t@ returns the prefix remaining after --- dropping @n@ characters from the end of @t@. --- --- Examples: --- --- > dropEnd 3 "foobar" == "foo" --- --- @since 1.1.1.0 -dropEnd :: Int64 -> Text -> Text -dropEnd n t0 - | n <= 0 = t0 - | otherwise = dropChunk n . L.reverse . toChunks $ t0 - where dropChunk _ [] = empty - dropChunk m (t:ts) - | m >= l = dropChunk (m-l) ts - | otherwise = fromChunks . 
L.reverse $ - T.dropEnd (fromIntegral m) t : ts - where l = fromIntegral (T.length t) - --- | /O(n)/ 'dropWords' @n@ returns the suffix with @n@ 'Word16' --- values dropped, or the empty 'Text' if @n@ is greater than the --- number of 'Word16' values present. -dropWords :: Int64 -> Text -> Text -dropWords i t0 - | i <= 0 = t0 - | otherwise = drop' i t0 - where drop' 0 ts = ts - drop' _ Empty = Empty - drop' n (Chunk (T.Text arr off len) ts) - | n < len' = chunk (text arr (off+n') (len-n')) ts - | otherwise = drop' (n - len') ts - where len' = fromIntegral len - n' = fromIntegral n - --- | /O(n)/ 'takeWhile', applied to a predicate @p@ and a 'Text', --- returns the longest prefix (possibly empty) of elements that --- satisfy @p@. Subject to fusion. -takeWhile :: (Char -> Bool) -> Text -> Text -takeWhile p t0 = takeWhile' t0 - where takeWhile' Empty = Empty - takeWhile' (Chunk t ts) = - case T.findIndex (not . p) t of - Just n | n > 0 -> Chunk (T.take n t) Empty - | otherwise -> Empty - Nothing -> Chunk t (takeWhile' ts) -{-# INLINE [1] takeWhile #-} - -{-# RULES -"LAZY TEXT takeWhile -> fused" [~1] forall p t. - takeWhile p t = unstream (S.takeWhile p (stream t)) -"LAZY TEXT takeWhile -> unfused" [1] forall p t. - unstream (S.takeWhile p (stream t)) = takeWhile p t - #-} --- | /O(n)/ 'takeWhileEnd', applied to a predicate @p@ and a 'Text', --- returns the longest suffix (possibly empty) of elements that --- satisfy @p@. --- Examples: --- --- > takeWhileEnd (=='o') "foo" == "oo" --- --- @since 1.2.2.0 -takeWhileEnd :: (Char -> Bool) -> Text -> Text -takeWhileEnd p = takeChunk empty . L.reverse . toChunks - where takeChunk acc [] = acc - takeChunk acc (t:ts) - | T.lengthWord16 t' < T.lengthWord16 t - = chunk t' acc - | otherwise = takeChunk (Chunk t' acc) ts - where t' = T.takeWhileEnd p t -{-# INLINE takeWhileEnd #-} - --- | /O(n)/ 'dropWhile' @p@ @t@ returns the suffix remaining after --- 'takeWhile' @p@ @t@. Subject to fusion. -dropWhile :: (Char -> Bool) -> Text -> Text -dropWhile p t0 = dropWhile' t0 - where dropWhile' Empty = Empty - dropWhile' (Chunk t ts) = - case T.findIndex (not . p) t of - Just n -> Chunk (T.drop n t) ts - Nothing -> dropWhile' ts -{-# INLINE [1] dropWhile #-} - -{-# RULES -"LAZY TEXT dropWhile -> fused" [~1] forall p t. - dropWhile p t = unstream (S.dropWhile p (stream t)) -"LAZY TEXT dropWhile -> unfused" [1] forall p t. - unstream (S.dropWhile p (stream t)) = dropWhile p t - #-} - --- | /O(n)/ 'dropWhileEnd' @p@ @t@ returns the prefix remaining after --- dropping characters that satisfy the predicate @p@ from the end of --- @t@. --- --- Examples: --- --- > dropWhileEnd (=='.') "foo..." == "foo" -dropWhileEnd :: (Char -> Bool) -> Text -> Text -dropWhileEnd p = go - where go Empty = Empty - go (Chunk t Empty) = if T.null t' - then Empty - else Chunk t' Empty - where t' = T.dropWhileEnd p t - go (Chunk t ts) = case go ts of - Empty -> go (Chunk t Empty) - ts' -> Chunk t ts' -{-# INLINE dropWhileEnd #-} - --- | /O(n)/ 'dropAround' @p@ @t@ returns the substring remaining after --- dropping characters that satisfy the predicate @p@ from both the --- beginning and end of @t@. -dropAround :: (Char -> Bool) -> Text -> Text -dropAround p = dropWhile p . dropWhileEnd p -{-# INLINE [1] dropAround #-} - --- | /O(n)/ Remove leading white space from a string. Equivalent to: --- --- > dropWhile isSpace -stripStart :: Text -> Text -stripStart = dropWhile isSpace -{-# INLINE stripStart #-} - --- | /O(n)/ Remove trailing white space from a string. 
Equivalent to: --- --- > dropWhileEnd isSpace -stripEnd :: Text -> Text -stripEnd = dropWhileEnd isSpace -{-# INLINE [1] stripEnd #-} - --- | /O(n)/ Remove leading and trailing white space from a string. --- Equivalent to: --- --- > dropAround isSpace -strip :: Text -> Text -strip = dropAround isSpace -{-# INLINE [1] strip #-} - --- | /O(n)/ 'splitAt' @n t@ returns a pair whose first element is a --- prefix of @t@ of length @n@, and whose second is the remainder of --- the string. It is equivalent to @('take' n t, 'drop' n t)@. -splitAt :: Int64 -> Text -> (Text, Text) -splitAt = loop - where loop _ Empty = (empty, empty) - loop n t | n <= 0 = (empty, t) - loop n (Chunk t ts) - | n < len = let (t',t'') = T.splitAt (fromIntegral n) t - in (Chunk t' Empty, Chunk t'' ts) - | otherwise = let (ts',ts'') = loop (n - len) ts - in (Chunk t ts', ts'') - where len = fromIntegral (T.length t) - --- | /O(n)/ 'splitAtWord' @n t@ returns a strict pair whose first --- element is a prefix of @t@ whose chunks contain @n@ 'Word16' --- values, and whose second is the remainder of the string. -splitAtWord :: Int64 -> Text -> PairS Text Text -splitAtWord _ Empty = empty :*: empty -splitAtWord x (Chunk c@(T.Text arr off len) cs) - | y >= len = let h :*: t = splitAtWord (x-fromIntegral len) cs - in Chunk c h :*: t - | otherwise = chunk (text arr off y) empty :*: - chunk (text arr (off+y) (len-y)) cs - where y = fromIntegral x - --- | /O(n+m)/ Find the first instance of @needle@ (which must be --- non-'null') in @haystack@. The first element of the returned tuple --- is the prefix of @haystack@ before @needle@ is matched. The second --- is the remainder of @haystack@, starting with the match. --- --- Examples: --- --- > breakOn "::" "a::b::c" ==> ("a", "::b::c") --- > breakOn "/" "foobar" ==> ("foobar", "") --- --- Laws: --- --- > append prefix match == haystack --- > where (prefix, match) = breakOn needle haystack --- --- If you need to break a string by a substring repeatedly (e.g. you --- want to break on every instance of a substring), use 'breakOnAll' --- instead, as it has lower startup overhead. --- --- This function is strict in its first argument, and lazy in its --- second. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -breakOn :: Text -> Text -> (Text, Text) -breakOn pat src - | null pat = emptyError "breakOn" - | otherwise = case indices pat src of - [] -> (src, empty) - (x:_) -> let h :*: t = splitAtWord x src - in (h, t) - --- | /O(n+m)/ Similar to 'breakOn', but searches from the end of the string. --- --- The first element of the returned tuple is the prefix of @haystack@ --- up to and including the last match of @needle@. The second is the --- remainder of @haystack@, following the match. --- --- > breakOnEnd "::" "a::b::c" ==> ("a::b::", "c") -breakOnEnd :: Text -> Text -> (Text, Text) -breakOnEnd pat src = let (a,b) = breakOn (reverse pat) (reverse src) - in (reverse b, reverse a) -{-# INLINE breakOnEnd #-} - --- | /O(n+m)/ Find all non-overlapping instances of @needle@ in --- @haystack@. Each element of the returned list consists of a pair: --- --- * The entire string prior to the /k/th match (i.e. the prefix) --- --- * The /k/th match, followed by the remainder of the string --- --- Examples: --- --- > breakOnAll "::" "" --- > ==> [] --- > breakOnAll "/" "a/b/c/" --- > ==> [("a", "/b/c/"), ("a/b", "/c/"), ("a/b/c", "/")] --- --- This function is strict in its first argument, and lazy in its --- second. 
--- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. --- --- The @needle@ parameter may not be empty. -breakOnAll :: Text -- ^ @needle@ to search for - -> Text -- ^ @haystack@ in which to search - -> [(Text, Text)] -breakOnAll pat src - | null pat = emptyError "breakOnAll" - | otherwise = go 0 empty src (indices pat src) - where - go !n p s (x:xs) = let h :*: t = splitAtWord (x-n) s - h' = append p h - in (h',t) : go x h' t xs - go _ _ _ _ = [] - --- | /O(n)/ 'break' is like 'span', but the prefix returned is over --- elements that fail the predicate @p@. -break :: (Char -> Bool) -> Text -> (Text, Text) -break p t0 = break' t0 - where break' Empty = (empty, empty) - break' c@(Chunk t ts) = - case T.findIndex p t of - Nothing -> let (ts', ts'') = break' ts - in (Chunk t ts', ts'') - Just n | n == 0 -> (Empty, c) - | otherwise -> let (a,b) = T.splitAt n t - in (Chunk a Empty, Chunk b ts) - --- | /O(n)/ 'span', applied to a predicate @p@ and text @t@, returns --- a pair whose first element is the longest prefix (possibly empty) --- of @t@ of elements that satisfy @p@, and whose second is the --- remainder of the list. -span :: (Char -> Bool) -> Text -> (Text, Text) -span p = break (not . p) -{-# INLINE span #-} - --- | The 'group' function takes a 'Text' and returns a list of 'Text's --- such that the concatenation of the result is equal to the argument. --- Moreover, each sublist in the result contains only equal elements. --- For example, --- --- > group "Mississippi" = ["M","i","ss","i","ss","i","pp","i"] --- --- It is a special case of 'groupBy', which allows the programmer to --- supply their own equality test. -group :: Text -> [Text] -group = groupBy (==) -{-# INLINE group #-} - --- | The 'groupBy' function is the non-overloaded version of 'group'. -groupBy :: (Char -> Char -> Bool) -> Text -> [Text] -groupBy _ Empty = [] -groupBy eq (Chunk t ts) = cons x ys : groupBy eq zs - where (ys,zs) = span (eq x) xs - x = T.unsafeHead t - xs = chunk (T.unsafeTail t) ts - --- | /O(n)/ Return all initial segments of the given 'Text', --- shortest first. -inits :: Text -> [Text] -inits = (Empty :) . inits' - where inits' Empty = [] - inits' (Chunk t ts) = L.map (\t' -> Chunk t' Empty) (L.tail (T.inits t)) - ++ L.map (Chunk t) (inits' ts) - --- | /O(n)/ Return all final segments of the given 'Text', longest --- first. -tails :: Text -> [Text] -tails Empty = Empty : [] -tails ts@(Chunk t ts') - | T.length t == 1 = ts : tails ts' - | otherwise = ts : tails (Chunk (T.unsafeTail t) ts') - --- $split --- --- Splitting functions in this library do not perform character-wise --- copies to create substrings; they just construct new 'Text's that --- are slices of the original. - --- | /O(m+n)/ Break a 'Text' into pieces separated by the first 'Text' --- argument (which cannot be an empty string), consuming the --- delimiter. An empty delimiter is invalid, and will cause an error --- to be raised. --- --- Examples: --- --- > splitOn "\r\n" "a\r\nb\r\nd\r\ne" == ["a","b","d","e"] --- > splitOn "aaa" "aaaXaaaXaaaXaaa" == ["","X","X","X",""] --- > splitOn "x" "x" == ["",""] --- --- and --- --- > intercalate s . splitOn s == id --- > splitOn (singleton c) == split (==c) --- --- (Note: the string @s@ to split on above cannot be empty.) --- --- This function is strict in its first argument, and lazy in its --- second. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -splitOn :: Text - -- ^ String to split on. 
If this string is empty, an error - -- will occur. - -> Text - -- ^ Input text. - -> [Text] -splitOn pat src - | null pat = emptyError "splitOn" - | isSingleton pat = split (== head pat) src - | otherwise = go 0 (indices pat src) src - where - go _ [] cs = [cs] - go !i (x:xs) cs = let h :*: t = splitAtWord (x-i) cs - in h : go (x+l) xs (dropWords l t) - l = foldlChunks (\a (T.Text _ _ b) -> a + fromIntegral b) 0 pat -{-# INLINE [1] splitOn #-} - -{-# RULES -"LAZY TEXT splitOn/singleton -> split/==" [~1] forall c t. - splitOn (singleton c) t = split (==c) t - #-} - --- | /O(n)/ Splits a 'Text' into components delimited by separators, --- where the predicate returns True for a separator element. The --- resulting components do not contain the separators. Two adjacent --- separators result in an empty component in the output. eg. --- --- > split (=='a') "aabbaca" == ["","","bb","c",""] --- > split (=='a') [] == [""] -split :: (Char -> Bool) -> Text -> [Text] -split _ Empty = [Empty] -split p (Chunk t0 ts0) = comb [] (T.split p t0) ts0 - where comb acc (s:[]) Empty = revChunks (s:acc) : [] - comb acc (s:[]) (Chunk t ts) = comb (s:acc) (T.split p t) ts - comb acc (s:ss) ts = revChunks (s:acc) : comb [] ss ts - comb _ [] _ = impossibleError "split" -{-# INLINE split #-} - --- | /O(n)/ Splits a 'Text' into components of length @k@. The last --- element may be shorter than the other chunks, depending on the --- length of the input. Examples: --- --- > chunksOf 3 "foobarbaz" == ["foo","bar","baz"] --- > chunksOf 4 "haskell.org" == ["hask","ell.","org"] -chunksOf :: Int64 -> Text -> [Text] -chunksOf k = go - where - go t = case splitAt k t of - (a,b) | null a -> [] - | otherwise -> a : go b -{-# INLINE chunksOf #-} - --- | /O(n)/ Breaks a 'Text' up into a list of 'Text's at --- newline 'Char's. The resulting strings do not contain newlines. -lines :: Text -> [Text] -lines Empty = [] -lines t = let (l,t') = break ((==) '\n') t - in l : if null t' then [] - else lines (tail t') - --- | /O(n)/ Breaks a 'Text' up into a list of words, delimited by 'Char's --- representing white space. -words :: Text -> [Text] -words = L.filter (not . null) . split isSpace -{-# INLINE words #-} - --- | /O(n)/ Joins lines, after appending a terminating newline to --- each. -unlines :: [Text] -> Text -unlines = concat . L.map (`snoc` '\n') -{-# INLINE unlines #-} - --- | /O(n)/ Joins words using single space characters. -unwords :: [Text] -> Text -unwords = intercalate (singleton ' ') -{-# INLINE unwords #-} - --- | /O(n)/ The 'isPrefixOf' function takes two 'Text's and returns --- 'True' iff the first is a prefix of the second. Subject to fusion. -isPrefixOf :: Text -> Text -> Bool -isPrefixOf Empty _ = True -isPrefixOf _ Empty = False -isPrefixOf (Chunk x xs) (Chunk y ys) - | lx == ly = x == y && isPrefixOf xs ys - | lx < ly = x == yh && isPrefixOf xs (Chunk yt ys) - | otherwise = xh == y && isPrefixOf (Chunk xt xs) ys - where (xh,xt) = T.splitAt ly x - (yh,yt) = T.splitAt lx y - lx = T.length x - ly = T.length y -{-# INLINE [1] isPrefixOf #-} - -{-# RULES -"LAZY TEXT isPrefixOf -> fused" [~1] forall s t. - isPrefixOf s t = S.isPrefixOf (stream s) (stream t) -"LAZY TEXT isPrefixOf -> unfused" [1] forall s t. - S.isPrefixOf (stream s) (stream t) = isPrefixOf s t - #-} - --- | /O(n)/ The 'isSuffixOf' function takes two 'Text's and returns --- 'True' iff the first is a suffix of the second. 
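A minimal usage sketch of the splitting functions documented above (added for illustration, not part of the original module; the alias TL and the example inputs are assumptions, and the expected outputs follow the documentation):

{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Text.Lazy as TL

main :: IO ()
main = do
  -- splitOn consumes the (non-empty) delimiter; intercalate undoes it.
  print (TL.splitOn "\r\n" "a\r\nb\r\nd\r\ne")             -- ["a","b","d","e"]
  print (TL.intercalate "::" (TL.splitOn "::" "a::b::c"))  -- "a::b::c"
  -- Adjacent separators yield empty components.
  print (TL.split (== 'a') "aabbaca")                      -- ["","","bb","c",""]
  -- words drops empty components; lines strips the newlines themselves.
  print (TL.words "  foo bar  baz ")                       -- ["foo","bar","baz"]
  print (TL.lines "one\ntwo\n")                            -- ["one","two"]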
-isSuffixOf :: Text -> Text -> Bool -isSuffixOf x y = reverse x `isPrefixOf` reverse y -{-# INLINE isSuffixOf #-} --- TODO: a better implementation - --- | /O(n+m)/ The 'isInfixOf' function takes two 'Text's and returns --- 'True' iff the first is contained, wholly and intact, anywhere --- within the second. --- --- This function is strict in its first argument, and lazy in its --- second. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -isInfixOf :: Text -> Text -> Bool -isInfixOf needle haystack - | null needle = True - | isSingleton needle = S.elem (head needle) . S.stream $ haystack - | otherwise = not . L.null . indices needle $ haystack -{-# INLINE [1] isInfixOf #-} - -{-# RULES -"LAZY TEXT isInfixOf/singleton -> S.elem/S.stream" [~1] forall n h. - isInfixOf (singleton n) h = S.elem n (S.stream h) - #-} - -------------------------------------------------------------------------------- --- * View patterns - --- | /O(n)/ Return the suffix of the second string if its prefix --- matches the entire first string. --- --- Examples: --- --- > stripPrefix "foo" "foobar" == Just "bar" --- > stripPrefix "" "baz" == Just "baz" --- > stripPrefix "foo" "quux" == Nothing --- --- This is particularly useful with the @ViewPatterns@ extension to --- GHC, as follows: --- --- > {-# LANGUAGE ViewPatterns #-} --- > import Data.Text.Lazy as T --- > --- > fnordLength :: Text -> Int --- > fnordLength (stripPrefix "fnord" -> Just suf) = T.length suf --- > fnordLength _ = -1 -stripPrefix :: Text -> Text -> Maybe Text -stripPrefix p t - | null p = Just t - | otherwise = case commonPrefixes p t of - Just (_,c,r) | null c -> Just r - _ -> Nothing - --- | /O(n)/ Find the longest non-empty common prefix of two strings --- and return it, along with the suffixes of each string at which they --- no longer match. --- --- If the strings do not have a common prefix or either one is empty, --- this function returns 'Nothing'. --- --- Examples: --- --- > commonPrefixes "foobar" "fooquux" == Just ("foo","bar","quux") --- > commonPrefixes "veeble" "fetzer" == Nothing --- > commonPrefixes "" "baz" == Nothing -commonPrefixes :: Text -> Text -> Maybe (Text,Text,Text) -commonPrefixes Empty _ = Nothing -commonPrefixes _ Empty = Nothing -commonPrefixes a0 b0 = Just (go a0 b0 []) - where - go t0@(Chunk x xs) t1@(Chunk y ys) ps - = case T.commonPrefixes x y of - Just (p,a,b) - | T.null a -> go xs (chunk b ys) (p:ps) - | T.null b -> go (chunk a xs) ys (p:ps) - | otherwise -> (fromChunks (L.reverse (p:ps)),chunk a xs, chunk b ys) - Nothing -> (fromChunks (L.reverse ps),t0,t1) - go t0 t1 ps = (fromChunks (L.reverse ps),t0,t1) - --- | /O(n)/ Return the prefix of the second string if its suffix --- matches the entire first string. --- --- Examples: --- --- > stripSuffix "bar" "foobar" == Just "foo" --- > stripSuffix "" "baz" == Just "baz" --- > stripSuffix "foo" "quux" == Nothing --- --- This is particularly useful with the @ViewPatterns@ extension to --- GHC, as follows: --- --- > {-# LANGUAGE ViewPatterns #-} --- > import Data.Text.Lazy as T --- > --- > quuxLength :: Text -> Int --- > quuxLength (stripSuffix "quux" -> Just pre) = T.length pre --- > quuxLength _ = -1 -stripSuffix :: Text -> Text -> Maybe Text -stripSuffix p t = reverse `fmap` stripPrefix (reverse p) (reverse t) - --- | /O(n)/ 'filter', applied to a predicate and a 'Text', --- returns a 'Text' containing those characters that satisfy the --- predicate. 
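A brief sketch of the prefix and suffix helpers documented above, in the ViewPatterns style the documentation suggests (illustrative only; dropFnord and the alias TL are assumed names):

{-# LANGUAGE OverloadedStrings, ViewPatterns #-}
import qualified Data.Text.Lazy as TL

-- Strip a known prefix, falling back to the input unchanged.
dropFnord :: TL.Text -> TL.Text
dropFnord (TL.stripPrefix "fnord" -> Just rest) = rest
dropFnord t                                     = t

main :: IO ()
main = do
  print (dropFnord "fnordbar")                  -- "bar"
  print (TL.stripSuffix "bar" "foobar")         -- Just "foo"
  print (TL.commonPrefixes "foobar" "fooquux")  -- Just ("foo","bar","quux")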
-filter :: (Char -> Bool) -> Text -> Text -filter p t = unstream (S.filter p (stream t)) -{-# INLINE filter #-} - --- | /O(n)/ The 'find' function takes a predicate and a 'Text', and --- returns the first element in matching the predicate, or 'Nothing' --- if there is no such element. Subject to fusion. -find :: (Char -> Bool) -> Text -> Maybe Char -find p t = S.findBy p (stream t) -{-# INLINE find #-} - --- | /O(n)/ The 'partition' function takes a predicate and a 'Text', --- and returns the pair of 'Text's with elements which do and do not --- satisfy the predicate, respectively; i.e. --- --- > partition p t == (filter p t, filter (not . p) t) -partition :: (Char -> Bool) -> Text -> (Text, Text) -partition p t = (filter p t, filter (not . p) t) -{-# INLINE partition #-} - --- | /O(n)/ 'Text' index (subscript) operator, starting from 0. --- Subject to fusion. -index :: Text -> Int64 -> Char -index t n = S.index (stream t) n -{-# INLINE index #-} - --- | /O(n+m)/ The 'count' function returns the number of times the --- query string appears in the given 'Text'. An empty query string is --- invalid, and will cause an error to be raised. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -count :: Text -> Text -> Int64 -count pat src - | null pat = emptyError "count" - | otherwise = go 0 (indices pat src) - where go !n [] = n - go !n (_:xs) = go (n+1) xs -{-# INLINE [1] count #-} - -{-# RULES -"LAZY TEXT count/singleton -> countChar" [~1] forall c t. - count (singleton c) t = countChar c t - #-} - --- | /O(n)/ The 'countChar' function returns the number of times the --- query element appears in the given 'Text'. Subject to fusion. -countChar :: Char -> Text -> Int64 -countChar c t = S.countChar c (stream t) - --- | /O(n)/ 'zip' takes two 'Text's and returns a list of --- corresponding pairs of bytes. If one input 'Text' is short, --- excess elements of the longer 'Text' are discarded. This is --- equivalent to a pair of 'unpack' operations. -zip :: Text -> Text -> [(Char,Char)] -zip a b = S.unstreamList $ S.zipWith (,) (stream a) (stream b) -{-# INLINE [0] zip #-} - --- | /O(n)/ 'zipWith' generalises 'zip' by zipping with the function --- given as the first argument, instead of a tupling function. --- Performs replacement on invalid scalar values. -zipWith :: (Char -> Char -> Char) -> Text -> Text -> Text -zipWith f t1 t2 = unstream (S.zipWith g (stream t1) (stream t2)) - where g a b = safe (f a b) -{-# INLINE [0] zipWith #-} - -revChunks :: [T.Text] -> Text -revChunks = L.foldl' (flip chunk) Empty - -emptyError :: String -> a -emptyError fun = P.error ("Data.Text.Lazy." ++ fun ++ ": empty input") - -impossibleError :: String -> a -impossibleError fun = P.error ("Data.Text.Lazy." 
++ fun ++ ": impossible case") diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Read.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Read.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Read.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Read.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,200 +0,0 @@ -{-# LANGUAGE OverloadedStrings, UnboxedTuples, CPP #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - --- | --- Module : Data.Text.Read --- Copyright : (c) 2010, 2011 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : GHC --- --- Functions used frequently when reading textual data. -module Data.Text.Read - ( - Reader - , decimal - , hexadecimal - , signed - , rational - , double - ) where - -import Control.Monad (liftM) -import Data.Char (isDigit, isHexDigit) -import Data.Int (Int8, Int16, Int32, Int64) -import Data.Ratio ((%)) -import Data.Text as T -import Data.Text.Internal.Private (span_) -import Data.Text.Internal.Read -import Data.Word (Word, Word8, Word16, Word32, Word64) - --- | Read some text. If the read succeeds, return its value and the --- remaining text, otherwise an error message. -type Reader a = IReader Text a -type Parser a = IParser Text a - --- | Read a decimal integer. The input must begin with at least one --- decimal digit, and is consumed until a non-digit or end of string --- is reached. --- --- This function does not handle leading sign characters. If you need --- to handle signed input, use @'signed' 'decimal'@. --- --- /Note/: For fixed-width integer types, this function does not --- attempt to detect overflow, so a sufficiently long input may give --- incorrect results. If you are worried about overflow, use --- 'Integer' for your result type. -decimal :: Integral a => Reader a -{-# SPECIALIZE decimal :: Reader Int #-} -{-# SPECIALIZE decimal :: Reader Int8 #-} -{-# SPECIALIZE decimal :: Reader Int16 #-} -{-# SPECIALIZE decimal :: Reader Int32 #-} -{-# SPECIALIZE decimal :: Reader Int64 #-} -{-# SPECIALIZE decimal :: Reader Integer #-} -{-# SPECIALIZE decimal :: Reader Data.Word.Word #-} -{-# SPECIALIZE decimal :: Reader Word8 #-} -{-# SPECIALIZE decimal :: Reader Word16 #-} -{-# SPECIALIZE decimal :: Reader Word32 #-} -{-# SPECIALIZE decimal :: Reader Word64 #-} -decimal txt - | T.null h = Left "input does not start with a digit" - | otherwise = Right (T.foldl' go 0 h, t) - where (# h,t #) = span_ isDigit txt - go n d = (n * 10 + fromIntegral (digitToInt d)) - --- | Read a hexadecimal integer, consisting of an optional leading --- @\"0x\"@ followed by at least one hexadecimal digit. Input is --- consumed until a non-hex-digit or end of string is reached. --- This function is case insensitive. --- --- This function does not handle leading sign characters. If you need --- to handle signed input, use @'signed' 'hexadecimal'@. --- --- /Note/: For fixed-width integer types, this function does not --- attempt to detect overflow, so a sufficiently long input may give --- incorrect results. If you are worried about overflow, use --- 'Integer' for your result type. 
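An illustrative sketch of running a Reader (not from the original file; the alias TR, the type annotations, and the sample inputs are assumptions), showing that the unconsumed remainder is returned alongside the parsed value:

{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Text as T
import qualified Data.Text.Read as TR

main :: IO ()
main = do
  -- A Reader yields the value and the rest of the input, or an error message.
  print (TR.decimal ("123abc" :: T.Text) :: Either String (Int, T.Text))
    -- Right (123,"abc")
  -- 'signed' adds handling of an optional leading sign.
  print (TR.signed TR.decimal ("-42" :: T.Text) :: Either String (Int, T.Text))
    -- Right (-42,"")
  print (TR.decimal ("abc" :: T.Text) :: Either String (Int, T.Text))
    -- Left "input does not start with a digit"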
-hexadecimal :: Integral a => Reader a -{-# SPECIALIZE hexadecimal :: Reader Int #-} -{-# SPECIALIZE hexadecimal :: Reader Int8 #-} -{-# SPECIALIZE hexadecimal :: Reader Int16 #-} -{-# SPECIALIZE hexadecimal :: Reader Int32 #-} -{-# SPECIALIZE hexadecimal :: Reader Int64 #-} -{-# SPECIALIZE hexadecimal :: Reader Integer #-} -{-# SPECIALIZE hexadecimal :: Reader Word #-} -{-# SPECIALIZE hexadecimal :: Reader Word8 #-} -{-# SPECIALIZE hexadecimal :: Reader Word16 #-} -{-# SPECIALIZE hexadecimal :: Reader Word32 #-} -{-# SPECIALIZE hexadecimal :: Reader Word64 #-} -hexadecimal txt - | h == "0x" || h == "0X" = hex t - | otherwise = hex txt - where (h,t) = T.splitAt 2 txt - -hex :: Integral a => Reader a -{-# SPECIALIZE hex :: Reader Int #-} -{-# SPECIALIZE hex :: Reader Int8 #-} -{-# SPECIALIZE hex :: Reader Int16 #-} -{-# SPECIALIZE hex :: Reader Int32 #-} -{-# SPECIALIZE hex :: Reader Int64 #-} -{-# SPECIALIZE hex :: Reader Integer #-} -{-# SPECIALIZE hex :: Reader Word #-} -{-# SPECIALIZE hex :: Reader Word8 #-} -{-# SPECIALIZE hex :: Reader Word16 #-} -{-# SPECIALIZE hex :: Reader Word32 #-} -{-# SPECIALIZE hex :: Reader Word64 #-} -hex txt - | T.null h = Left "input does not start with a hexadecimal digit" - | otherwise = Right (T.foldl' go 0 h, t) - where (# h,t #) = span_ isHexDigit txt - go n d = (n * 16 + fromIntegral (hexDigitToInt d)) - --- | Read an optional leading sign character (@\'-\'@ or @\'+\'@) and --- apply it to the result of applying the given reader. -signed :: Num a => Reader a -> Reader a -{-# INLINE signed #-} -signed f = runP (signa (P f)) - --- | Read a rational number. --- --- This function accepts an optional leading sign character, followed --- by at least one decimal digit. The syntax similar to that accepted --- by the 'read' function, with the exception that a trailing @\'.\'@ --- or @\'e\'@ /not/ followed by a number is not consumed. --- --- Examples (with behaviour identical to 'read'): --- --- >rational "3" == Right (3.0, "") --- >rational "3.1" == Right (3.1, "") --- >rational "3e4" == Right (30000.0, "") --- >rational "3.1e4" == Right (31000.0, "") --- >rational ".3" == Left "input does not start with a digit" --- >rational "e3" == Left "input does not start with a digit" --- --- Examples of differences from 'read': --- --- >rational "3.foo" == Right (3.0, ".foo") --- >rational "3e" == Right (3.0, "e") -rational :: Fractional a => Reader a -{-# SPECIALIZE rational :: Reader Double #-} -rational = floaty $ \real frac fracDenom -> fromRational $ - real % 1 + frac % fracDenom - --- | Read a rational number. --- --- The syntax accepted by this function is the same as for 'rational'. --- --- /Note/: This function is almost ten times faster than 'rational', --- but is slightly less accurate. --- --- The 'Double' type supports about 16 decimal places of accuracy. --- For 94.2% of numbers, this function and 'rational' give identical --- results, but for the remaining 5.8%, this function loses precision --- around the 15th decimal place. For 0.001% of numbers, this --- function will lose precision at the 13th or 14th decimal place. 
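A companion sketch for the hexadecimal and fractional readers (illustrative; the expected results follow the documentation above, and the alias TR is an assumption):

{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Text as T
import qualified Data.Text.Read as TR

main :: IO ()
main = do
  -- Hex input may carry an optional, case-insensitive "0x" prefix.
  print (TR.hexadecimal ("0xFF rest" :: T.Text) :: Either String (Int, T.Text))
    -- Right (255," rest")
  -- A trailing '.' or 'e' not followed by digits is left unconsumed.
  print (TR.rational ("3.foo" :: T.Text) :: Either String (Double, T.Text))
    -- Right (3.0,".foo")
  print (TR.double "3e")  -- Right (3.0,"e")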
-double :: Reader Double -double = floaty $ \real frac fracDenom -> - fromIntegral real + - fromIntegral frac / fromIntegral fracDenom - -signa :: Num a => Parser a -> Parser a -{-# SPECIALIZE signa :: Parser Int -> Parser Int #-} -{-# SPECIALIZE signa :: Parser Int8 -> Parser Int8 #-} -{-# SPECIALIZE signa :: Parser Int16 -> Parser Int16 #-} -{-# SPECIALIZE signa :: Parser Int32 -> Parser Int32 #-} -{-# SPECIALIZE signa :: Parser Int64 -> Parser Int64 #-} -{-# SPECIALIZE signa :: Parser Integer -> Parser Integer #-} -signa p = do - sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') - if sign == '+' then p else negate `liftM` p - -char :: (Char -> Bool) -> Parser Char -char p = P $ \t -> case T.uncons t of - Just (c,t') | p c -> Right (c,t') - _ -> Left "character does not match" - -floaty :: Fractional a => (Integer -> Integer -> Integer -> a) -> Reader a -{-# INLINE floaty #-} -floaty f = runP $ do - sign <- perhaps '+' $ char (\c -> c == '-' || c == '+') - real <- P decimal - T fraction fracDigits <- perhaps (T 0 0) $ do - _ <- char (=='.') - digits <- P $ \t -> Right (T.length $ T.takeWhile isDigit t, t) - n <- P decimal - return $ T n digits - let e c = c == 'e' || c == 'E' - power <- perhaps 0 (char e >> signa (P decimal) :: Parser Int) - let n = if fracDigits == 0 - then if power == 0 - then fromIntegral real - else fromIntegral real * (10 ^^ power) - else if power == 0 - then f real fraction (10 ^ fracDigits) - else f real fraction (10 ^ fracDigits) * (10 ^^ power) - return $! if sign == '+' - then n - else -n diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Show.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Show.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Show.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Show.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -{-# LANGUAGE CPP, MagicHash #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif - --- | --- Module : Data.Text.Show --- Copyright : (c) 2009-2015 Bryan O'Sullivan --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Stability : experimental --- Portability : GHC - -module Data.Text.Show - ( - singleton - , unpack - , unpackCString# - ) where - -import Control.Monad.ST (ST) -import Data.Text.Internal (Text(..), empty_, safe) -import Data.Text.Internal.Fusion (stream, unstream) -import Data.Text.Internal.Unsafe.Char (unsafeWrite) -import GHC.Prim (Addr#) -import qualified Data.Text.Array as A -import qualified Data.Text.Internal.Fusion.Common as S - -#if __GLASGOW_HASKELL__ >= 702 -import qualified GHC.CString as GHC -#else -import qualified GHC.Base as GHC -#endif - -instance Show Text where - showsPrec p ps r = showsPrec p (unpack ps) r - --- | /O(n)/ Convert a 'Text' into a 'String'. Subject to fusion. -unpack :: Text -> String -unpack = S.unstreamList . stream -{-# INLINE [1] unpack #-} - --- | /O(n)/ Convert a literal string into a 'Text'. --- --- This is exposed solely for people writing GHC rewrite rules. --- --- @since 1.2.1.1 -unpackCString# :: Addr# -> Text -unpackCString# addr# = unstream (S.streamCString# addr#) -{-# NOINLINE unpackCString# #-} - -{-# RULES "TEXT literal" [1] forall a. - unstream (S.map safe (S.streamList (GHC.unpackCString# a))) - = unpackCString# a #-} - -{-# RULES "TEXT literal UTF8" [1] forall a. 
- unstream (S.map safe (S.streamList (GHC.unpackCStringUtf8# a))) - = unpackCString# a #-} - -{-# RULES "TEXT empty literal" [1] - unstream (S.map safe (S.streamList [])) - = empty_ #-} - -{-# RULES "TEXT singleton literal" [1] forall a. - unstream (S.map safe (S.streamList [a])) - = singleton_ a #-} - --- | /O(1)/ Convert a character into a Text. Subject to fusion. --- Performs replacement on invalid scalar values. -singleton :: Char -> Text -singleton = unstream . S.singleton . safe -{-# INLINE [1] singleton #-} - -{-# RULES "TEXT singleton" forall a. - unstream (S.singleton (safe a)) - = singleton_ a #-} - --- This is intended to reduce inlining bloat. -singleton_ :: Char -> Text -singleton_ c = Text (A.run x) 0 len - where x :: ST s (A.MArray s) - x = do arr <- A.new len - _ <- unsafeWrite arr 0 d - return arr - len | d < '\x10000' = 1 - | otherwise = 2 - d = safe c -{-# NOINLINE singleton_ #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Unsafe.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Unsafe.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text/Unsafe.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text/Unsafe.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,124 +0,0 @@ -{-# LANGUAGE CPP, MagicHash, UnboxedTuples #-} --- | --- Module : Data.Text.Unsafe --- Copyright : (c) 2009, 2010, 2011 Bryan O'Sullivan --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : portable --- --- A module containing unsafe 'Text' operations, for very very careful --- use in heavily tested code. -module Data.Text.Unsafe - ( - inlineInterleaveST - , inlinePerformIO - , unsafeDupablePerformIO - , Iter(..) - , iter - , iter_ - , reverseIter - , reverseIter_ - , unsafeHead - , unsafeTail - , lengthWord16 - , takeWord16 - , dropWord16 - ) where - -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import Data.Text.Internal.Encoding.Utf16 (chr2) -import Data.Text.Internal (Text(..)) -import Data.Text.Internal.Unsafe (inlineInterleaveST, inlinePerformIO) -import Data.Text.Internal.Unsafe.Char (unsafeChr) -import qualified Data.Text.Array as A -import GHC.IO (unsafeDupablePerformIO) - --- | /O(1)/ A variant of 'head' for non-empty 'Text'. 'unsafeHead' --- omits the check for the empty case, so there is an obligation on --- the programmer to provide a proof that the 'Text' is non-empty. -unsafeHead :: Text -> Char -unsafeHead (Text arr off _len) - | m < 0xD800 || m > 0xDBFF = unsafeChr m - | otherwise = chr2 m n - where m = A.unsafeIndex arr off - n = A.unsafeIndex arr (off+1) -{-# INLINE unsafeHead #-} - --- | /O(1)/ A variant of 'tail' for non-empty 'Text'. 'unsafeTail' --- omits the check for the empty case, so there is an obligation on --- the programmer to provide a proof that the 'Text' is non-empty. -unsafeTail :: Text -> Text -unsafeTail t@(Text arr off len) = -#if defined(ASSERTS) - assert (d <= len) $ -#endif - Text arr (off+d) (len-d) - where d = iter_ t 0 -{-# INLINE unsafeTail #-} - -data Iter = Iter {-# UNPACK #-} !Char {-# UNPACK #-} !Int - --- | /O(1)/ Iterate (unsafely) one step forwards through a UTF-16 --- array, returning the current character and the delta to add to give --- the next offset to iterate at. 
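A hedged sketch of how the unchecked iterator documented above can be used to walk a UTF-16 buffer code point by code point (added for illustration; codePoints is an assumed helper name):

{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Text as T
import Data.Text.Unsafe (Iter(..), iter, lengthWord16)

-- The caller must stay inside the buffer, so the loop is bounded
-- by lengthWord16 rather than by the character count.
codePoints :: T.Text -> [Char]
codePoints t = go 0
  where
    len = lengthWord16 t
    go i
      | i >= len  = []
      | otherwise = let Iter c d = iter t i in c : go (i + d)

main :: IO ()
main = print (codePoints "a\x1F600 b")  -- "a\128512 b"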
-iter :: Text -> Int -> Iter -iter (Text arr off _len) i - | m < 0xD800 || m > 0xDBFF = Iter (unsafeChr m) 1 - | otherwise = Iter (chr2 m n) 2 - where m = A.unsafeIndex arr j - n = A.unsafeIndex arr k - j = off + i - k = j + 1 -{-# INLINE iter #-} - --- | /O(1)/ Iterate one step through a UTF-16 array, returning the --- delta to add to give the next offset to iterate at. -iter_ :: Text -> Int -> Int -iter_ (Text arr off _len) i | m < 0xD800 || m > 0xDBFF = 1 - | otherwise = 2 - where m = A.unsafeIndex arr (off+i) -{-# INLINE iter_ #-} - --- | /O(1)/ Iterate one step backwards through a UTF-16 array, --- returning the current character and the delta to add (i.e. a --- negative number) to give the next offset to iterate at. -reverseIter :: Text -> Int -> (Char,Int) -reverseIter (Text arr off _len) i - | m < 0xDC00 || m > 0xDFFF = (unsafeChr m, -1) - | otherwise = (chr2 n m, -2) - where m = A.unsafeIndex arr j - n = A.unsafeIndex arr k - j = off + i - k = j - 1 -{-# INLINE reverseIter #-} - --- | /O(1)/ Iterate one step backwards through a UTF-16 array, --- returning the delta to add (i.e. a negative number) to give the --- next offset to iterate at. --- --- @since 1.1.1.0 -reverseIter_ :: Text -> Int -> Int -reverseIter_ (Text arr off _len) i - | m < 0xDC00 || m > 0xDFFF = -1 - | otherwise = -2 - where m = A.unsafeIndex arr (off+i) -{-# INLINE reverseIter_ #-} - --- | /O(1)/ Return the length of a 'Text' in units of 'Word16'. This --- is useful for sizing a target array appropriately before using --- 'unsafeCopyToPtr'. -lengthWord16 :: Text -> Int -lengthWord16 (Text _arr _off len) = len -{-# INLINE lengthWord16 #-} - --- | /O(1)/ Unchecked take of 'k' 'Word16's from the front of a 'Text'. -takeWord16 :: Int -> Text -> Text -takeWord16 k (Text arr off _len) = Text arr off k -{-# INLINE takeWord16 #-} - --- | /O(1)/ Unchecked drop of 'k' 'Word16's from the front of a 'Text'. -dropWord16 :: Int -> Text -> Text -dropWord16 k (Text arr off len) = Text arr (off+k) (len-k) -{-# INLINE dropWord16 #-} diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Data/Text.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Data/Text.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1887 +0,0 @@ -{-# LANGUAGE BangPatterns, CPP, MagicHash, Rank2Types, UnboxedTuples, TypeFamilies #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} -#if __GLASGOW_HASKELL__ >= 702 -{-# LANGUAGE Trustworthy #-} -#endif --- Using TemplateHaskell in text unconditionally is unacceptable, as --- it's a GHC boot library. TemplateHaskellQuotes was added in 8.0, so --- this would seem to be a problem. However, GHC's policy of only --- needing to be able to compile itself from the last few releases --- allows us to use full-fat TH on older versions, while using THQ for --- GHC versions that may be used for bootstrapping. -#if __GLASGOW_HASKELL__ >= 800 -{-# LANGUAGE TemplateHaskellQuotes #-} -#else -{-# LANGUAGE TemplateHaskell #-} -#endif - --- | --- Module : Data.Text --- Copyright : (c) 2009, 2010, 2011, 2012 Bryan O'Sullivan, --- (c) 2009 Duncan Coutts, --- (c) 2008, 2009 Tom Harper --- --- License : BSD-style --- Maintainer : bos@serpentine.com --- Portability : GHC --- --- A time and space-efficient implementation of Unicode text. 
--- Suitable for performance critical use, both in terms of large data --- quantities and high speed. --- --- /Note/: Read below the synopsis for important notes on the use of --- this module. --- --- This module is intended to be imported @qualified@, to avoid name --- clashes with "Prelude" functions, e.g. --- --- > import qualified Data.Text as T --- --- To use an extended and very rich family of functions for working --- with Unicode text (including normalization, regular expressions, --- non-standard encodings, text breaking, and locales), see the --- . --- - -module Data.Text - ( - -- * Strict vs lazy types - -- $strict - - -- * Acceptable data - -- $replacement - - -- * Definition of character - -- $character_definition - - -- * Fusion - -- $fusion - - -- * Types - Text - - -- * Creation and elimination - , pack - , unpack - , singleton - , empty - - -- * Basic interface - , cons - , snoc - , append - , uncons - , unsnoc - , head - , last - , tail - , init - , null - , length - , compareLength - - -- * Transformations - , map - , intercalate - , intersperse - , transpose - , reverse - , replace - - -- ** Case conversion - -- $case - , toCaseFold - , toLower - , toUpper - , toTitle - - -- ** Justification - , justifyLeft - , justifyRight - , center - - -- * Folds - , foldl - , foldl' - , foldl1 - , foldl1' - , foldr - , foldr1 - - -- ** Special folds - , concat - , concatMap - , any - , all - , maximum - , minimum - - -- * Construction - - -- ** Scans - , scanl - , scanl1 - , scanr - , scanr1 - - -- ** Accumulating maps - , mapAccumL - , mapAccumR - - -- ** Generation and unfolding - , replicate - , unfoldr - , unfoldrN - - -- * Substrings - - -- ** Breaking strings - , take - , takeEnd - , drop - , dropEnd - , takeWhile - , takeWhileEnd - , dropWhile - , dropWhileEnd - , dropAround - , strip - , stripStart - , stripEnd - , splitAt - , breakOn - , breakOnEnd - , break - , span - , group - , groupBy - , inits - , tails - - -- ** Breaking into many substrings - -- $split - , splitOn - , split - , chunksOf - - -- ** Breaking into lines and words - , lines - --, lines' - , words - , unlines - , unwords - - -- * Predicates - , isPrefixOf - , isSuffixOf - , isInfixOf - - -- ** View patterns - , stripPrefix - , stripSuffix - , commonPrefixes - - -- * Searching - , filter - , breakOnAll - , find - , partition - - -- , findSubstring - - -- * Indexing - -- $index - , index - , findIndex - , count - - -- * Zipping - , zip - , zipWith - - -- -* Ordered text - -- , sort - - -- * Low level operations - , copy - , unpackCString# - ) where - -import Prelude (Char, Bool(..), Int, Maybe(..), String, - Eq(..), Ord(..), Ordering(..), (++), - Read(..), - (&&), (||), (+), (-), (.), ($), ($!), (>>), - not, return, otherwise, quot) -import Control.DeepSeq (NFData(rnf)) -#if defined(ASSERTS) -import Control.Exception (assert) -#endif -import Data.Char (isSpace) -import Data.Data (Data(gfoldl, toConstr, gunfold, dataTypeOf), constrIndex, - Constr, mkConstr, DataType, mkDataType, Fixity(Prefix)) -import Control.Monad (foldM) -import Control.Monad.ST (ST) -import qualified Data.Text.Array as A -import qualified Data.List as L -import Data.Binary (Binary(get, put)) -import Data.Monoid (Monoid(..)) -#if MIN_VERSION_base(4,9,0) -import Data.Semigroup (Semigroup(..)) -#endif -import Data.String (IsString(..)) -import qualified Data.Text.Internal.Fusion as S -import qualified Data.Text.Internal.Fusion.Common as S -import Data.Text.Encoding (decodeUtf8', encodeUtf8) -import Data.Text.Internal.Fusion (stream, 
reverseStream, unstream) -import Data.Text.Internal.Private (span_) -import Data.Text.Internal (Text(..), empty, firstf, mul, safe, text) -import Data.Text.Show (singleton, unpack, unpackCString#) -import qualified Prelude as P -import Data.Text.Unsafe (Iter(..), iter, iter_, lengthWord16, reverseIter, - reverseIter_, unsafeHead, unsafeTail) -import Data.Text.Internal.Unsafe.Char (unsafeChr) -import qualified Data.Text.Internal.Functions as F -import qualified Data.Text.Internal.Encoding.Utf16 as U16 -import Data.Text.Internal.Search (indices) -import Data.Text.Internal.Unsafe.Shift (UnsafeShift(..)) -#if defined(__HADDOCK__) -import Data.ByteString (ByteString) -import qualified Data.Text.Lazy as L -import Data.Int (Int64) -#endif -import GHC.Base (eqInt, neInt, gtInt, geInt, ltInt, leInt) -#if MIN_VERSION_base(4,7,0) -import qualified GHC.Exts as Exts -#endif -import qualified Language.Haskell.TH.Lib as TH -import Language.Haskell.TH.Syntax (Lift, lift) -#if MIN_VERSION_base(4,7,0) -import Text.Printf (PrintfArg, formatArg, formatString) -#endif - --- $character_definition --- --- This package uses the term /character/ to denote Unicode /code points/. --- --- Note that this is not the same thing as a grapheme (e.g. a --- composition of code points that form one visual symbol). For --- instance, consider the grapheme \"ä\". This symbol has two --- Unicode representations: a single code-point representation --- @U+00E4@ (the @LATIN SMALL LETTER A WITH DIAERESIS@ code point), --- and a two code point representation @U+0061@ (the \"@A@\" code --- point) and @U+0308@ (the @COMBINING DIAERESIS@ code point). - --- $strict --- --- This package provides both strict and lazy 'Text' types. The --- strict type is provided by the "Data.Text" module, while the lazy --- type is provided by the "Data.Text.Lazy" module. Internally, the --- lazy @Text@ type consists of a list of strict chunks. --- --- The strict 'Text' type requires that an entire string fit into --- memory at once. The lazy 'Data.Text.Lazy.Text' type is capable of --- streaming strings that are larger than memory using a small memory --- footprint. In many cases, the overhead of chunked streaming makes --- the lazy 'Data.Text.Lazy.Text' type slower than its strict --- counterpart, but this is not always the case. Sometimes, the time --- complexity of a function in one module may be different from the --- other, due to their differing internal structures. --- --- Each module provides an almost identical API, with the main --- difference being that the strict module uses 'Int' values for --- lengths and counts, while the lazy module uses 'Data.Int.Int64' --- lengths. - --- $replacement --- --- A 'Text' value is a sequence of Unicode scalar values, as defined --- in --- . --- As such, a 'Text' cannot contain values in the range U+D800 to --- U+DFFF inclusive. Haskell implementations admit all Unicode code --- points --- () --- as 'Char' values, including code points from this invalid range. --- This means that there are some 'Char' values that are not valid --- Unicode scalar values, and the functions in this module must handle --- those cases. --- --- Within this module, many functions construct a 'Text' from one or --- more 'Char' values. Those functions will substitute 'Char' values --- that are not valid Unicode scalar values with the replacement --- character \"�\" (U+FFFD). Functions that perform this --- inspection and replacement are documented with the phrase --- \"Performs replacement on invalid scalar values\". 
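A small sketch of the replacement behaviour just described (illustrative, not from the original file; the sample inputs are assumptions):

import qualified Data.Text as T

main :: IO ()
main = do
  -- U+D800 and U+DFFF are surrogate code points, not Unicode scalar values,
  -- so construction functions substitute U+FFFD (the replacement character).
  print (T.singleton '\xD800')     -- "\65533"
  print (T.pack "ok \xDFFF here")  -- "ok \65533 here"
  -- Valid scalar values pass through unchanged.
  print (T.singleton '\x1F600')    -- "\128512"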
--- --- (One reason for this policy of replacement is that internally, a --- 'Text' value is represented as packed UTF-16 data. Values in the --- range U+D800 through U+DFFF are used by UTF-16 to denote surrogate --- code points, and so cannot be represented. The functions replace --- invalid scalar values, instead of dropping them, as a security --- measure. For details, see --- .) - --- $fusion --- --- Most of the functions in this module are subject to /fusion/, --- meaning that a pipeline of such functions will usually allocate at --- most one 'Text' value. --- --- As an example, consider the following pipeline: --- --- > import Data.Text as T --- > import Data.Text.Encoding as E --- > import Data.ByteString (ByteString) --- > --- > countChars :: ByteString -> Int --- > countChars = T.length . T.toUpper . E.decodeUtf8 --- --- From the type signatures involved, this looks like it should --- allocate one 'Data.ByteString.ByteString' value, and two 'Text' --- values. However, when a module is compiled with optimisation --- enabled under GHC, the two intermediate 'Text' values will be --- optimised away, and the function will be compiled down to a single --- loop over the source 'Data.ByteString.ByteString'. --- --- Functions that can be fused by the compiler are documented with the --- phrase \"Subject to fusion\". - -instance Eq Text where - Text arrA offA lenA == Text arrB offB lenB - | lenA == lenB = A.equal arrA offA arrB offB lenA - | otherwise = False - {-# INLINE (==) #-} - -instance Ord Text where - compare = compareText - -instance Read Text where - readsPrec p str = [(pack x,y) | (x,y) <- readsPrec p str] - -#if MIN_VERSION_base(4,9,0) --- | Non-orphan 'Semigroup' instance only defined for --- @base-4.9.0.0@ and later; orphan instances for older GHCs are --- provided by --- the [semigroups](http://hackage.haskell.org/package/semigroups) --- package --- --- @since 1.2.2.0 -instance Semigroup Text where - (<>) = append -#endif - -instance Monoid Text where - mempty = empty -#if MIN_VERSION_base(4,9,0) - mappend = (<>) -- future-proof definition -#else - mappend = append -#endif - mconcat = concat - -instance IsString Text where - fromString = pack - -#if MIN_VERSION_base(4,7,0) --- | @since 1.2.0.0 -instance Exts.IsList Text where - type Item Text = Char - fromList = pack - toList = unpack -#endif - -instance NFData Text where rnf !_ = () - --- | @since 1.2.1.0 -instance Binary Text where - put t = put (encodeUtf8 t) - get = do - bs <- get - case decodeUtf8' bs of - P.Left exn -> P.fail (P.show exn) - P.Right a -> P.return a - --- | This instance preserves data abstraction at the cost of inefficiency. --- We omit reflection services for the sake of data abstraction. --- --- This instance was created by copying the updated behavior of --- @"Data.Set".@'Data.Set.Set' and @"Data.Map".@'Data.Map.Map'. If you --- feel a mistake has been made, please feel free to submit --- improvements. --- --- The original discussion is archived here: --- --- --- The followup discussion that changed the behavior of 'Data.Set.Set' --- and 'Data.Map.Map' is archived here: --- - -instance Data Text where - gfoldl f z txt = z pack `f` (unpack txt) - toConstr _ = packConstr - gunfold k z c = case constrIndex c of - 1 -> k (z pack) - _ -> P.error "gunfold" - dataTypeOf _ = textDataType - --- | This instance has similar considerations to the 'Data' instance: --- it preserves abstraction at the cost of inefficiency. --- --- @since 1.2.4.0 -instance Lift Text where - lift = TH.appE (TH.varE 'pack) . 
TH.stringE . unpack - -#if MIN_VERSION_base(4,7,0) --- | Only defined for @base-4.7.0.0@ and later --- --- @since 1.2.2.0 -instance PrintfArg Text where - formatArg txt = formatString $ unpack txt -#endif - -packConstr :: Constr -packConstr = mkConstr textDataType "pack" [] Prefix - -textDataType :: DataType -textDataType = mkDataType "Data.Text.Text" [packConstr] - --- | /O(n)/ Compare two 'Text' values lexicographically. -compareText :: Text -> Text -> Ordering -compareText ta@(Text _arrA _offA lenA) tb@(Text _arrB _offB lenB) - | lenA == 0 && lenB == 0 = EQ - | otherwise = go 0 0 - where - go !i !j - | i >= lenA || j >= lenB = compare lenA lenB - | a < b = LT - | a > b = GT - | otherwise = go (i+di) (j+dj) - where Iter a di = iter ta i - Iter b dj = iter tb j - --- ----------------------------------------------------------------------------- --- * Conversion to/from 'Text' - --- | /O(n)/ Convert a 'String' into a 'Text'. Subject to --- fusion. Performs replacement on invalid scalar values. -pack :: String -> Text -pack = unstream . S.map safe . S.streamList -{-# INLINE [1] pack #-} - --- ----------------------------------------------------------------------------- --- * Basic functions - --- | /O(n)/ Adds a character to the front of a 'Text'. This function --- is more costly than its 'List' counterpart because it requires --- copying a new array. Subject to fusion. Performs replacement on --- invalid scalar values. -cons :: Char -> Text -> Text -cons c t = unstream (S.cons (safe c) (stream t)) -{-# INLINE cons #-} - -infixr 5 `cons` - --- | /O(n)/ Adds a character to the end of a 'Text'. This copies the --- entire array in the process, unless fused. Subject to fusion. --- Performs replacement on invalid scalar values. -snoc :: Text -> Char -> Text -snoc t c = unstream (S.snoc (stream t) (safe c)) -{-# INLINE snoc #-} - --- | /O(n)/ Appends one 'Text' to the other by copying both of them --- into a new 'Text'. Subject to fusion. -append :: Text -> Text -> Text -append a@(Text arr1 off1 len1) b@(Text arr2 off2 len2) - | len1 == 0 = b - | len2 == 0 = a - | len > 0 = Text (A.run x) 0 len - | otherwise = overflowError "append" - where - len = len1+len2 - x :: ST s (A.MArray s) - x = do - arr <- A.new len - A.copyI arr 0 arr1 off1 len1 - A.copyI arr len1 arr2 off2 len - return arr -{-# NOINLINE append #-} - -{-# RULES -"TEXT append -> fused" [~1] forall t1 t2. - append t1 t2 = unstream (S.append (stream t1) (stream t2)) -"TEXT append -> unfused" [1] forall t1 t2. - unstream (S.append (stream t1) (stream t2)) = append t1 t2 - #-} - --- | /O(1)/ Returns the first character of a 'Text', which must be --- non-empty. Subject to fusion. -head :: Text -> Char -head t = S.head (stream t) -{-# INLINE head #-} - --- | /O(1)/ Returns the first character and rest of a 'Text', or --- 'Nothing' if empty. Subject to fusion. -uncons :: Text -> Maybe (Char, Text) -uncons t@(Text arr off len) - | len <= 0 = Nothing - | otherwise = Just $ let !(Iter c d) = iter t 0 - in (c, text arr (off+d) (len-d)) -{-# INLINE [1] uncons #-} - --- | Lifted from Control.Arrow and specialized. -second :: (b -> c) -> (a,b) -> (a,c) -second f (a, b) = (a, f b) - --- | /O(1)/ Returns the last character of a 'Text', which must be --- non-empty. Subject to fusion. 
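An illustrative sketch of the basic construction and deconstruction functions above (not part of the original module; the helper shout and the alias T are assumed names):

{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Text as T

-- Consume a Text character by character with uncons.
shout :: T.Text -> T.Text
shout t = case T.uncons t of
  Nothing      -> T.empty
  Just (c, t') -> T.cons c (T.cons '!' (shout t'))

main :: IO ()
main = do
  print (T.cons 'H' (T.snoc "askell" '!'))  -- "Haskell!"
  print (T.append "foo" "bar")              -- "foobar"
  print (shout "abc")                       -- "a!b!c!"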
-last :: Text -> Char -last (Text arr off len) - | len <= 0 = emptyError "last" - | n < 0xDC00 || n > 0xDFFF = unsafeChr n - | otherwise = U16.chr2 n0 n - where n = A.unsafeIndex arr (off+len-1) - n0 = A.unsafeIndex arr (off+len-2) -{-# INLINE [1] last #-} - -{-# RULES -"TEXT last -> fused" [~1] forall t. - last t = S.last (stream t) -"TEXT last -> unfused" [1] forall t. - S.last (stream t) = last t - #-} - --- | /O(1)/ Returns all characters after the head of a 'Text', which --- must be non-empty. Subject to fusion. -tail :: Text -> Text -tail t@(Text arr off len) - | len <= 0 = emptyError "tail" - | otherwise = text arr (off+d) (len-d) - where d = iter_ t 0 -{-# INLINE [1] tail #-} - -{-# RULES -"TEXT tail -> fused" [~1] forall t. - tail t = unstream (S.tail (stream t)) -"TEXT tail -> unfused" [1] forall t. - unstream (S.tail (stream t)) = tail t - #-} - --- | /O(1)/ Returns all but the last character of a 'Text', which must --- be non-empty. Subject to fusion. -init :: Text -> Text -init (Text arr off len) | len <= 0 = emptyError "init" - | n >= 0xDC00 && n <= 0xDFFF = text arr off (len-2) - | otherwise = text arr off (len-1) - where - n = A.unsafeIndex arr (off+len-1) -{-# INLINE [1] init #-} - -{-# RULES -"TEXT init -> fused" [~1] forall t. - init t = unstream (S.init (stream t)) -"TEXT init -> unfused" [1] forall t. - unstream (S.init (stream t)) = init t - #-} - --- | /O(1)/ Returns all but the last character and the last character of a --- 'Text', or 'Nothing' if empty. --- --- @since 1.2.3.0 -unsnoc :: Text -> Maybe (Text, Char) -unsnoc (Text arr off len) - | len <= 0 = Nothing - | n < 0xDC00 || n > 0xDFFF = Just (text arr off (len-1), unsafeChr n) - | otherwise = Just (text arr off (len-2), U16.chr2 n0 n) - where n = A.unsafeIndex arr (off+len-1) - n0 = A.unsafeIndex arr (off+len-2) -{-# INLINE [1] unsnoc #-} - --- | /O(1)/ Tests whether a 'Text' is empty or not. Subject to --- fusion. -null :: Text -> Bool -null (Text _arr _off len) = -#if defined(ASSERTS) - assert (len >= 0) $ -#endif - len <= 0 -{-# INLINE [1] null #-} - -{-# RULES -"TEXT null -> fused" [~1] forall t. - null t = S.null (stream t) -"TEXT null -> unfused" [1] forall t. - S.null (stream t) = null t - #-} - --- | /O(1)/ Tests whether a 'Text' contains exactly one character. --- Subject to fusion. -isSingleton :: Text -> Bool -isSingleton = S.isSingleton . stream -{-# INLINE isSingleton #-} - --- | /O(n)/ Returns the number of characters in a 'Text'. --- Subject to fusion. -length :: Text -> Int -length t = S.length (stream t) -{-# INLINE [1] length #-} --- length needs to be phased after the compareN/length rules otherwise --- it may inline before the rules have an opportunity to fire. - --- | /O(n)/ Compare the count of characters in a 'Text' to a number. --- Subject to fusion. --- --- This function gives the same answer as comparing against the result --- of 'length', but can short circuit if the count of characters is --- greater than the number, and hence be more efficient. -compareLength :: Text -> Int -> Ordering -compareLength t n = S.compareLengthI (stream t) n -{-# INLINE [1] compareLength #-} - -{-# RULES -"TEXT compareN/length -> compareLength" [~1] forall t n. - compare (length t) n = compareLength t n - #-} - -{-# RULES -"TEXT ==N/length -> compareLength/==EQ" [~1] forall t n. - eqInt (length t) n = compareLength t n == EQ - #-} - -{-# RULES -"TEXT /=N/length -> compareLength//=EQ" [~1] forall t n. 
- neInt (length t) n = compareLength t n /= EQ - #-} - -{-# RULES -"TEXT compareLength/==LT" [~1] forall t n. - ltInt (length t) n = compareLength t n == LT - #-} - -{-# RULES -"TEXT <=N/length -> compareLength//=GT" [~1] forall t n. - leInt (length t) n = compareLength t n /= GT - #-} - -{-# RULES -"TEXT >N/length -> compareLength/==GT" [~1] forall t n. - gtInt (length t) n = compareLength t n == GT - #-} - -{-# RULES -"TEXT >=N/length -> compareLength//=LT" [~1] forall t n. - geInt (length t) n = compareLength t n /= LT - #-} - --- ----------------------------------------------------------------------------- --- * Transformations --- | /O(n)/ 'map' @f@ @t@ is the 'Text' obtained by applying @f@ to --- each element of @t@. --- --- Example: --- --- >>> let message = pack "I am not angry. Not at all." --- >>> T.map (\c -> if c == '.' then '!' else c) message --- "I am not angry! Not at all!" --- --- Subject to fusion. Performs replacement on invalid scalar values. -map :: (Char -> Char) -> Text -> Text -map f t = unstream (S.map (safe . f) (stream t)) -{-# INLINE [1] map #-} - --- | /O(n)/ The 'intercalate' function takes a 'Text' and a list of --- 'Text's and concatenates the list after interspersing the first --- argument between each element of the list. --- --- Example: --- --- >>> T.intercalate "NI!" ["We", "seek", "the", "Holy", "Grail"] --- "WeNI!seekNI!theNI!HolyNI!Grail" -intercalate :: Text -> [Text] -> Text -intercalate t = concat . (F.intersperse t) -{-# INLINE intercalate #-} - --- | /O(n)/ The 'intersperse' function takes a character and places it --- between the characters of a 'Text'. --- --- Example: --- --- >>> T.intersperse '.' "SHIELD" --- "S.H.I.E.L.D" --- --- Subject to fusion. Performs replacement on invalid scalar values. -intersperse :: Char -> Text -> Text -intersperse c t = unstream (S.intersperse (safe c) (stream t)) -{-# INLINE intersperse #-} - --- | /O(n)/ Reverse the characters of a string. --- --- Example: --- --- >>> T.reverse "desrever" --- "reversed" --- --- Subject to fusion (fuses with its argument). -reverse :: Text -> Text -reverse t = S.reverse (stream t) -{-# INLINE reverse #-} - --- | /O(m+n)/ Replace every non-overlapping occurrence of @needle@ in --- @haystack@ with @replacement@. --- --- This function behaves as though it was defined as follows: --- --- @ --- replace needle replacement haystack = --- 'intercalate' replacement ('splitOn' needle haystack) --- @ --- --- As this suggests, each occurrence is replaced exactly once. So if --- @needle@ occurs in @replacement@, that occurrence will /not/ itself --- be replaced recursively: --- --- >>> replace "oo" "foo" "oo" --- "foo" --- --- In cases where several instances of @needle@ overlap, only the --- first one will be replaced: --- --- >>> replace "ofo" "bar" "ofofo" --- "barfo" --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -replace :: Text - -- ^ @needle@ to search for. If this string is empty, an - -- error will occur. - -> Text - -- ^ @replacement@ to replace @needle@ with. - -> Text - -- ^ @haystack@ in which to search. 
- -> Text -replace needle@(Text _ _ neeLen) - (Text repArr repOff repLen) - haystack@(Text hayArr hayOff hayLen) - | neeLen == 0 = emptyError "replace" - | L.null ixs = haystack - | len > 0 = Text (A.run x) 0 len - | otherwise = empty - where - ixs = indices needle haystack - len = hayLen - (neeLen - repLen) `mul` L.length ixs - x :: ST s (A.MArray s) - x = do - marr <- A.new len - let loop (i:is) o d = do - let d0 = d + i - o - d1 = d0 + repLen - A.copyI marr d hayArr (hayOff+o) d0 - A.copyI marr d0 repArr repOff d1 - loop is (i + neeLen) d1 - loop [] o d = A.copyI marr d hayArr (hayOff+o) len - loop ixs 0 0 - return marr - --- ---------------------------------------------------------------------------- --- ** Case conversions (folds) - --- $case --- --- When case converting 'Text' values, do not use combinators like --- @map toUpper@ to case convert each character of a string --- individually, as this gives incorrect results according to the --- rules of some writing systems. The whole-string case conversion --- functions from this module, such as @toUpper@, obey the correct --- case conversion rules. As a result, these functions may map one --- input character to two or three output characters. For examples, --- see the documentation of each function. --- --- /Note/: In some languages, case conversion is a locale- and --- context-dependent operation. The case conversion functions in this --- module are /not/ locale sensitive. Programs that require locale --- sensitivity should use appropriate versions of the --- . - --- | /O(n)/ Convert a string to folded case. Subject to fusion. --- --- This function is mainly useful for performing caseless (also known --- as case insensitive) string comparisons. --- --- A string @x@ is a caseless match for a string @y@ if and only if: --- --- @toCaseFold x == toCaseFold y@ --- --- The result string may be longer than the input string, and may --- differ from applying 'toLower' to the input string. For instance, --- the Armenian small ligature \"ﬓ\" (men now, U+FB13) is case --- folded to the sequence \"մ\" (men, U+0574) followed by --- \"ն\" (now, U+0576), while the Greek \"µ\" (micro sign, --- U+00B5) is case folded to \"μ\" (small letter mu, U+03BC) --- instead of itself. -toCaseFold :: Text -> Text -toCaseFold t = unstream (S.toCaseFold (stream t)) -{-# INLINE toCaseFold #-} - --- | /O(n)/ Convert a string to lower case, using simple case --- conversion. Subject to fusion. --- --- The result string may be longer than the input string. For --- instance, \"İ\" (Latin capital letter I with dot above, --- U+0130) maps to the sequence \"i\" (Latin small letter i, U+0069) --- followed by \" ̇\" (combining dot above, U+0307). -toLower :: Text -> Text -toLower t = unstream (S.toLower (stream t)) -{-# INLINE toLower #-} - --- | /O(n)/ Convert a string to upper case, using simple case --- conversion. Subject to fusion. --- --- The result string may be longer than the input string. For --- instance, the German \"ß\" (eszett, U+00DF) maps to the --- two-letter sequence \"SS\". -toUpper :: Text -> Text -toUpper t = unstream (S.toUpper (stream t)) -{-# INLINE toUpper #-} - --- | /O(n)/ Convert a string to title case, using simple case --- conversion. Subject to fusion. --- --- The first letter of the input is converted to title case, as is --- every subsequent letter that immediately follows a non-letter. --- Every letter that immediately follows another letter is converted --- to lower case. --- --- The result string may be longer than the input string. 
For example, --- the Latin small ligature fl (U+FB02) is converted to the --- sequence Latin capital letter F (U+0046) followed by Latin small --- letter l (U+006C). --- --- /Note/: this function does not take language or culture specific --- rules into account. For instance, in English, different style --- guides disagree on whether the book name \"The Hill of the Red --- Fox\" is correctly title cased—but this function will --- capitalize /every/ word. --- --- @since 1.0.0.0 -toTitle :: Text -> Text -toTitle t = unstream (S.toTitle (stream t)) -{-# INLINE toTitle #-} - --- | /O(n)/ Left-justify a string to the given length, using the --- specified fill character on the right. Subject to fusion. --- Performs replacement on invalid scalar values. --- --- Examples: --- --- >>> justifyLeft 7 'x' "foo" --- "fooxxxx" --- --- >>> justifyLeft 3 'x' "foobar" --- "foobar" -justifyLeft :: Int -> Char -> Text -> Text -justifyLeft k c t - | len >= k = t - | otherwise = t `append` replicateChar (k-len) c - where len = length t -{-# INLINE [1] justifyLeft #-} - -{-# RULES -"TEXT justifyLeft -> fused" [~1] forall k c t. - justifyLeft k c t = unstream (S.justifyLeftI k c (stream t)) -"TEXT justifyLeft -> unfused" [1] forall k c t. - unstream (S.justifyLeftI k c (stream t)) = justifyLeft k c t - #-} - --- | /O(n)/ Right-justify a string to the given length, using the --- specified fill character on the left. Performs replacement on --- invalid scalar values. --- --- Examples: --- --- >>> justifyRight 7 'x' "bar" --- "xxxxbar" --- --- >>> justifyRight 3 'x' "foobar" --- "foobar" -justifyRight :: Int -> Char -> Text -> Text -justifyRight k c t - | len >= k = t - | otherwise = replicateChar (k-len) c `append` t - where len = length t -{-# INLINE justifyRight #-} - --- | /O(n)/ Center a string to the given length, using the specified --- fill character on either side. Performs replacement on invalid --- scalar values. --- --- Examples: --- --- >>> center 8 'x' "HS" --- "xxxHSxxx" -center :: Int -> Char -> Text -> Text -center k c t - | len >= k = t - | otherwise = replicateChar l c `append` t `append` replicateChar r c - where len = length t - d = k - len - r = d `quot` 2 - l = d - r -{-# INLINE center #-} - --- | /O(n)/ The 'transpose' function transposes the rows and columns --- of its 'Text' argument. Note that this function uses 'pack', --- 'unpack', and the list version of transpose, and is thus not very --- efficient. --- --- Examples: --- --- >>> transpose ["green","orange"] --- ["go","rr","ea","en","ng","e"] --- --- >>> transpose ["blue","red"] --- ["br","le","ud","e"] -transpose :: [Text] -> [Text] -transpose ts = P.map pack (L.transpose (P.map unpack ts)) - --- ----------------------------------------------------------------------------- --- * Reducing 'Text's (folds) - --- | /O(n)/ 'foldl', applied to a binary operator, a starting value --- (typically the left-identity of the operator), and a 'Text', --- reduces the 'Text' using the binary operator, from left to right. --- Subject to fusion. -foldl :: (a -> Char -> a) -> a -> Text -> a -foldl f z t = S.foldl f z (stream t) -{-# INLINE foldl #-} - --- | /O(n)/ A strict version of 'foldl'. Subject to fusion. -foldl' :: (a -> Char -> a) -> a -> Text -> a -foldl' f z t = S.foldl' f z (stream t) -{-# INLINE foldl' #-} - --- | /O(n)/ A variant of 'foldl' that has no starting value argument, --- and thus must be applied to a non-empty 'Text'. Subject to fusion. 
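For illustration, a minimal sketch of how the case-conversion and fold functions documented above are typically combined, assuming only the public Data.Text API; the helper names caselessEq and countVowels are hypothetical:

import qualified Data.Text as T

-- Caseless comparison via toCaseFold, as recommended above.
caselessEq :: T.Text -> T.Text -> Bool
caselessEq x y = T.toCaseFold x == T.toCaseFold y

-- Counting vowels with a strict left fold over the characters.
countVowels :: T.Text -> Int
countVowels = T.foldl' (\n c -> if c `elem` "aeiou" then n + 1 else n) 0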
-foldl1 :: (Char -> Char -> Char) -> Text -> Char -foldl1 f t = S.foldl1 f (stream t) -{-# INLINE foldl1 #-} - --- | /O(n)/ A strict version of 'foldl1'. Subject to fusion. -foldl1' :: (Char -> Char -> Char) -> Text -> Char -foldl1' f t = S.foldl1' f (stream t) -{-# INLINE foldl1' #-} - --- | /O(n)/ 'foldr', applied to a binary operator, a starting value --- (typically the right-identity of the operator), and a 'Text', --- reduces the 'Text' using the binary operator, from right to left. --- Subject to fusion. -foldr :: (Char -> a -> a) -> a -> Text -> a -foldr f z t = S.foldr f z (stream t) -{-# INLINE foldr #-} - --- | /O(n)/ A variant of 'foldr' that has no starting value argument, --- and thus must be applied to a non-empty 'Text'. Subject to --- fusion. -foldr1 :: (Char -> Char -> Char) -> Text -> Char -foldr1 f t = S.foldr1 f (stream t) -{-# INLINE foldr1 #-} - --- ----------------------------------------------------------------------------- --- ** Special folds - --- | /O(n)/ Concatenate a list of 'Text's. -concat :: [Text] -> Text -concat ts = case ts' of - [] -> empty - [t] -> t - _ -> Text (A.run go) 0 len - where - ts' = L.filter (not . null) ts - len = sumP "concat" $ L.map lengthWord16 ts' - go :: ST s (A.MArray s) - go = do - arr <- A.new len - let step i (Text a o l) = - let !j = i + l in A.copyI arr i a o j >> return j - foldM step 0 ts' >> return arr - --- | /O(n)/ Map a function over a 'Text' that results in a 'Text', and --- concatenate the results. -concatMap :: (Char -> Text) -> Text -> Text -concatMap f = concat . foldr ((:) . f) [] -{-# INLINE concatMap #-} - --- | /O(n)/ 'any' @p@ @t@ determines whether any character in the --- 'Text' @t@ satisfies the predicate @p@. Subject to fusion. -any :: (Char -> Bool) -> Text -> Bool -any p t = S.any p (stream t) -{-# INLINE any #-} - --- | /O(n)/ 'all' @p@ @t@ determines whether all characters in the --- 'Text' @t@ satisfy the predicate @p@. Subject to fusion. -all :: (Char -> Bool) -> Text -> Bool -all p t = S.all p (stream t) -{-# INLINE all #-} - --- | /O(n)/ 'maximum' returns the maximum value from a 'Text', which --- must be non-empty. Subject to fusion. -maximum :: Text -> Char -maximum t = S.maximum (stream t) -{-# INLINE maximum #-} - --- | /O(n)/ 'minimum' returns the minimum value from a 'Text', which --- must be non-empty. Subject to fusion. -minimum :: Text -> Char -minimum t = S.minimum (stream t) -{-# INLINE minimum #-} - --- ----------------------------------------------------------------------------- --- * Building 'Text's - --- | /O(n)/ 'scanl' is similar to 'foldl', but returns a list of --- successive reduced values from the left. Subject to fusion. --- Performs replacement on invalid scalar values. --- --- > scanl f z [x1, x2, ...] == [z, z `f` x1, (z `f` x1) `f` x2, ...] --- --- Note that --- --- > last (scanl f z xs) == foldl f z xs. -scanl :: (Char -> Char -> Char) -> Char -> Text -> Text -scanl f z t = unstream (S.scanl g z (stream t)) - where g a b = safe (f a b) -{-# INLINE scanl #-} - --- | /O(n)/ 'scanl1' is a variant of 'scanl' that has no starting --- value argument. Performs replacement on invalid scalar values. --- --- > scanl1 f [x1, x2, ...] == [x1, x1 `f` x2, ...] -scanl1 :: (Char -> Char -> Char) -> Text -> Text -scanl1 f t | null t = empty - | otherwise = scanl f (unsafeHead t) (unsafeTail t) -{-# INLINE scanl1 #-} - --- | /O(n)/ 'scanr' is the right-to-left dual of 'scanl'. Performs --- replacement on invalid scalar values. --- --- > scanr f v == reverse . scanl (flip f) v . 
reverse -scanr :: (Char -> Char -> Char) -> Char -> Text -> Text -scanr f z = S.reverse . S.reverseScanr g z . reverseStream - where g a b = safe (f a b) -{-# INLINE scanr #-} - --- | /O(n)/ 'scanr1' is a variant of 'scanr' that has no starting --- value argument. Performs replacement on invalid scalar values. -scanr1 :: (Char -> Char -> Char) -> Text -> Text -scanr1 f t | null t = empty - | otherwise = scanr f (last t) (init t) -{-# INLINE scanr1 #-} - --- | /O(n)/ Like a combination of 'map' and 'foldl''. Applies a --- function to each element of a 'Text', passing an accumulating --- parameter from left to right, and returns a final 'Text'. Performs --- replacement on invalid scalar values. -mapAccumL :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) -mapAccumL f z0 = S.mapAccumL g z0 . stream - where g a b = second safe (f a b) -{-# INLINE mapAccumL #-} - --- | The 'mapAccumR' function behaves like a combination of 'map' and --- a strict 'foldr'; it applies a function to each element of a --- 'Text', passing an accumulating parameter from right to left, and --- returning a final value of this accumulator together with the new --- 'Text'. --- Performs replacement on invalid scalar values. -mapAccumR :: (a -> Char -> (a,Char)) -> a -> Text -> (a, Text) -mapAccumR f z0 = second reverse . S.mapAccumL g z0 . reverseStream - where g a b = second safe (f a b) -{-# INLINE mapAccumR #-} - --- ----------------------------------------------------------------------------- --- ** Generating and unfolding 'Text's - --- | /O(n*m)/ 'replicate' @n@ @t@ is a 'Text' consisting of the input --- @t@ repeated @n@ times. -replicate :: Int -> Text -> Text -replicate n t@(Text a o l) - | n <= 0 || l <= 0 = empty - | n == 1 = t - | isSingleton t = replicateChar n (unsafeHead t) - | otherwise = Text (A.run x) 0 len - where - len = l `mul` n -- TODO: detect overflows - x :: ST s (A.MArray s) - x = do - arr <- A.new len - A.copyI arr 0 a o l - let loop !l1 = - let rest = len - l1 in - if rest <= l1 then A.copyM arr l1 arr 0 rest >> return arr - else A.copyM arr l1 arr 0 l1 >> loop (l1 `shiftL` 1) - loop l -{-# INLINE [1] replicate #-} - - -{-# RULES -"TEXT replicate/singleton -> replicateChar" [~1] forall n c. - replicate n (singleton c) = replicateChar n c - #-} - --- | /O(n)/ 'replicateChar' @n@ @c@ is a 'Text' of length @n@ with @c@ the --- value of every element. Subject to fusion. -replicateChar :: Int -> Char -> Text -replicateChar n c = unstream (S.replicateCharI n (safe c)) -{-# INLINE replicateChar #-} - --- | /O(n)/, where @n@ is the length of the result. The 'unfoldr' --- function is analogous to the List 'L.unfoldr'. 'unfoldr' builds a --- 'Text' from a seed value. The function takes the element and --- returns 'Nothing' if it is done producing the 'Text', otherwise --- 'Just' @(a,b)@. In this case, @a@ is the next 'Char' in the --- string, and @b@ is the seed value for further production. Subject --- to fusion. Performs replacement on invalid scalar values. -unfoldr :: (a -> Maybe (Char,a)) -> a -> Text -unfoldr f s = unstream (S.unfoldr (firstf safe . f) s) -{-# INLINE unfoldr #-} - --- | /O(n)/ Like 'unfoldr', 'unfoldrN' builds a 'Text' from a seed --- value. However, the length of the result should be limited by the --- first argument to 'unfoldrN'. This function is more efficient than --- 'unfoldr' when the maximum length of the result is known and --- correct, otherwise its performance is similar to 'unfoldr'. Subject --- to fusion. Performs replacement on invalid scalar values. 
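As a hedged illustration of the unfoldrN behaviour documented above (public Data.Text API only; alphaPrefix is a hypothetical name), the length bound limits the output even when the step function could keep producing:

import qualified Data.Text as T
import Data.Char (chr, ord)

-- The first ten lowercase letters, built from a numeric seed; unfoldrN stops
-- after at most ten characters even though step could continue to 'z'.
alphaPrefix :: T.Text
alphaPrefix = T.unfoldrN 10 step 0
  where
    step n
      | n < 26    = Just (chr (ord 'a' + n), n + 1)
      | otherwise = Nothing
-- alphaPrefix == T.pack "abcdefghij"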
-unfoldrN :: Int -> (a -> Maybe (Char,a)) -> a -> Text -unfoldrN n f s = unstream (S.unfoldrN n (firstf safe . f) s) -{-# INLINE unfoldrN #-} - --- ----------------------------------------------------------------------------- --- * Substrings - --- | /O(n)/ 'take' @n@, applied to a 'Text', returns the prefix of the --- 'Text' of length @n@, or the 'Text' itself if @n@ is greater than --- the length of the Text. Subject to fusion. -take :: Int -> Text -> Text -take n t@(Text arr off len) - | n <= 0 = empty - | n >= len = t - | otherwise = text arr off (iterN n t) -{-# INLINE [1] take #-} - -iterN :: Int -> Text -> Int -iterN n t@(Text _arr _off len) = loop 0 0 - where loop !i !cnt - | i >= len || cnt >= n = i - | otherwise = loop (i+d) (cnt+1) - where d = iter_ t i - -{-# RULES -"TEXT take -> fused" [~1] forall n t. - take n t = unstream (S.take n (stream t)) -"TEXT take -> unfused" [1] forall n t. - unstream (S.take n (stream t)) = take n t - #-} - --- | /O(n)/ 'takeEnd' @n@ @t@ returns the suffix remaining after --- taking @n@ characters from the end of @t@. --- --- Examples: --- --- >>> takeEnd 3 "foobar" --- "bar" --- --- @since 1.1.1.0 -takeEnd :: Int -> Text -> Text -takeEnd n t@(Text arr off len) - | n <= 0 = empty - | n >= len = t - | otherwise = text arr (off+i) (len-i) - where i = iterNEnd n t - -iterNEnd :: Int -> Text -> Int -iterNEnd n t@(Text _arr _off len) = loop (len-1) n - where loop i !m - | m <= 0 = i+1 - | i <= 0 = 0 - | otherwise = loop (i+d) (m-1) - where d = reverseIter_ t i - --- | /O(n)/ 'drop' @n@, applied to a 'Text', returns the suffix of the --- 'Text' after the first @n@ characters, or the empty 'Text' if @n@ --- is greater than the length of the 'Text'. Subject to fusion. -drop :: Int -> Text -> Text -drop n t@(Text arr off len) - | n <= 0 = t - | n >= len = empty - | otherwise = text arr (off+i) (len-i) - where i = iterN n t -{-# INLINE [1] drop #-} - -{-# RULES -"TEXT drop -> fused" [~1] forall n t. - drop n t = unstream (S.drop n (stream t)) -"TEXT drop -> unfused" [1] forall n t. - unstream (S.drop n (stream t)) = drop n t - #-} - --- | /O(n)/ 'dropEnd' @n@ @t@ returns the prefix remaining after --- dropping @n@ characters from the end of @t@. --- --- Examples: --- --- >>> dropEnd 3 "foobar" --- "foo" --- --- @since 1.1.1.0 -dropEnd :: Int -> Text -> Text -dropEnd n t@(Text arr off len) - | n <= 0 = t - | n >= len = empty - | otherwise = text arr off (iterNEnd n t) - --- | /O(n)/ 'takeWhile', applied to a predicate @p@ and a 'Text', --- returns the longest prefix (possibly empty) of elements that --- satisfy @p@. Subject to fusion. -takeWhile :: (Char -> Bool) -> Text -> Text -takeWhile p t@(Text arr off len) = loop 0 - where loop !i | i >= len = t - | p c = loop (i+d) - | otherwise = text arr off i - where Iter c d = iter t i -{-# INLINE [1] takeWhile #-} - -{-# RULES -"TEXT takeWhile -> fused" [~1] forall p t. - takeWhile p t = unstream (S.takeWhile p (stream t)) -"TEXT takeWhile -> unfused" [1] forall p t. - unstream (S.takeWhile p (stream t)) = takeWhile p t - #-} - --- | /O(n)/ 'takeWhileEnd', applied to a predicate @p@ and a 'Text', --- returns the longest suffix (possibly empty) of elements that --- satisfy @p@. 
--- Examples: --- --- >>> takeWhileEnd (=='o') "foo" --- "oo" --- --- @since 1.2.2.0 -takeWhileEnd :: (Char -> Bool) -> Text -> Text -takeWhileEnd p t@(Text arr off len) = loop (len-1) len - where loop !i !l | l <= 0 = t - | p c = loop (i+d) (l+d) - | otherwise = text arr (off+l) (len-l) - where (c,d) = reverseIter t i -{-# INLINE [1] takeWhileEnd #-} - --- | /O(n)/ 'dropWhile' @p@ @t@ returns the suffix remaining after --- 'takeWhile' @p@ @t@. Subject to fusion. -dropWhile :: (Char -> Bool) -> Text -> Text -dropWhile p t@(Text arr off len) = loop 0 0 - where loop !i !l | l >= len = empty - | p c = loop (i+d) (l+d) - | otherwise = Text arr (off+i) (len-l) - where Iter c d = iter t i -{-# INLINE [1] dropWhile #-} - -{-# RULES -"TEXT dropWhile -> fused" [~1] forall p t. - dropWhile p t = unstream (S.dropWhile p (stream t)) -"TEXT dropWhile -> unfused" [1] forall p t. - unstream (S.dropWhile p (stream t)) = dropWhile p t - #-} - --- | /O(n)/ 'dropWhileEnd' @p@ @t@ returns the prefix remaining after --- dropping characters that satisfy the predicate @p@ from the end of --- @t@. --- --- Examples: --- --- >>> dropWhileEnd (=='.') "foo..." --- "foo" -dropWhileEnd :: (Char -> Bool) -> Text -> Text -dropWhileEnd p t@(Text arr off len) = loop (len-1) len - where loop !i !l | l <= 0 = empty - | p c = loop (i+d) (l+d) - | otherwise = Text arr off l - where (c,d) = reverseIter t i -{-# INLINE [1] dropWhileEnd #-} - --- | /O(n)/ 'dropAround' @p@ @t@ returns the substring remaining after --- dropping characters that satisfy the predicate @p@ from both the --- beginning and end of @t@. Subject to fusion. -dropAround :: (Char -> Bool) -> Text -> Text -dropAround p = dropWhile p . dropWhileEnd p -{-# INLINE [1] dropAround #-} - --- | /O(n)/ Remove leading white space from a string. Equivalent to: --- --- > dropWhile isSpace -stripStart :: Text -> Text -stripStart = dropWhile isSpace -{-# INLINE stripStart #-} - --- | /O(n)/ Remove trailing white space from a string. Equivalent to: --- --- > dropWhileEnd isSpace -stripEnd :: Text -> Text -stripEnd = dropWhileEnd isSpace -{-# INLINE [1] stripEnd #-} - --- | /O(n)/ Remove leading and trailing white space from a string. --- Equivalent to: --- --- > dropAround isSpace -strip :: Text -> Text -strip = dropAround isSpace -{-# INLINE [1] strip #-} - --- | /O(n)/ 'splitAt' @n t@ returns a pair whose first element is a --- prefix of @t@ of length @n@, and whose second is the remainder of --- the string. It is equivalent to @('take' n t, 'drop' n t)@. -splitAt :: Int -> Text -> (Text, Text) -splitAt n t@(Text arr off len) - | n <= 0 = (empty, t) - | n >= len = (t, empty) - | otherwise = let k = iterN n t - in (text arr off k, text arr (off+k) (len-k)) - --- | /O(n)/ 'span', applied to a predicate @p@ and text @t@, returns --- a pair whose first element is the longest prefix (possibly empty) --- of @t@ of elements that satisfy @p@, and whose second is the --- remainder of the list. -span :: (Char -> Bool) -> Text -> (Text, Text) -span p t = case span_ p t of - (# hd,tl #) -> (hd,tl) -{-# INLINE span #-} - --- | /O(n)/ 'break' is like 'span', but the prefix returned is --- over elements that fail the predicate @p@. -break :: (Char -> Bool) -> Text -> (Text, Text) -break p = span (not . p) -{-# INLINE break #-} - --- | /O(n)/ Group characters in a string according to a predicate. 
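For illustration, a small sketch combining break, strip and drop as documented above (public Data.Text API only; keyValue is a hypothetical helper):

import qualified Data.Text as T

-- Split a "key: value" line at the first colon, trimming white space.
keyValue :: T.Text -> (T.Text, T.Text)
keyValue line = (T.strip k, T.strip (T.drop 1 rest))
  where (k, rest) = T.break (== ':') line
-- keyValue (T.pack "  host : example.org ") == (T.pack "host", T.pack "example.org")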
-groupBy :: (Char -> Char -> Bool) -> Text -> [Text] -groupBy p = loop - where - loop t@(Text arr off len) - | null t = [] - | otherwise = text arr off n : loop (text arr (off+n) (len-n)) - where Iter c d = iter t 0 - n = d + findAIndexOrEnd (not . p c) (Text arr (off+d) (len-d)) - --- | Returns the /array/ index (in units of 'Word16') at which a --- character may be found. This is /not/ the same as the logical --- index returned by e.g. 'findIndex'. -findAIndexOrEnd :: (Char -> Bool) -> Text -> Int -findAIndexOrEnd q t@(Text _arr _off len) = go 0 - where go !i | i >= len || q c = i - | otherwise = go (i+d) - where Iter c d = iter t i - --- | /O(n)/ Group characters in a string by equality. -group :: Text -> [Text] -group = groupBy (==) - --- | /O(n)/ Return all initial segments of the given 'Text', shortest --- first. -inits :: Text -> [Text] -inits t@(Text arr off len) = loop 0 - where loop i | i >= len = [t] - | otherwise = Text arr off i : loop (i + iter_ t i) - --- | /O(n)/ Return all final segments of the given 'Text', longest --- first. -tails :: Text -> [Text] -tails t | null t = [empty] - | otherwise = t : tails (unsafeTail t) - --- $split --- --- Splitting functions in this library do not perform character-wise --- copies to create substrings; they just construct new 'Text's that --- are slices of the original. - --- | /O(m+n)/ Break a 'Text' into pieces separated by the first 'Text' --- argument (which cannot be empty), consuming the delimiter. An empty --- delimiter is invalid, and will cause an error to be raised. --- --- Examples: --- --- >>> splitOn "\r\n" "a\r\nb\r\nd\r\ne" --- ["a","b","d","e"] --- --- >>> splitOn "aaa" "aaaXaaaXaaaXaaa" --- ["","X","X","X",""] --- --- >>> splitOn "x" "x" --- ["",""] --- --- and --- --- > intercalate s . splitOn s == id --- > splitOn (singleton c) == split (==c) --- --- (Note: the string @s@ to split on above cannot be empty.) --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -splitOn :: Text - -- ^ String to split on. If this string is empty, an error - -- will occur. - -> Text - -- ^ Input text. - -> [Text] -splitOn pat@(Text _ _ l) src@(Text arr off len) - | l <= 0 = emptyError "splitOn" - | isSingleton pat = split (== unsafeHead pat) src - | otherwise = go 0 (indices pat src) - where - go !s (x:xs) = text arr (s+off) (x-s) : go (x+l) xs - go s _ = [text arr (s+off) (len-s)] -{-# INLINE [1] splitOn #-} - -{-# RULES -"TEXT splitOn/singleton -> split/==" [~1] forall c t. - splitOn (singleton c) t = split (==c) t - #-} - --- | /O(n)/ Splits a 'Text' into components delimited by separators, --- where the predicate returns True for a separator element. The --- resulting components do not contain the separators. Two adjacent --- separators result in an empty component in the output. eg. --- --- >>> split (=='a') "aabbaca" --- ["","","bb","c",""] --- --- >>> split (=='a') "" --- [""] -split :: (Char -> Bool) -> Text -> [Text] -split _ t@(Text _off _arr 0) = [t] -split p t = loop t - where loop s | null s' = [l] - | otherwise = l : loop (unsafeTail s') - where (# l, s' #) = span_ (not . p) s -{-# INLINE split #-} - --- | /O(n)/ Splits a 'Text' into components of length @k@. The last --- element may be shorter than the other chunks, depending on the --- length of the input. 
Examples: --- --- >>> chunksOf 3 "foobarbaz" --- ["foo","bar","baz"] --- --- >>> chunksOf 4 "haskell.org" --- ["hask","ell.","org"] -chunksOf :: Int -> Text -> [Text] -chunksOf k = go - where - go t = case splitAt k t of - (a,b) | null a -> [] - | otherwise -> a : go b -{-# INLINE chunksOf #-} - --- ---------------------------------------------------------------------------- --- * Searching - -------------------------------------------------------------------------------- --- ** Searching with a predicate - --- | /O(n)/ The 'find' function takes a predicate and a 'Text', and --- returns the first element matching the predicate, or 'Nothing' if --- there is no such element. Subject to fusion. -find :: (Char -> Bool) -> Text -> Maybe Char -find p t = S.findBy p (stream t) -{-# INLINE find #-} - --- | /O(n)/ The 'partition' function takes a predicate and a 'Text', --- and returns the pair of 'Text's with elements which do and do not --- satisfy the predicate, respectively; i.e. --- --- > partition p t == (filter p t, filter (not . p) t) -partition :: (Char -> Bool) -> Text -> (Text, Text) -partition p t = (filter p t, filter (not . p) t) -{-# INLINE partition #-} - --- | /O(n)/ 'filter', applied to a predicate and a 'Text', --- returns a 'Text' containing those characters that satisfy the --- predicate. -filter :: (Char -> Bool) -> Text -> Text -filter p t = unstream (S.filter p (stream t)) -{-# INLINE filter #-} - --- | /O(n+m)/ Find the first instance of @needle@ (which must be --- non-'null') in @haystack@. The first element of the returned tuple --- is the prefix of @haystack@ before @needle@ is matched. The second --- is the remainder of @haystack@, starting with the match. --- --- Examples: --- --- >>> breakOn "::" "a::b::c" --- ("a","::b::c") --- --- >>> breakOn "/" "foobar" --- ("foobar","") --- --- Laws: --- --- > append prefix match == haystack --- > where (prefix, match) = breakOn needle haystack --- --- If you need to break a string by a substring repeatedly (e.g. you --- want to break on every instance of a substring), use 'breakOnAll' --- instead, as it has lower startup overhead. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -breakOn :: Text -> Text -> (Text, Text) -breakOn pat src@(Text arr off len) - | null pat = emptyError "breakOn" - | otherwise = case indices pat src of - [] -> (src, empty) - (x:_) -> (text arr off x, text arr (off+x) (len-x)) -{-# INLINE breakOn #-} - --- | /O(n+m)/ Similar to 'breakOn', but searches from the end of the --- string. --- --- The first element of the returned tuple is the prefix of @haystack@ --- up to and including the last match of @needle@. The second is the --- remainder of @haystack@, following the match. --- --- >>> breakOnEnd "::" "a::b::c" --- ("a::b::","c") -breakOnEnd :: Text -> Text -> (Text, Text) -breakOnEnd pat src = (reverse b, reverse a) - where (a,b) = breakOn (reverse pat) (reverse src) -{-# INLINE breakOnEnd #-} - --- | /O(n+m)/ Find all non-overlapping instances of @needle@ in --- @haystack@. Each element of the returned list consists of a pair: --- --- * The entire string prior to the /k/th match (i.e. the prefix) --- --- * The /k/th match, followed by the remainder of the string --- --- Examples: --- --- >>> breakOnAll "::" "" --- [] --- --- >>> breakOnAll "/" "a/b/c/" --- [("a","/b/c/"),("a/b","/c/"),("a/b/c","/")] --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. --- --- The @needle@ parameter may not be empty. 
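A brief sketch of the difference between breakOn and splitOn described above (public Data.Text API only; splitHostPort is a hypothetical helper):

import qualified Data.Text as T

-- breakOn leaves the needle attached to the suffix, while splitOn consumes it.
splitHostPort :: T.Text -> (T.Text, T.Text)
splitHostPort t = (host, T.drop 1 rest)
  where (host, rest) = T.breakOn (T.pack ":") t
-- splitHostPort (T.pack "localhost:8080") == (T.pack "localhost", T.pack "8080")
-- T.splitOn (T.pack ":") (T.pack "localhost:8080") == [T.pack "localhost", T.pack "8080"]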
-breakOnAll :: Text -- ^ @needle@ to search for - -> Text -- ^ @haystack@ in which to search - -> [(Text, Text)] -breakOnAll pat src@(Text arr off slen) - | null pat = emptyError "breakOnAll" - | otherwise = L.map step (indices pat src) - where - step x = (chunk 0 x, chunk x (slen-x)) - chunk !n !l = text arr (n+off) l -{-# INLINE breakOnAll #-} - -------------------------------------------------------------------------------- --- ** Indexing 'Text's - --- $index --- --- If you think of a 'Text' value as an array of 'Char' values (which --- it is not), you run the risk of writing inefficient code. --- --- An idiom that is common in some languages is to find the numeric --- offset of a character or substring, then use that number to split --- or trim the searched string. With a 'Text' value, this approach --- would require two /O(n)/ operations: one to perform the search, and --- one to operate from wherever the search ended. --- --- For example, suppose you have a string that you want to split on --- the substring @\"::\"@, such as @\"foo::bar::quux\"@. Instead of --- searching for the index of @\"::\"@ and taking the substrings --- before and after that index, you would instead use @breakOnAll \"::\"@. - --- | /O(n)/ 'Text' index (subscript) operator, starting from 0. Subject to fusion. -index :: Text -> Int -> Char -index t n = S.index (stream t) n -{-# INLINE index #-} - --- | /O(n)/ The 'findIndex' function takes a predicate and a 'Text' --- and returns the index of the first element in the 'Text' satisfying --- the predicate. Subject to fusion. -findIndex :: (Char -> Bool) -> Text -> Maybe Int -findIndex p t = S.findIndex p (stream t) -{-# INLINE findIndex #-} - --- | /O(n+m)/ The 'count' function returns the number of times the --- query string appears in the given 'Text'. An empty query string is --- invalid, and will cause an error to be raised. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -count :: Text -> Text -> Int -count pat src - | null pat = emptyError "count" - | isSingleton pat = countChar (unsafeHead pat) src - | otherwise = L.length (indices pat src) -{-# INLINE [1] count #-} - -{-# RULES -"TEXT count/singleton -> countChar" [~1] forall c t. - count (singleton c) t = countChar c t - #-} - --- | /O(n)/ The 'countChar' function returns the number of times the --- query element appears in the given 'Text'. Subject to fusion. -countChar :: Char -> Text -> Int -countChar c t = S.countChar c (stream t) -{-# INLINE countChar #-} - -------------------------------------------------------------------------------- --- * Zipping - --- | /O(n)/ 'zip' takes two 'Text's and returns a list of --- corresponding pairs of bytes. If one input 'Text' is short, --- excess elements of the longer 'Text' are discarded. This is --- equivalent to a pair of 'unpack' operations. -zip :: Text -> Text -> [(Char,Char)] -zip a b = S.unstreamList $ S.zipWith (,) (stream a) (stream b) -{-# INLINE zip #-} - --- | /O(n)/ 'zipWith' generalises 'zip' by zipping with the function --- given as the first argument, instead of a tupling function. --- Performs replacement on invalid scalar values. -zipWith :: (Char -> Char -> Char) -> Text -> Text -> Text -zipWith f t1 t2 = unstream (S.zipWith g (stream t1) (stream t2)) - where g a b = safe (f a b) -{-# INLINE zipWith #-} - --- | /O(n)/ Breaks a 'Text' up into a list of words, delimited by 'Char's --- representing white space. 
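For illustration, zip as documented above supports simple character-wise comparisons (public Data.Text API only; hamming is a hypothetical helper):

import qualified Data.Text as T

-- Hamming distance of two Texts; excess characters of the longer argument
-- are discarded by zip, matching the documentation above.
hamming :: T.Text -> T.Text -> Int
hamming a b = length (filter (uncurry (/=)) (T.zip a b))
-- hamming (T.pack "karolin") (T.pack "kathrin") == 3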
-words :: Text -> [Text] -words t@(Text arr off len) = loop 0 0 - where - loop !start !n - | n >= len = if start == n - then [] - else [Text arr (start+off) (n-start)] - | isSpace c = - if start == n - then loop (start+1) (start+1) - else Text arr (start+off) (n-start) : loop (n+d) (n+d) - | otherwise = loop start (n+d) - where Iter c d = iter t n -{-# INLINE words #-} - --- | /O(n)/ Breaks a 'Text' up into a list of 'Text's at --- newline 'Char's. The resulting strings do not contain newlines. -lines :: Text -> [Text] -lines ps | null ps = [] - | otherwise = h : if null t - then [] - else lines (unsafeTail t) - where (# h,t #) = span_ (/= '\n') ps -{-# INLINE lines #-} - -{- --- | /O(n)/ Portably breaks a 'Text' up into a list of 'Text's at line --- boundaries. --- --- A line boundary is considered to be either a line feed, a carriage --- return immediately followed by a line feed, or a carriage return. --- This accounts for both Unix and Windows line ending conventions, --- and for the old convention used on Mac OS 9 and earlier. -lines' :: Text -> [Text] -lines' ps | null ps = [] - | otherwise = h : case uncons t of - Nothing -> [] - Just (c,t') - | c == '\n' -> lines t' - | c == '\r' -> case uncons t' of - Just ('\n',t'') -> lines t'' - _ -> lines t' - where (h,t) = span notEOL ps - notEOL c = c /= '\n' && c /= '\r' -{-# INLINE lines' #-} --} - --- | /O(n)/ Joins lines, after appending a terminating newline to --- each. -unlines :: [Text] -> Text -unlines = concat . L.map (`snoc` '\n') -{-# INLINE unlines #-} - --- | /O(n)/ Joins words using single space characters. -unwords :: [Text] -> Text -unwords = intercalate (singleton ' ') -{-# INLINE unwords #-} - --- | /O(n)/ The 'isPrefixOf' function takes two 'Text's and returns --- 'True' iff the first is a prefix of the second. Subject to fusion. -isPrefixOf :: Text -> Text -> Bool -isPrefixOf a@(Text _ _ alen) b@(Text _ _ blen) = - alen <= blen && S.isPrefixOf (stream a) (stream b) -{-# INLINE [1] isPrefixOf #-} - -{-# RULES -"TEXT isPrefixOf -> fused" [~1] forall s t. - isPrefixOf s t = S.isPrefixOf (stream s) (stream t) - #-} - --- | /O(n)/ The 'isSuffixOf' function takes two 'Text's and returns --- 'True' iff the first is a suffix of the second. -isSuffixOf :: Text -> Text -> Bool -isSuffixOf a@(Text _aarr _aoff alen) b@(Text barr boff blen) = - d >= 0 && a == b' - where d = blen - alen - b' | d == 0 = b - | otherwise = Text barr (boff+d) alen -{-# INLINE isSuffixOf #-} - --- | /O(n+m)/ The 'isInfixOf' function takes two 'Text's and returns --- 'True' iff the first is contained, wholly and intact, anywhere --- within the second. --- --- In (unlikely) bad cases, this function's time complexity degrades --- towards /O(n*m)/. -isInfixOf :: Text -> Text -> Bool -isInfixOf needle haystack - | null needle = True - | isSingleton needle = S.elem (unsafeHead needle) . S.stream $ haystack - | otherwise = not . L.null . indices needle $ haystack -{-# INLINE [1] isInfixOf #-} - -{-# RULES -"TEXT isInfixOf/singleton -> S.elem/S.stream" [~1] forall n h. - isInfixOf (singleton n) h = S.elem n (S.stream h) - #-} - -------------------------------------------------------------------------------- --- * View patterns - --- | /O(n)/ Return the suffix of the second string if its prefix --- matches the entire first string. 
--- --- Examples: --- --- >>> stripPrefix "foo" "foobar" --- Just "bar" --- --- >>> stripPrefix "" "baz" --- Just "baz" --- --- >>> stripPrefix "foo" "quux" --- Nothing --- --- This is particularly useful with the @ViewPatterns@ extension to --- GHC, as follows: --- --- > {-# LANGUAGE ViewPatterns #-} --- > import Data.Text as T --- > --- > fnordLength :: Text -> Int --- > fnordLength (stripPrefix "fnord" -> Just suf) = T.length suf --- > fnordLength _ = -1 -stripPrefix :: Text -> Text -> Maybe Text -stripPrefix p@(Text _arr _off plen) t@(Text arr off len) - | p `isPrefixOf` t = Just $! text arr (off+plen) (len-plen) - | otherwise = Nothing - --- | /O(n)/ Find the longest non-empty common prefix of two strings --- and return it, along with the suffixes of each string at which they --- no longer match. --- --- If the strings do not have a common prefix or either one is empty, --- this function returns 'Nothing'. --- --- Examples: --- --- >>> commonPrefixes "foobar" "fooquux" --- Just ("foo","bar","quux") --- --- >>> commonPrefixes "veeble" "fetzer" --- Nothing --- --- >>> commonPrefixes "" "baz" --- Nothing -commonPrefixes :: Text -> Text -> Maybe (Text,Text,Text) -commonPrefixes t0@(Text arr0 off0 len0) t1@(Text arr1 off1 len1) = go 0 0 - where - go !i !j | i < len0 && j < len1 && a == b = go (i+d0) (j+d1) - | i > 0 = Just (Text arr0 off0 i, - text arr0 (off0+i) (len0-i), - text arr1 (off1+j) (len1-j)) - | otherwise = Nothing - where Iter a d0 = iter t0 i - Iter b d1 = iter t1 j - --- | /O(n)/ Return the prefix of the second string if its suffix --- matches the entire first string. --- --- Examples: --- --- >>> stripSuffix "bar" "foobar" --- Just "foo" --- --- >>> stripSuffix "" "baz" --- Just "baz" --- --- >>> stripSuffix "foo" "quux" --- Nothing --- --- This is particularly useful with the @ViewPatterns@ extension to --- GHC, as follows: --- --- > {-# LANGUAGE ViewPatterns #-} --- > import Data.Text as T --- > --- > quuxLength :: Text -> Int --- > quuxLength (stripSuffix "quux" -> Just pre) = T.length pre --- > quuxLength _ = -1 -stripSuffix :: Text -> Text -> Maybe Text -stripSuffix p@(Text _arr _off plen) t@(Text arr off len) - | p `isSuffixOf` t = Just $! text arr off (len-plen) - | otherwise = Nothing - --- | Add a list of non-negative numbers. Errors out on overflow. -sumP :: String -> [Int] -> Int -sumP fun = go 0 - where go !a (x:xs) - | ax >= 0 = go ax xs - | otherwise = overflowError fun - where ax = a + x - go a _ = a - -emptyError :: String -> a -emptyError fun = P.error $ "Data.Text." ++ fun ++ ": empty input" - -overflowError :: String -> a -overflowError fun = P.error $ "Data.Text." ++ fun ++ ": size overflow" - --- | /O(n)/ Make a distinct copy of the given string, sharing no --- storage with the original string. --- --- As an example, suppose you read a large string, of which you need --- only a small portion. If you do not use 'copy', the entire original --- array will be kept alive in memory by the smaller string. Making a --- copy \"breaks the link\" to the original array, allowing it to be --- garbage collected if there are no other live references to it. 
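A minimal sketch of the situation the copy documentation above describes, assuming only the public Data.Text and Data.Text.IO APIs; firstLineOnly is a hypothetical helper:

import qualified Data.Text as T
import qualified Data.Text.IO as TIO

-- Keep only the first line of a (possibly large) file; copy breaks the link
-- to the original array so the rest of the contents can be garbage collected.
firstLineOnly :: FilePath -> IO T.Text
firstLineOnly path = do
  contents <- TIO.readFile path
  return (T.copy (T.takeWhile (/= '\n') contents))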
-copy :: Text -> Text -copy (Text arr off len) = Text (A.run go) 0 len - where - go :: ST s (A.MArray s) - go = do - marr <- A.new len - A.copyI marr 0 arr off len - return marr - - -------------------------------------------------- --- NOTE: the named chunk below used by doctest; --- verify the doctests via `doctest -fobject-code Data/Text.hs` - --- $setup --- >>> :set -XOverloadedStrings --- >>> import qualified Data.Text as T diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/include/text_cbits.h cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/include/text_cbits.h --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/include/text_cbits.h 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/include/text_cbits.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -/* - * Copyright (c) 2013 Bryan O'Sullivan . - */ - -#ifndef _text_cbits_h -#define _text_cbits_h - -#define UTF8_ACCEPT 0 -#define UTF8_REJECT 12 - -#endif diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/LICENSE cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/LICENSE --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/LICENSE 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,26 +0,0 @@ -Copyright (c) 2008-2009, Tom Harper -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/README.markdown cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/README.markdown --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/README.markdown 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/README.markdown 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -# `text`: Fast, packed Unicode strings, using stream fusion - -This package provides the Data.Text library, a library for the space- -and time-efficient manipulation of Unicode text in Haskell. - -**Please refer to the [package description on Hackage](https://hackage.haskell.org/package/text#description) for more information.** - -# Get involved! 
- -Please report bugs via the -[github issue tracker](https://github.com/haskell/text/issues). - -Master [git repository](https://github.com/haskell/text): - -* `git clone git://github.com/haskell/text.git` - -There's also a [Mercurial mirror](https://bitbucket.org/bos/text): - -* `hg clone https://bitbucket.org/bos/text` - -(You can create and contribute changes using either Mercurial or git.) - - -# Authors - -The base code for this library was originally written by Tom Harper, -based on the stream fusion framework developed by Roman Leshchinskiy, -Duncan Coutts, and Don Stewart. - -The core library was fleshed out, debugged, and tested by Bryan -O'Sullivan , and he is the current maintainer. diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/scripts/ApiCompare.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/scripts/ApiCompare.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/scripts/ApiCompare.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/scripts/ApiCompare.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ --- This script compares the strict and lazy Text APIs to ensure that --- they're reasonably in sync. - -{-# LANGUAGE OverloadedStrings #-} - -import qualified Data.Set as S -import qualified Data.Text as T -import System.Process - -main = do - let tidy pkg = (S.fromList . filter (T.isInfixOf "::") . T.lines . - T.replace "GHC.Int.Int64" "Int" . - T.replace "\n " "" . - T.replace (T.append (T.pack pkg) ".") "" . T.pack) `fmap` - readProcess "ghci" [] (":browse " ++ pkg) - let diff a b = mapM_ (putStrLn . (" "++) . T.unpack) . S.toList $ - S.difference a b - text <- tidy "Data.Text" - lazy <- tidy "Data.Text.Lazy" - list <- tidy "Data.List" - putStrLn "Text \\ List:" - diff text list - putStrLn "" - putStrLn "Text \\ Lazy:" - diff text lazy - putStrLn "" - putStrLn "Lazy \\ Text:" - diff lazy text diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/scripts/Arsec.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/scripts/Arsec.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/scripts/Arsec.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/scripts/Arsec.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -module Arsec - ( - Comment - , comment - , semi - , showC - , unichar - , unichars - , module Control.Applicative - , module Control.Monad - , module Data.Char - , module Text.ParserCombinators.Parsec.Char - , module Text.ParserCombinators.Parsec.Combinator - , module Text.ParserCombinators.Parsec.Error - , module Text.ParserCombinators.Parsec.Prim - ) where - -import Control.Monad -import Control.Applicative -import Data.Char -import Numeric -import Text.ParserCombinators.Parsec.Char hiding (lower, upper) -import Text.ParserCombinators.Parsec.Combinator hiding (optional) -import Text.ParserCombinators.Parsec.Error -import Text.ParserCombinators.Parsec.Prim hiding ((<|>), many) - -type Comment = String - -unichar :: Parser Char -unichar = chr . fst . head . 
readHex <$> many1 hexDigit - -unichars :: Parser [Char] -unichars = manyTill (unichar <* spaces) semi - -semi :: Parser () -semi = char ';' *> spaces *> pure () - -comment :: Parser Comment -comment = (char '#' *> manyTill anyToken (char '\n')) <|> string "\n" - -showC :: Char -> String -showC c = "'\\x" ++ d ++ "'" - where h = showHex (ord c) "" - d = replicate (4 - length h) '0' ++ h diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/scripts/CaseFolding.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/scripts/CaseFolding.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/scripts/CaseFolding.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/scripts/CaseFolding.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ --- This script processes the following source file: --- --- http://unicode.org/Public/UNIDATA/CaseFolding.txt - -module CaseFolding - ( - CaseFolding(..) - , Fold(..) - , parseCF - , mapCF - ) where - -import Arsec - -data Fold = Fold { - code :: Char - , status :: Char - , mapping :: [Char] - , name :: String - } deriving (Eq, Ord, Show) - -data CaseFolding = CF { cfComments :: [Comment], cfFolding :: [Fold] } - deriving (Show) - -entries :: Parser CaseFolding -entries = CF <$> many comment <*> many (entry <* many comment) - where - entry = Fold <$> unichar <* semi - <*> oneOf "CFST" <* semi - <*> unichars - <*> (string "# " *> manyTill anyToken (char '\n')) - -parseCF :: FilePath -> IO (Either ParseError CaseFolding) -parseCF name = parse entries name <$> readFile name - -mapCF :: CaseFolding -> [String] -mapCF (CF _ ms) = typ ++ (map nice . filter p $ ms) ++ [last] - where - typ = ["foldMapping :: forall s. Char -> s -> Step (CC s) Char" - ,"{-# NOINLINE foldMapping #-}"] - last = "foldMapping c s = Yield (toLower c) (CC s '\\0' '\\0')" - nice c = "-- " ++ name c ++ "\n" ++ - "foldMapping " ++ showC (code c) ++ " s = Yield " ++ x ++ " (CC s " ++ y ++ " " ++ z ++ ")" - where [x,y,z] = (map showC . 
take 3) (mapping c ++ repeat '\0') - p f = status f `elem` "CF" && - mapping f /= [toLower (code f)] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/scripts/CaseMapping.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/scripts/CaseMapping.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/scripts/CaseMapping.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/scripts/CaseMapping.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,38 +0,0 @@ -import System.Environment -import System.IO - -import Arsec -import CaseFolding -import SpecialCasing - -main = do - args <- getArgs - let oname = case args of - [] -> "../Data/Text/Internal/Fusion/CaseMapping.hs" - [o] -> o - psc <- parseSC "SpecialCasing.txt" - pcf <- parseCF "CaseFolding.txt" - scs <- case psc of - Left err -> print err >> return undefined - Right ms -> return ms - cfs <- case pcf of - Left err -> print err >> return undefined - Right ms -> return ms - h <- openFile oname WriteMode - let comments = map ("--" ++) $ - take 2 (cfComments cfs) ++ take 2 (scComments scs) - mapM_ (hPutStrLn h) $ - ["{-# LANGUAGE Rank2Types #-}" - ,"-- AUTOMATICALLY GENERATED - DO NOT EDIT" - ,"-- Generated by scripts/CaseMapping.hs"] ++ - comments ++ - ["" - ,"module Data.Text.Internal.Fusion.CaseMapping where" - ,"import Data.Char" - ,"import Data.Text.Internal.Fusion.Types" - ,""] - mapM_ (hPutStrLn h) (mapSC "upper" upper toUpper scs) - mapM_ (hPutStrLn h) (mapSC "lower" lower toLower scs) - mapM_ (hPutStrLn h) (mapSC "title" title toTitle scs) - mapM_ (hPutStrLn h) (mapCF cfs) - hClose h diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/scripts/SpecialCasing.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/scripts/SpecialCasing.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/scripts/SpecialCasing.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/scripts/SpecialCasing.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ --- This script processes the following source file: --- --- http://unicode.org/Public/UNIDATA/SpecialCasing.txt - -module SpecialCasing - ( - SpecialCasing(..) - , Case(..) - , parseSC - , mapSC - ) where - -import Arsec - -data SpecialCasing = SC { scComments :: [Comment], scCasing :: [Case] } - deriving (Show) - -data Case = Case { - code :: Char - , lower :: [Char] - , title :: [Char] - , upper :: [Char] - , conditions :: String - , name :: String - } deriving (Eq, Ord, Show) - -entries :: Parser SpecialCasing -entries = SC <$> many comment <*> many (entry <* many comment) - where - entry = Case <$> unichar <* semi - <*> unichars - <*> unichars - <*> unichars - <*> manyTill anyToken (string "# ") - <*> manyTill anyToken (char '\n') - -parseSC :: FilePath -> IO (Either ParseError SpecialCasing) -parseSC name = parse entries name <$> readFile name - -mapSC :: String -> (Case -> String) -> (Char -> Char) -> SpecialCasing - -> [String] -mapSC which access twiddle (SC _ ms) = - typ ++ (map nice . filter p $ ms) ++ [last] - where - typ = [which ++ "Mapping :: forall s. Char -> s -> Step (CC s) Char" - ,"{-# NOINLINE " ++ which ++ "Mapping #-}"] - last = which ++ "Mapping c s = Yield (to" ++ ucFirst which ++ " c) (CC s '\\0' '\\0')" - nice c = "-- " ++ name c ++ "\n" ++ - which ++ "Mapping " ++ showC (code c) ++ " s = Yield " ++ x ++ " (CC s " ++ y ++ " " ++ z ++ ")" - where [x,y,z] = (map showC . 
take 3) (access c ++ repeat '\0') - p c = [k] /= a && a /= [twiddle k] && null (conditions c) - where a = access c - k = code c - -ucFirst (c:cs) = toUpper c : cs -ucFirst [] = [] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Setup.lhs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Setup.lhs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/Setup.lhs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/Setup.lhs 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -#!/usr/bin/env runhaskell -> import Distribution.Simple -> main = defaultMain diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/cabal.config cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/cabal.config --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/cabal.config 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/cabal.config 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ --- These flags help to speed up building the test suite. - -documentation: False -executable-stripping: False -flags: developer -library-profiling: False diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/.ghci cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/.ghci --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/.ghci 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/.ghci 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -:set -isrc -i../.. diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/LiteralRuleTest.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/LiteralRuleTest.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/LiteralRuleTest.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/LiteralRuleTest.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -{-# LANGUAGE OverloadedStrings #-} - -module LiteralRuleTest where - -import Data.Text (Text) - --- This should produce 8 firings of the "TEXT literal" rule -strings :: [Text] -strings = [ "abstime", "aclitem", "bit", "bool", "box", "bpchar", "bytea", "char" ] - --- This should produce 7 firings of the "TEXT literal UTF8" rule -utf8Strings :: [Text] -utf8Strings = [ "\0abstime", "\0aclitem", "\xfefe bit", "\0bool", "\0box", "\0bpchar", "\0bytea" ] - --- This should produce 4 firings of the "TEXT empty literal" rule -empties :: [Text] -empties = [ "", "", "", "" ] - --- This should produce 5 firings of the "TEXT empty literal" rule ---singletons :: [Text] ---singletons = [ "a", "b", "c", "d", "e" ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Makefile cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Makefile --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Makefile 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -VCS = hg -count = 1000 - -all: coverage literal-rule-test - -literal-rule-test: - ./literal-rule-test.sh - -coverage: build coverage/hpc_index.html - -build: text-test-data - cabal configure -fhpc - cabal build - -text-test-data: -ifeq ($(VCS),git) - git clone https://github.com/bos/text-test-data.git -else - hg clone 
https://bitbucket.org/bos/text-test-data -endif - $(MAKE) -C text-test-data - -coverage/text-tests.tix: - -mkdir -p coverage - ./dist/build/text-tests/text-tests -a $(count) - mv text-tests.tix $@ - -coverage/text-tests-stdio.tix: - -mkdir -p coverage - ./scripts/cover-stdio.sh ./dist/build/text-tests-stdio/text-tests-stdio - mv text-tests-stdio.tix $@ - -coverage/coverage.tix: coverage/text-tests.tix coverage/text-tests-stdio.tix - hpc combine --output=$@ \ - --exclude=Main \ - coverage/text-tests.tix \ - coverage/text-tests-stdio.tix - -coverage/hpc_index.html: coverage/coverage.tix - hpc markup --destdir=coverage coverage/coverage.tix - -clean: - rm -rf dist coverage .hpc - -.PHONY: all build clean coverage literal-rule-test diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/scripts/cover-stdio.sh cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/scripts/cover-stdio.sh --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/scripts/cover-stdio.sh 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/scripts/cover-stdio.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,62 +0,0 @@ -#!/bin/bash - -if [[ $# < 1 ]]; then - echo "Usage: $0 " - exit 1 -fi - -exe=$1 - -rm -f $exe.tix - -f=$(mktemp stdio-f.XXXXXX) -g=$(mktemp stdio-g.XXXXXX) - -for t in T TL; do - echo $t.readFile > $f - $exe $t.readFile $f > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.readFile 1>&2 - fi - - $exe $t.writeFile $f $t.writeFile - echo -n $t.writeFile > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.writeFile 1>&2 - fi - - echo -n quux > $f - $exe $t.appendFile $f $t.appendFile - echo -n quux$t.appendFile > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.appendFile 1>&2 - fi - - echo $t.interact | $exe $t.interact > $f - echo $t.interact > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.interact 1>&2 - fi - - echo $t.getContents | $exe $t.getContents > $f - echo $t.getContents > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.getContents 1>&2 - fi - - echo $t.getLine | $exe $t.getLine > $f - echo $t.getLine > $g - if ! diff -u $f $g; then - errs=$((errs+1)) - echo FAIL: $t.getLine 1>&2 - fi -done - -rm -f $f $g - -exit $errs diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/IO.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/IO.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/IO.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/IO.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ --- | Program which exposes some haskell functions as an exutable. The results --- and coverage of this module is meant to be checked using a shell script. 
--- -module Main - ( - main - ) where - -import System.Environment (getArgs) -import System.Exit (exitFailure) -import System.IO (hPutStrLn, stderr) -import qualified Data.Text as T -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.IO as TL - -main :: IO () -main = do - args <- getArgs - case args of - ["T.readFile", name] -> T.putStr =<< T.readFile name - ["T.writeFile", name, t] -> T.writeFile name (T.pack t) - ["T.appendFile", name, t] -> T.appendFile name (T.pack t) - ["T.interact"] -> T.interact id - ["T.getContents"] -> T.putStr =<< T.getContents - ["T.getLine"] -> T.putStrLn =<< T.getLine - - ["TL.readFile", name] -> TL.putStr =<< TL.readFile name - ["TL.writeFile", name, t] -> TL.writeFile name (TL.pack t) - ["TL.appendFile", name, t] -> TL.appendFile name (TL.pack t) - ["TL.interact"] -> TL.interact id - ["TL.getContents"] -> TL.putStr =<< TL.getContents - ["TL.getLine"] -> TL.putStrLn =<< TL.getLine - _ -> hPutStrLn stderr "invalid directive!" >> exitFailure diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/Properties/Mul.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/Properties/Mul.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/Properties/Mul.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/Properties/Mul.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -{-# LANGUAGE ScopedTypeVariables #-} - -module Tests.Properties.Mul (tests) where - -import Control.Applicative ((<$>), pure) -import Control.Exception as E (SomeException, catch, evaluate) -import Data.Int (Int32, Int64) -import Data.Text.Internal (mul, mul32, mul64) -import System.IO.Unsafe (unsafePerformIO) -import Test.Framework (Test) -import Test.Framework.Providers.QuickCheck2 (testProperty) -import Test.QuickCheck hiding ((.&.)) - -mulRef :: (Integral a, Bounded a) => a -> a -> Maybe a -mulRef a b - | ab < bot || ab > top = Nothing - | otherwise = Just (fromIntegral ab) - where ab = fromIntegral a * fromIntegral b - top = fromIntegral (maxBound `asTypeOf` a) :: Integer - bot = fromIntegral (minBound `asTypeOf` a) :: Integer - -eval :: (a -> b -> c) -> a -> b -> Maybe c -eval f a b = unsafePerformIO $ - (Just <$> evaluate (f a b)) `E.catch` (\(_::SomeException) -> pure Nothing) - -t_mul32 :: Int32 -> Int32 -> Property -t_mul32 a b = mulRef a b === eval mul32 a b - -t_mul64 :: Int64 -> Int64 -> Property -t_mul64 a b = mulRef a b === eval mul64 a b - -t_mul :: Int -> Int -> Property -t_mul a b = mulRef a b === eval mul a b - -tests :: [Test] -tests = [ - testProperty "t_mul" t_mul - , testProperty "t_mul32" t_mul32 - , testProperty "t_mul64" t_mul64 - ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/Properties.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/Properties.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/Properties.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/Properties.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,1460 +0,0 @@ --- | QuickCheck properties for the text library. 
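The removed property modules follow a reference-implementation style: an optimised function is checked against a simple specification. A minimal, self-contained sketch in that spirit (standard QuickCheck API only; the property is illustrative and not taken from the patch):

import Test.QuickCheck
import qualified Data.Text as T

-- pack replaces invalid scalar values one-for-one, so length is preserved.
prop_pack_length :: String -> Property
prop_pack_length s = T.length (T.pack s) === length s

main :: IO ()
main = quickCheck prop_pack_length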
- -{-# LANGUAGE CPP #-} -{-# LANGUAGE BangPatterns, FlexibleInstances, OverloadedStrings, - ScopedTypeVariables, TypeSynonymInstances #-} -{-# OPTIONS_GHC -fno-enable-rewrite-rules -fno-warn-missing-signatures #-} -module Tests.Properties - ( - tests - ) where - -import Control.Applicative ((<$>), (<*>)) -import Control.Arrow ((***), first, second) -import Data.Bits ((.&.)) -import Data.Char (chr, isDigit, isHexDigit, isLower, isSpace, isLetter, isUpper, ord) -import Data.Int (Int8, Int16, Int32, Int64) -import Data.Monoid (Monoid(..)) -import Data.String (IsString(fromString)) -import Data.Text.Encoding.Error -import Data.Text.Foreign -import Data.Text.Internal.Encoding.Utf8 -import Data.Text.Internal.Fusion.Size -import Data.Text.Internal.Search (indices) -import Data.Text.Lazy.Read as TL -import Data.Text.Read as T -import Data.Word (Word, Word8, Word16, Word32, Word64) -import Data.Maybe (mapMaybe) -import Numeric (showEFloat, showFFloat, showGFloat, showHex) -import Prelude hiding (replicate) -import Test.Framework (Test, testGroup) -import Test.Framework.Providers.QuickCheck2 (testProperty) -import Test.QuickCheck hiding ((.&.)) -import Test.QuickCheck.Monadic -import Test.QuickCheck.Property (Property(..)) -import Test.QuickCheck.Unicode (char) -import Tests.QuickCheckUtils -import Tests.Utils -import Text.Show.Functions () -import qualified Control.Exception as Exception -import qualified Data.Bits as Bits (shiftL, shiftR) -import qualified Data.ByteString as B -import qualified Data.ByteString.Lazy as BL -import qualified Data.Char as C -import qualified Data.List as L -import qualified Data.Text as T -import qualified Data.Text.Encoding as E -import qualified Data.Text.IO as T -import qualified Data.Text.Internal.Fusion as S -import qualified Data.Text.Internal.Fusion.Common as S -import qualified Data.Text.Internal.Lazy.Fusion as SL -import qualified Data.Text.Internal.Lazy.Search as S (indices) -import qualified Data.Text.Internal.Unsafe.Shift as U -import qualified Data.Text.Lazy as TL -import qualified Data.Text.Lazy.Builder as TB -import qualified Data.Text.Lazy.Builder.Int as TB -import qualified Data.Text.Lazy.Builder.RealFloat as TB -import qualified Data.Text.Lazy.Encoding as EL -import qualified Data.Text.Lazy.IO as TL -import qualified System.IO as IO -import qualified Tests.Properties.Mul as Mul -import qualified Tests.SlowFunctions as Slow - -t_pack_unpack = (T.unpack . T.pack) `eq` id -tl_pack_unpack = (TL.unpack . TL.pack) `eq` id -t_stream_unstream = (S.unstream . S.stream) `eq` id -tl_stream_unstream = (SL.unstream . SL.stream) `eq` id -t_reverse_stream t = (S.reverse . S.reverseStream) t === t -t_singleton c = [c] === (T.unpack . T.singleton) c -tl_singleton c = [c] === (TL.unpack . TL.singleton) c -tl_unstreamChunks x = f 11 x === f 1000 x - where f n = SL.unstreamChunks n . S.streamList -tl_chunk_unchunk = (TL.fromChunks . TL.toChunks) `eq` id -tl_from_to_strict = (TL.fromStrict . TL.toStrict) `eq` id - --- Note: this silently truncates code-points > 255 to 8-bit due to 'B.pack' -encodeL1 :: T.Text -> B.ByteString -encodeL1 = B.pack . map (fromIntegral . fromEnum) . T.unpack -encodeLazyL1 :: TL.Text -> BL.ByteString -encodeLazyL1 = BL.fromChunks . map encodeL1 . 
TL.toChunks - -t_ascii t = E.decodeASCII (E.encodeUtf8 a) === a - where a = T.map (\c -> chr (ord c `mod` 128)) t -tl_ascii t = EL.decodeASCII (EL.encodeUtf8 a) === a - where a = TL.map (\c -> chr (ord c `mod` 128)) t -t_latin1 t = E.decodeLatin1 (encodeL1 a) === a - where a = T.map (\c -> chr (ord c `mod` 256)) t -tl_latin1 t = EL.decodeLatin1 (encodeLazyL1 a) === a - where a = TL.map (\c -> chr (ord c `mod` 256)) t -t_utf8 = forAll genUnicode $ (E.decodeUtf8 . E.encodeUtf8) `eq` id -t_utf8' = forAll genUnicode $ (E.decodeUtf8' . E.encodeUtf8) `eq` (id . Right) -tl_utf8 = forAll genUnicode $ (EL.decodeUtf8 . EL.encodeUtf8) `eq` id -tl_utf8' = forAll genUnicode $ (EL.decodeUtf8' . EL.encodeUtf8) `eq` (id . Right) -t_utf16LE = forAll genUnicode $ (E.decodeUtf16LE . E.encodeUtf16LE) `eq` id -tl_utf16LE = forAll genUnicode $ (EL.decodeUtf16LE . EL.encodeUtf16LE) `eq` id -t_utf16BE = forAll genUnicode $ (E.decodeUtf16BE . E.encodeUtf16BE) `eq` id -tl_utf16BE = forAll genUnicode $ (EL.decodeUtf16BE . EL.encodeUtf16BE) `eq` id -t_utf32LE = forAll genUnicode $ (E.decodeUtf32LE . E.encodeUtf32LE) `eq` id -tl_utf32LE = forAll genUnicode $ (EL.decodeUtf32LE . EL.encodeUtf32LE) `eq` id -t_utf32BE = forAll genUnicode $ (E.decodeUtf32BE . E.encodeUtf32BE) `eq` id -tl_utf32BE = forAll genUnicode $ (EL.decodeUtf32BE . EL.encodeUtf32BE) `eq` id - -t_utf8_incr = forAll genUnicode $ \s (Positive n) -> (recode n `eq` id) s - where recode n = T.concat . map fst . feedChunksOf n E.streamDecodeUtf8 . - E.encodeUtf8 - -feedChunksOf :: Int -> (B.ByteString -> E.Decoding) -> B.ByteString - -> [(T.Text, B.ByteString)] -feedChunksOf n f bs - | B.null bs = [] - | otherwise = let (x,y) = B.splitAt n bs - E.Some t b f' = f x - in (t,b) : feedChunksOf n f' y - -t_utf8_undecoded = forAll genUnicode $ \t -> - let b = E.encodeUtf8 t - ls = concatMap (leftover . E.encodeUtf8 . T.singleton) . T.unpack $ t - leftover = (++ [B.empty]) . init . tail . B.inits - in (map snd . feedChunksOf 1 E.streamDecodeUtf8) b === ls - -data Badness = Solo | Leading | Trailing - deriving (Eq, Show) - -instance Arbitrary Badness where - arbitrary = elements [Solo, Leading, Trailing] - -t_utf8_err :: Badness -> Maybe DecodeErr -> Property -t_utf8_err bad mde = do - let gen = case bad of - Solo -> genInvalidUTF8 - Leading -> B.append <$> genInvalidUTF8 <*> genUTF8 - Trailing -> B.append <$> genUTF8 <*> genInvalidUTF8 - genUTF8 = E.encodeUtf8 <$> genUnicode - forAll gen $ \bs -> MkProperty $ - case mde of - -- generate an invalid character - Nothing -> do - c <- choose ('\x10000', maxBound) - let onErr _ _ = Just c - unProperty . monadicIO $ do - l <- run $ let len = T.length (E.decodeUtf8With onErr bs) - in (len `seq` return (Right len)) `Exception.catch` - (\(e::Exception.SomeException) -> return (Left e)) - assert $ case l of - Left err -> - "non-BMP replacement characters not supported" `T.isInfixOf` T.pack (show err) - Right _ -> False - - -- generate a valid onErr - Just de -> do - onErr <- genDecodeErr de - unProperty . monadicIO $ do - l <- run $ let len = T.length (E.decodeUtf8With onErr bs) - in (len `seq` return (Right len)) `Exception.catch` - (\(e::UnicodeException) -> return (Left e)) - assert $ case l of - Left err -> length (show err) >= 0 - Right _ -> de /= Strict - -t_utf8_err' :: B.ByteString -> Property -t_utf8_err' bs = monadicIO . 
assert $ case E.decodeUtf8' bs of - Left err -> length (show err) >= 0 - Right t -> T.length t >= 0 - -genInvalidUTF8 :: Gen B.ByteString -genInvalidUTF8 = B.pack <$> oneof [ - -- invalid leading byte of a 2-byte sequence - (:) <$> choose (0xC0, 0xC1) <*> upTo 1 contByte - -- invalid leading byte of a 4-byte sequence - , (:) <$> choose (0xF5, 0xFF) <*> upTo 3 contByte - -- 4-byte sequence greater than U+10FFFF - , do k <- choose (0x11, 0x13) - let w0 = 0xF0 + (k `Bits.shiftR` 2) - w1 = 0x80 + ((k .&. 3) `Bits.shiftL` 4) - ([w0,w1]++) <$> vectorOf 2 contByte - -- continuation bytes without a start byte - , listOf1 contByte - -- short 2-byte sequence - , (:[]) <$> choose (0xC2, 0xDF) - -- short 3-byte sequence - , (:) <$> choose (0xE0, 0xEF) <*> upTo 1 contByte - -- short 4-byte sequence - , (:) <$> choose (0xF0, 0xF4) <*> upTo 2 contByte - -- overlong encoding - , do k <- choose (0,0xFFFF) - let c = chr k - case k of - _ | k < 0x80 -> oneof [ let (w,x) = ord2 c in return [w,x] - , let (w,x,y) = ord3 c in return [w,x,y] - , let (w,x,y,z) = ord4 c in return [w,x,y,z] ] - | k < 0x7FF -> oneof [ let (w,x,y) = ord3 c in return [w,x,y] - , let (w,x,y,z) = ord4 c in return [w,x,y,z] ] - | otherwise -> let (w,x,y,z) = ord4 c in return [w,x,y,z] - ] - where - contByte = (0x80 +) <$> choose (0, 0x3f) - upTo n gen = do - k <- choose (0,n) - vectorOf k gen - --- See http://unicode.org/faq/utf_bom.html#gen8 --- A sequence such as <110xxxxx2 0xxxxxxx2> is illegal ... --- When faced with this illegal byte sequence ... a UTF-8 conformant process --- must treat the first byte 110xxxxx2 as an illegal termination error --- (e.g. filter it out or replace by 0xFFFD) ... --- ... and continue processing at the second byte 0xxxxxxx2 -t_decode_with_error2 = - E.decodeUtf8With (\_ _ -> Just 'x') (B.pack [0xC2, 97]) === "xa" -t_decode_with_error3 = - E.decodeUtf8With (\_ _ -> Just 'x') (B.pack [0xE0, 97, 97]) === "xaa" -t_decode_with_error4 = - E.decodeUtf8With (\_ _ -> Just 'x') (B.pack [0xF0, 97, 97, 97]) === "xaaa" - -t_decode_with_error2' = - case E.streamDecodeUtf8With (\_ _ -> Just 'x') (B.pack [0xC2, 97]) of - E.Some x _ _ -> x === "xa" -t_decode_with_error3' = - case E.streamDecodeUtf8With (\_ _ -> Just 'x') (B.pack [0xC2, 97, 97]) of - E.Some x _ _ -> x === "xaa" -t_decode_with_error4' = - case E.streamDecodeUtf8With (\_ _ -> Just 'x') (B.pack [0xC2, 97, 97, 97]) of - E.Some x _ _ -> x === "xaaa" - -t_infix_concat bs1 text bs2 = - forAll (genDecodeErr Replace) $ \onErr -> - text `T.isInfixOf` - E.decodeUtf8With onErr (B.concat [bs1, E.encodeUtf8 text, bs2]) - -s_Eq s = (s==) `eq` ((S.streamList s==) . S.streamList) - where _types = s :: String -sf_Eq p s = - ((L.filter p s==) . L.filter p) `eq` - (((S.filter p $ S.streamList s)==) . S.filter p . S.streamList) -t_Eq s = (s==) `eq` ((T.pack s==) . T.pack) -tl_Eq s = (s==) `eq` ((TL.pack s==) . TL.pack) -s_Ord s = (compare s) `eq` (compare (S.streamList s) . S.streamList) - where _types = s :: String -sf_Ord p s = - ((compare $ L.filter p s) . L.filter p) `eq` - (compare (S.filter p $ S.streamList s) . S.filter p . S.streamList) -t_Ord s = (compare s) `eq` (compare (T.pack s) . T.pack) -tl_Ord s = (compare s) `eq` (compare (TL.pack s) . TL.pack) -t_Read = id `eq` (T.unpack . read . show) -tl_Read = id `eq` (TL.unpack . read . show) -t_Show = show `eq` (show . T.pack) -tl_Show = show `eq` (show . TL.pack) -t_mappend s = mappend s`eqP` (unpackS . mappend (T.pack s)) -tl_mappend s = mappend s`eqP` (unpackS . 
mappend (TL.pack s)) -t_mconcat = unsquare $ - mconcat `eq` (unpackS . mconcat . L.map T.pack) -tl_mconcat = unsquare $ - mconcat `eq` (unpackS . mconcat . L.map TL.pack) -t_mempty = mempty === (unpackS (mempty :: T.Text)) -tl_mempty = mempty === (unpackS (mempty :: TL.Text)) -t_IsString = fromString `eqP` (T.unpack . fromString) -tl_IsString = fromString `eqP` (TL.unpack . fromString) - -s_cons x = (x:) `eqP` (unpackS . S.cons x) -s_cons_s x = (x:) `eqP` (unpackS . S.unstream . S.cons x) -sf_cons p x = ((x:) . L.filter p) `eqP` (unpackS . S.cons x . S.filter p) -t_cons x = (x:) `eqP` (unpackS . T.cons x) -tl_cons x = (x:) `eqP` (unpackS . TL.cons x) -s_snoc x = (++ [x]) `eqP` (unpackS . (flip S.snoc) x) -t_snoc x = (++ [x]) `eqP` (unpackS . (flip T.snoc) x) -tl_snoc x = (++ [x]) `eqP` (unpackS . (flip TL.snoc) x) -s_append s = (s++) `eqP` (unpackS . S.append (S.streamList s)) -s_append_s s = (s++) `eqP` - (unpackS . S.unstream . S.append (S.streamList s)) -sf_append p s = (L.filter p s++) `eqP` - (unpackS . S.append (S.filter p $ S.streamList s)) -t_append s = (s++) `eqP` (unpackS . T.append (packS s)) - -uncons (x:xs) = Just (x,xs) -uncons _ = Nothing - -s_uncons = uncons `eqP` (fmap (second unpackS) . S.uncons) -sf_uncons p = (uncons . L.filter p) `eqP` - (fmap (second unpackS) . S.uncons . S.filter p) -t_uncons = uncons `eqP` (fmap (second unpackS) . T.uncons) -tl_uncons = uncons `eqP` (fmap (second unpackS) . TL.uncons) - -unsnoc xs@(_:_) = Just (init xs, last xs) -unsnoc [] = Nothing - -t_unsnoc = unsnoc `eqP` (fmap (first unpackS) . T.unsnoc) -tl_unsnoc = unsnoc `eqP` (fmap (first unpackS) . TL.unsnoc) - -s_head = head `eqP` S.head -sf_head p = (head . L.filter p) `eqP` (S.head . S.filter p) -t_head = head `eqP` T.head -tl_head = head `eqP` TL.head -s_last = last `eqP` S.last -sf_last p = (last . L.filter p) `eqP` (S.last . S.filter p) -t_last = last `eqP` T.last -tl_last = last `eqP` TL.last -s_tail = tail `eqP` (unpackS . S.tail) -s_tail_s = tail `eqP` (unpackS . S.unstream . S.tail) -sf_tail p = (tail . L.filter p) `eqP` (unpackS . S.tail . S.filter p) -t_tail = tail `eqP` (unpackS . T.tail) -tl_tail = tail `eqP` (unpackS . TL.tail) -s_init = init `eqP` (unpackS . S.init) -s_init_s = init `eqP` (unpackS . S.unstream . S.init) -sf_init p = (init . L.filter p) `eqP` (unpackS . S.init . S.filter p) -t_init = init `eqP` (unpackS . T.init) -tl_init = init `eqP` (unpackS . TL.init) -s_null = null `eqP` S.null -sf_null p = (null . L.filter p) `eqP` (S.null . S.filter p) -t_null = null `eqP` T.null -tl_null = null `eqP` TL.null -s_length = length `eqP` S.length -sf_length p = (length . L.filter p) `eqP` (S.length . S.filter p) -sl_length = (fromIntegral . length) `eqP` SL.length -t_length = length `eqP` T.length -tl_length = L.genericLength `eqP` TL.length -t_compareLength t = (compare (T.length t)) `eq` T.compareLength t -tl_compareLength t= (compare (TL.length t)) `eq` TL.compareLength t - -s_map f = map f `eqP` (unpackS . S.map f) -s_map_s f = map f `eqP` (unpackS . S.unstream . S.map f) -sf_map p f = (map f . L.filter p) `eqP` (unpackS . S.map f . S.filter p) -t_map f = map f `eqP` (unpackS . T.map f) -tl_map f = map f `eqP` (unpackS . TL.map f) -s_intercalate c = unsquare $ - L.intercalate c `eq` - (unpackS . S.intercalate (packS c) . map packS) -t_intercalate c = unsquare $ - L.intercalate c `eq` - (unpackS . T.intercalate (packS c) . map packS) -tl_intercalate c = unsquare $ - L.intercalate c `eq` - (unpackS . TL.intercalate (TL.pack c) . 
map TL.pack) -s_intersperse c = L.intersperse c `eqP` - (unpackS . S.intersperse c) -s_intersperse_s c = L.intersperse c `eqP` - (unpackS . S.unstream . S.intersperse c) -sf_intersperse p c= (L.intersperse c . L.filter p) `eqP` - (unpackS . S.intersperse c . S.filter p) -t_intersperse c = unsquare $ - L.intersperse c `eqP` (unpackS . T.intersperse c) -tl_intersperse c = unsquare $ - L.intersperse c `eqP` (unpackS . TL.intersperse c) -t_transpose = unsquare $ - L.transpose `eq` (map unpackS . T.transpose . map packS) -tl_transpose = unsquare $ - L.transpose `eq` (map unpackS . TL.transpose . map TL.pack) -t_reverse = L.reverse `eqP` (unpackS . T.reverse) -tl_reverse = L.reverse `eqP` (unpackS . TL.reverse) -t_reverse_short n = L.reverse `eqP` (unpackS . S.reverse . shorten n . S.stream) - -t_replace s d = (L.intercalate d . splitOn s) `eqP` - (unpackS . T.replace (T.pack s) (T.pack d)) -tl_replace s d = (L.intercalate d . splitOn s) `eqP` - (unpackS . TL.replace (TL.pack s) (TL.pack d)) - -splitOn :: (Eq a) => [a] -> [a] -> [[a]] -splitOn pat src0 - | l == 0 = error "splitOn: empty" - | otherwise = go src0 - where - l = length pat - go src = search 0 src - where - search _ [] = [src] - search !n s@(_:s') - | pat `L.isPrefixOf` s = take n src : go (drop l s) - | otherwise = search (n+1) s' - -s_toCaseFold_length xs = S.length (S.toCaseFold s) >= length xs - where s = S.streamList xs -sf_toCaseFold_length p xs = - (S.length . S.toCaseFold . S.filter p $ s) >= (length . L.filter p $ xs) - where s = S.streamList xs -t_toCaseFold_length t = T.length (T.toCaseFold t) >= T.length t -tl_toCaseFold_length t = TL.length (TL.toCaseFold t) >= TL.length t -t_toLower_length t = T.length (T.toLower t) >= T.length t -t_toLower_lower t = p (T.toLower t) >= p t - where p = T.length . T.filter isLower -tl_toLower_lower t = p (TL.toLower t) >= p t - where p = TL.length . TL.filter isLower -t_toUpper_length t = T.length (T.toUpper t) >= T.length t -t_toUpper_upper t = p (T.toUpper t) >= p t - where p = T.length . T.filter isUpper -tl_toUpper_upper t = p (TL.toUpper t) >= p t - where p = TL.length . TL.filter isUpper -t_toTitle_title t = all (<= 1) (caps w) - where caps = fmap (T.length . T.filter isUpper) . T.words . T.toTitle - -- TIL: there exist uppercase-only letters - w = T.filter (\c -> if C.isUpper c then C.toLower c /= c else True) t -t_toTitle_1stNotLower = and . notLow . T.toTitle . T.filter stable - where notLow = mapMaybe (fmap (not . isLower) . (T.find isLetter)) . T.words - -- Surprise! The Spanish/Portuguese ordinal indicators changed - -- from category Ll (letter, lowercase) to Lo (letter, other) - -- in Unicode 7.0 - -- Oh, and there exist lowercase-only letters (see previous test) - stable c = if isLower c - then C.toUpper c /= c - else c /= '\170' && c /= '\186' - -justifyLeft k c xs = xs ++ L.replicate (k - length xs) c -justifyRight m n xs = L.replicate (m - length xs) n ++ xs -center k c xs - | len >= k = xs - | otherwise = L.replicate l c ++ xs ++ L.replicate r c - where len = length xs - d = k - len - r = d `div` 2 - l = d - r - -s_justifyLeft k c = justifyLeft j c `eqP` (unpackS . S.justifyLeftI j c) - where j = fromIntegral (k :: Word8) -s_justifyLeft_s k c = justifyLeft j c `eqP` - (unpackS . S.unstream . S.justifyLeftI j c) - where j = fromIntegral (k :: Word8) -sf_justifyLeft p k c = (justifyLeft j c . L.filter p) `eqP` - (unpackS . S.justifyLeftI j c . S.filter p) - where j = fromIntegral (k :: Word8) -t_justifyLeft k c = justifyLeft j c `eqP` (unpackS . 
T.justifyLeft j c) - where j = fromIntegral (k :: Word8) -tl_justifyLeft k c = justifyLeft j c `eqP` - (unpackS . TL.justifyLeft (fromIntegral j) c) - where j = fromIntegral (k :: Word8) -t_justifyRight k c = justifyRight j c `eqP` (unpackS . T.justifyRight j c) - where j = fromIntegral (k :: Word8) -tl_justifyRight k c = justifyRight j c `eqP` - (unpackS . TL.justifyRight (fromIntegral j) c) - where j = fromIntegral (k :: Word8) -t_center k c = center j c `eqP` (unpackS . T.center j c) - where j = fromIntegral (k :: Word8) -tl_center k c = center j c `eqP` (unpackS . TL.center (fromIntegral j) c) - where j = fromIntegral (k :: Word8) - -sf_foldl p f z = (L.foldl f z . L.filter p) `eqP` (S.foldl f z . S.filter p) - where _types = f :: Char -> Char -> Char -t_foldl f z = L.foldl f z `eqP` (T.foldl f z) - where _types = f :: Char -> Char -> Char -tl_foldl f z = L.foldl f z `eqP` (TL.foldl f z) - where _types = f :: Char -> Char -> Char -sf_foldl' p f z = (L.foldl' f z . L.filter p) `eqP` - (S.foldl' f z . S.filter p) - where _types = f :: Char -> Char -> Char -t_foldl' f z = L.foldl' f z `eqP` T.foldl' f z - where _types = f :: Char -> Char -> Char -tl_foldl' f z = L.foldl' f z `eqP` TL.foldl' f z - where _types = f :: Char -> Char -> Char -sf_foldl1 p f = (L.foldl1 f . L.filter p) `eqP` (S.foldl1 f . S.filter p) -t_foldl1 f = L.foldl1 f `eqP` T.foldl1 f -tl_foldl1 f = L.foldl1 f `eqP` TL.foldl1 f -sf_foldl1' p f = (L.foldl1' f . L.filter p) `eqP` (S.foldl1' f . S.filter p) -t_foldl1' f = L.foldl1' f `eqP` T.foldl1' f -tl_foldl1' f = L.foldl1' f `eqP` TL.foldl1' f -sf_foldr p f z = (L.foldr f z . L.filter p) `eqP` (S.foldr f z . S.filter p) - where _types = f :: Char -> Char -> Char -t_foldr f z = L.foldr f z `eqP` T.foldr f z - where _types = f :: Char -> Char -> Char -tl_foldr f z = unsquare $ - L.foldr f z `eqP` TL.foldr f z - where _types = f :: Char -> Char -> Char -sf_foldr1 p f = unsquare $ - (L.foldr1 f . L.filter p) `eqP` (S.foldr1 f . S.filter p) -t_foldr1 f = L.foldr1 f `eqP` T.foldr1 f -tl_foldr1 f = unsquare $ - L.foldr1 f `eqP` TL.foldr1 f - -s_concat_s = unsquare $ - L.concat `eq` (unpackS . S.unstream . S.concat . map packS) -sf_concat p = unsquare $ - (L.concat . map (L.filter p)) `eq` - (unpackS . S.concat . map (S.filter p . packS)) -t_concat = unsquare $ - L.concat `eq` (unpackS . T.concat . map packS) -tl_concat = unsquare $ - L.concat `eq` (unpackS . TL.concat . map TL.pack) -sf_concatMap p f = unsquare $ (L.concatMap f . L.filter p) `eqP` - (unpackS . S.concatMap (packS . f) . S.filter p) -t_concatMap f = unsquare $ - L.concatMap f `eqP` (unpackS . T.concatMap (packS . f)) -tl_concatMap f = unsquare $ - L.concatMap f `eqP` (unpackS . TL.concatMap (TL.pack . f)) -sf_any q p = (L.any p . L.filter q) `eqP` (S.any p . S.filter q) -t_any p = L.any p `eqP` T.any p -tl_any p = L.any p `eqP` TL.any p -sf_all q p = (L.all p . L.filter q) `eqP` (S.all p . S.filter q) -t_all p = L.all p `eqP` T.all p -tl_all p = L.all p `eqP` TL.all p -sf_maximum p = (L.maximum . L.filter p) `eqP` (S.maximum . S.filter p) -t_maximum = L.maximum `eqP` T.maximum -tl_maximum = L.maximum `eqP` TL.maximum -sf_minimum p = (L.minimum . L.filter p) `eqP` (S.minimum . S.filter p) -t_minimum = L.minimum `eqP` T.minimum -tl_minimum = L.minimum `eqP` TL.minimum - -sf_scanl p f z = (L.scanl f z . L.filter p) `eqP` - (unpackS . S.scanl f z . S.filter p) -t_scanl f z = L.scanl f z `eqP` (unpackS . T.scanl f z) -tl_scanl f z = L.scanl f z `eqP` (unpackS . TL.scanl f z) -t_scanl1 f = L.scanl1 f `eqP` (unpackS . 
T.scanl1 f) -tl_scanl1 f = L.scanl1 f `eqP` (unpackS . TL.scanl1 f) -t_scanr f z = L.scanr f z `eqP` (unpackS . T.scanr f z) -tl_scanr f z = L.scanr f z `eqP` (unpackS . TL.scanr f z) -t_scanr1 f = L.scanr1 f `eqP` (unpackS . T.scanr1 f) -tl_scanr1 f = L.scanr1 f `eqP` (unpackS . TL.scanr1 f) - -t_mapAccumL f z = L.mapAccumL f z `eqP` (second unpackS . T.mapAccumL f z) - where _types = f :: Int -> Char -> (Int,Char) -tl_mapAccumL f z = L.mapAccumL f z `eqP` (second unpackS . TL.mapAccumL f z) - where _types = f :: Int -> Char -> (Int,Char) -t_mapAccumR f z = L.mapAccumR f z `eqP` (second unpackS . T.mapAccumR f z) - where _types = f :: Int -> Char -> (Int,Char) -tl_mapAccumR f z = L.mapAccumR f z `eqP` (second unpackS . TL.mapAccumR f z) - where _types = f :: Int -> Char -> (Int,Char) - -tl_repeat n = (L.take m . L.repeat) `eq` - (unpackS . TL.take (fromIntegral m) . TL.repeat) - where m = fromIntegral (n :: Word8) - -replicate n l = concat (L.replicate n l) - -s_replicate n = replicate m `eq` - (unpackS . S.replicateI (fromIntegral m) . packS) - where m = fromIntegral (n :: Word8) -t_replicate n = replicate m `eq` (unpackS . T.replicate m . packS) - where m = fromIntegral (n :: Word8) -tl_replicate n = replicate m `eq` - (unpackS . TL.replicate (fromIntegral m) . packS) - where m = fromIntegral (n :: Word8) - -tl_cycle n = (L.take m . L.cycle) `eq` - (unpackS . TL.take (fromIntegral m) . TL.cycle . packS) - where m = fromIntegral (n :: Word8) - -tl_iterate f n = (L.take m . L.iterate f) `eq` - (unpackS . TL.take (fromIntegral m) . TL.iterate f) - where m = fromIntegral (n :: Word8) - -unf :: Int -> Char -> Maybe (Char, Char) -unf n c | fromEnum c * 100 > n = Nothing - | otherwise = Just (c, succ c) - -t_unfoldr n = L.unfoldr (unf m) `eq` (unpackS . T.unfoldr (unf m)) - where m = fromIntegral (n :: Word16) -tl_unfoldr n = L.unfoldr (unf m) `eq` (unpackS . TL.unfoldr (unf m)) - where m = fromIntegral (n :: Word16) -t_unfoldrN n m = (L.take i . L.unfoldr (unf j)) `eq` - (unpackS . T.unfoldrN i (unf j)) - where i = fromIntegral (n :: Word16) - j = fromIntegral (m :: Word16) -tl_unfoldrN n m = (L.take i . L.unfoldr (unf j)) `eq` - (unpackS . TL.unfoldrN (fromIntegral i) (unf j)) - where i = fromIntegral (n :: Word16) - j = fromIntegral (m :: Word16) - -unpack2 :: (Stringy s) => (s,s) -> (String,String) -unpack2 = unpackS *** unpackS - -s_take n = L.take n `eqP` (unpackS . S.take n) -s_take_s m = L.take n `eqP` (unpackS . S.unstream . S.take n) - where n = small m -sf_take p n = (L.take n . L.filter p) `eqP` - (unpackS . S.take n . S.filter p) -t_take n = L.take n `eqP` (unpackS . T.take n) -t_takeEnd n = (L.reverse . L.take n . L.reverse) `eqP` - (unpackS . T.takeEnd n) -tl_take n = L.take n `eqP` (unpackS . TL.take (fromIntegral n)) -tl_takeEnd n = (L.reverse . L.take (fromIntegral n) . L.reverse) `eqP` - (unpackS . TL.takeEnd n) -s_drop n = L.drop n `eqP` (unpackS . S.drop n) -s_drop_s m = L.drop n `eqP` (unpackS . S.unstream . S.drop n) - where n = small m -sf_drop p n = (L.drop n . L.filter p) `eqP` - (unpackS . S.drop n . S.filter p) -t_drop n = L.drop n `eqP` (unpackS . T.drop n) -t_dropEnd n = (L.reverse . L.drop n . L.reverse) `eqP` - (unpackS . T.dropEnd n) -tl_drop n = L.drop n `eqP` (unpackS . TL.drop (fromIntegral n)) -tl_dropEnd n = (L.reverse . L.drop n . L.reverse) `eqP` - (unpackS . TL.dropEnd (fromIntegral n)) -s_take_drop m = (L.take n . L.drop n) `eqP` (unpackS . S.take n . S.drop n) - where n = small m -s_take_drop_s m = (L.take n . L.drop n) `eqP` - (unpackS . S.unstream . 
S.take n . S.drop n) - where n = small m -s_takeWhile p = L.takeWhile p `eqP` (unpackS . S.takeWhile p) -s_takeWhile_s p = L.takeWhile p `eqP` (unpackS . S.unstream . S.takeWhile p) -sf_takeWhile q p = (L.takeWhile p . L.filter q) `eqP` - (unpackS . S.takeWhile p . S.filter q) -noMatch = do - c <- char - d <- suchThat char (/= c) - return (c,d) -t_takeWhile p = L.takeWhile p `eqP` (unpackS . T.takeWhile p) -tl_takeWhile p = L.takeWhile p `eqP` (unpackS . TL.takeWhile p) -t_takeWhileEnd p = (L.reverse . L.takeWhile p . L.reverse) `eqP` - (unpackS . T.takeWhileEnd p) -t_takeWhileEnd_null t = forAll noMatch $ \(c,d) -> T.null $ - T.takeWhileEnd (==d) (T.snoc t c) -tl_takeWhileEnd p = (L.reverse . L.takeWhile p . L.reverse) `eqP` - (unpackS . TL.takeWhileEnd p) -tl_takeWhileEnd_null t = forAll noMatch $ \(c,d) -> TL.null $ - TL.takeWhileEnd (==d) (TL.snoc t c) -s_dropWhile p = L.dropWhile p `eqP` (unpackS . S.dropWhile p) -s_dropWhile_s p = L.dropWhile p `eqP` (unpackS . S.unstream . S.dropWhile p) -sf_dropWhile q p = (L.dropWhile p . L.filter q) `eqP` - (unpackS . S.dropWhile p . S.filter q) -t_dropWhile p = L.dropWhile p `eqP` (unpackS . T.dropWhile p) -tl_dropWhile p = L.dropWhile p `eqP` (unpackS . S.dropWhile p) -t_dropWhileEnd p = (L.reverse . L.dropWhile p . L.reverse) `eqP` - (unpackS . T.dropWhileEnd p) -tl_dropWhileEnd p = (L.reverse . L.dropWhile p . L.reverse) `eqP` - (unpackS . TL.dropWhileEnd p) -t_dropAround p = (L.dropWhile p . L.reverse . L.dropWhile p . L.reverse) - `eqP` (unpackS . T.dropAround p) -tl_dropAround p = (L.dropWhile p . L.reverse . L.dropWhile p . L.reverse) - `eqP` (unpackS . TL.dropAround p) -t_stripStart = T.dropWhile isSpace `eq` T.stripStart -tl_stripStart = TL.dropWhile isSpace `eq` TL.stripStart -t_stripEnd = T.dropWhileEnd isSpace `eq` T.stripEnd -tl_stripEnd = TL.dropWhileEnd isSpace `eq` TL.stripEnd -t_strip = T.dropAround isSpace `eq` T.strip -tl_strip = TL.dropAround isSpace `eq` TL.strip -t_splitAt n = L.splitAt n `eqP` (unpack2 . T.splitAt n) -tl_splitAt n = L.splitAt n `eqP` (unpack2 . TL.splitAt (fromIntegral n)) -t_span p = L.span p `eqP` (unpack2 . T.span p) -tl_span p = L.span p `eqP` (unpack2 . TL.span p) - -t_breakOn_id s = squid `eq` (uncurry T.append . T.breakOn s) - where squid t | T.null s = error "empty" - | otherwise = t -tl_breakOn_id s = squid `eq` (uncurry TL.append . TL.breakOn s) - where squid t | TL.null s = error "empty" - | otherwise = t -t_breakOn_start (NotEmpty s) t = - let (k,m) = T.breakOn s t - in k `T.isPrefixOf` t && (T.null m || s `T.isPrefixOf` m) -tl_breakOn_start (NotEmpty s) t = - let (k,m) = TL.breakOn s t - in k `TL.isPrefixOf` t && TL.null m || s `TL.isPrefixOf` m -t_breakOnEnd_end (NotEmpty s) t = - let (m,k) = T.breakOnEnd s t - in k `T.isSuffixOf` t && (T.null m || s `T.isSuffixOf` m) -tl_breakOnEnd_end (NotEmpty s) t = - let (m,k) = TL.breakOnEnd s t - in k `TL.isSuffixOf` t && (TL.null m || s `TL.isSuffixOf` m) -t_break p = L.break p `eqP` (unpack2 . T.break p) -tl_break p = L.break p `eqP` (unpack2 . TL.break p) -t_group = L.group `eqP` (map unpackS . T.group) -tl_group = L.group `eqP` (map unpackS . TL.group) -t_groupBy p = L.groupBy p `eqP` (map unpackS . T.groupBy p) -tl_groupBy p = L.groupBy p `eqP` (map unpackS . TL.groupBy p) -t_inits = L.inits `eqP` (map unpackS . T.inits) -tl_inits = L.inits `eqP` (map unpackS . TL.inits) -t_tails = L.tails `eqP` (map unpackS . T.tails) -tl_tails = unsquare $ - L.tails `eqP` (map unpackS . 
TL.tails) -t_findAppendId = unsquare $ \(NotEmpty s) ts -> - let t = T.intercalate s ts - in all (==t) $ map (uncurry T.append) (T.breakOnAll s t) -tl_findAppendId = unsquare $ \(NotEmpty s) ts -> - let t = TL.intercalate s ts - in all (==t) $ map (uncurry TL.append) (TL.breakOnAll s t) -t_findContains = unsquare $ \(NotEmpty s) -> - all (T.isPrefixOf s . snd) . T.breakOnAll s . T.intercalate s -tl_findContains = unsquare $ \(NotEmpty s) -> all (TL.isPrefixOf s . snd) . - TL.breakOnAll s . TL.intercalate s -sl_filterCount c = (L.genericLength . L.filter (==c)) `eqP` SL.countChar c -t_findCount s = (L.length . T.breakOnAll s) `eq` T.count s -tl_findCount s = (L.genericLength . TL.breakOnAll s) `eq` TL.count s - -t_splitOn_split s = unsquare $ - (T.splitOn s `eq` Slow.splitOn s) . T.intercalate s -tl_splitOn_split s = unsquare $ - ((TL.splitOn (TL.fromStrict s) . TL.fromStrict) `eq` - (map TL.fromStrict . T.splitOn s)) . T.intercalate s -t_splitOn_i (NotEmpty t) = id `eq` (T.intercalate t . T.splitOn t) -tl_splitOn_i (NotEmpty t) = id `eq` (TL.intercalate t . TL.splitOn t) - -t_split p = split p `eqP` (map unpackS . T.split p) -t_split_count c = (L.length . T.split (==c)) `eq` - ((1+) . T.count (T.singleton c)) -t_split_splitOn c = T.split (==c) `eq` T.splitOn (T.singleton c) -tl_split p = split p `eqP` (map unpackS . TL.split p) - -split :: (a -> Bool) -> [a] -> [[a]] -split _ [] = [[]] -split p xs = loop xs - where loop s | null s' = [l] - | otherwise = l : loop (tail s') - where (l, s') = break p s - -t_chunksOf_same_lengths k = all ((==k) . T.length) . ini . T.chunksOf k - where ini [] = [] - ini xs = init xs - -t_chunksOf_length k t = len == T.length t || (k <= 0 && len == 0) - where len = L.sum . L.map T.length $ T.chunksOf k t - -tl_chunksOf k = T.chunksOf k `eq` (map (T.concat . TL.toChunks) . - TL.chunksOf (fromIntegral k) . TL.fromStrict) - -t_lines = L.lines `eqP` (map unpackS . T.lines) -tl_lines = L.lines `eqP` (map unpackS . TL.lines) -{- -t_lines' = lines' `eqP` (map unpackS . T.lines') - where lines' "" = [] - lines' s = let (l, s') = break eol s - in l : case s' of - [] -> [] - ('\r':'\n':s'') -> lines' s'' - (_:s'') -> lines' s'' - eol c = c == '\r' || c == '\n' --} -t_words = L.words `eqP` (map unpackS . T.words) - -tl_words = L.words `eqP` (map unpackS . TL.words) -t_unlines = unsquare $ - L.unlines `eq` (unpackS . T.unlines . map packS) -tl_unlines = unsquare $ - L.unlines `eq` (unpackS . TL.unlines . map packS) -t_unwords = unsquare $ - L.unwords `eq` (unpackS . T.unwords . map packS) -tl_unwords = unsquare $ - L.unwords `eq` (unpackS . TL.unwords . map packS) - -s_isPrefixOf s = L.isPrefixOf s `eqP` - (S.isPrefixOf (S.stream $ packS s) . S.stream) -sf_isPrefixOf p s = (L.isPrefixOf s . L.filter p) `eqP` - (S.isPrefixOf (S.stream $ packS s) . S.filter p . S.stream) -t_isPrefixOf s = L.isPrefixOf s`eqP` T.isPrefixOf (packS s) -tl_isPrefixOf s = L.isPrefixOf s`eqP` TL.isPrefixOf (packS s) -t_isSuffixOf s = L.isSuffixOf s`eqP` T.isSuffixOf (packS s) -tl_isSuffixOf s = L.isSuffixOf s`eqP` TL.isSuffixOf (packS s) -t_isInfixOf s = L.isInfixOf s `eqP` T.isInfixOf (packS s) -tl_isInfixOf s = L.isInfixOf s `eqP` TL.isInfixOf (packS s) - -t_stripPrefix s = (fmap packS . L.stripPrefix s) `eqP` T.stripPrefix (packS s) -tl_stripPrefix s = (fmap packS . L.stripPrefix s) `eqP` TL.stripPrefix (packS s) - -stripSuffix p t = reverse `fmap` L.stripPrefix (reverse p) (reverse t) - -t_stripSuffix s = (fmap packS . 
stripSuffix s) `eqP` T.stripSuffix (packS s) -tl_stripSuffix s = (fmap packS . stripSuffix s) `eqP` TL.stripSuffix (packS s) - -commonPrefixes a0@(_:_) b0@(_:_) = Just (go a0 b0 []) - where go (a:as) (b:bs) ps - | a == b = go as bs (a:ps) - go as bs ps = (reverse ps,as,bs) -commonPrefixes _ _ = Nothing - -t_commonPrefixes a b (NonEmpty p) - = commonPrefixes pa pb == - repack `fmap` T.commonPrefixes (packS pa) (packS pb) - where repack (x,y,z) = (unpackS x,unpackS y,unpackS z) - pa = p ++ a - pb = p ++ b - -tl_commonPrefixes a b (NonEmpty p) - = commonPrefixes pa pb == - repack `fmap` TL.commonPrefixes (packS pa) (packS pb) - where repack (x,y,z) = (unpackS x,unpackS y,unpackS z) - pa = p ++ a - pb = p ++ b - -sf_elem p c = (L.elem c . L.filter p) `eqP` (S.elem c . S.filter p) -sf_filter q p = (L.filter p . L.filter q) `eqP` - (unpackS . S.filter p . S.filter q) -t_filter p = L.filter p `eqP` (unpackS . T.filter p) -tl_filter p = L.filter p `eqP` (unpackS . TL.filter p) -sf_findBy q p = (L.find p . L.filter q) `eqP` (S.findBy p . S.filter q) -t_find p = L.find p `eqP` T.find p -tl_find p = L.find p `eqP` TL.find p -t_partition p = L.partition p `eqP` (unpack2 . T.partition p) -tl_partition p = L.partition p `eqP` (unpack2 . TL.partition p) - -sf_index p s = forAll (choose (-l,l*2)) - ((L.filter p s L.!!) `eq` S.index (S.filter p $ packS s)) - where l = L.length s -t_index s = forAll (choose (-l,l*2)) ((s L.!!) `eq` T.index (packS s)) - where l = L.length s - -tl_index s = forAll (choose (-l,l*2)) - ((s L.!!) `eq` (TL.index (packS s) . fromIntegral)) - where l = L.length s - -t_findIndex p = L.findIndex p `eqP` T.findIndex p -t_count (NotEmpty t) = (subtract 1 . L.length . T.splitOn t) `eq` T.count t -tl_count (NotEmpty t) = (subtract 1 . L.genericLength . TL.splitOn t) `eq` - TL.count t -t_zip s = L.zip s `eqP` T.zip (packS s) -tl_zip s = L.zip s `eqP` TL.zip (packS s) -sf_zipWith p c s = (L.zipWith c (L.filter p s) . L.filter p) `eqP` - (unpackS . S.zipWith c (S.filter p $ packS s) . S.filter p) -t_zipWith c s = L.zipWith c s `eqP` (unpackS . T.zipWith c (packS s)) -tl_zipWith c s = L.zipWith c s `eqP` (unpackS . TL.zipWith c (packS s)) - -t_indices (NotEmpty s) = Slow.indices s `eq` indices s -tl_indices (NotEmpty s) = lazyIndices s `eq` S.indices s - where lazyIndices ss t = map fromIntegral $ Slow.indices (conc ss) (conc t) - conc = T.concat . TL.toChunks -t_indices_occurs = unsquare $ \(NotEmpty t) ts -> - let s = T.intercalate t ts - in Slow.indices t s === indices t s - --- Bit shifts. -shiftL w = forAll (choose (0,width-1)) $ \k -> Bits.shiftL w k == U.shiftL w k - where width = round (log (fromIntegral m) / log 2 :: Double) - (m,_) = (maxBound, m == w) -shiftR w = forAll (choose (0,width-1)) $ \k -> Bits.shiftR w k == U.shiftR w k - where width = round (log (fromIntegral m) / log 2 :: Double) - (m,_) = (maxBound, m == w) - -shiftL_Int = shiftL :: Int -> Property -shiftL_Word16 = shiftL :: Word16 -> Property -shiftL_Word32 = shiftL :: Word32 -> Property -shiftR_Int = shiftR :: Int -> Property -shiftR_Word16 = shiftR :: Word16 -> Property -shiftR_Word32 = shiftR :: Word32 -> Property - --- Builder. - -tb_singleton = id `eqP` - (unpackS . TB.toLazyText . mconcat . map TB.singleton) -tb_fromText = L.concat `eq` (unpackS . TB.toLazyText . mconcat . - map (TB.fromText . 
packS)) -tb_associative s1 s2 s3 = - TB.toLazyText (b1 `mappend` (b2 `mappend` b3)) == - TB.toLazyText ((b1 `mappend` b2) `mappend` b3) - where b1 = TB.fromText (packS s1) - b2 = TB.fromText (packS s2) - b3 = TB.fromText (packS s3) - --- Numeric builder stuff. - -tb_decimal :: (Integral a, Show a) => a -> Bool -tb_decimal = (TB.toLazyText . TB.decimal) `eq` (TL.pack . show) - -tb_decimal_integer (a::Integer) = tb_decimal a -tb_decimal_integer_big (Big a) = tb_decimal a -tb_decimal_int (a::Int) = tb_decimal a -tb_decimal_int8 (a::Int8) = tb_decimal a -tb_decimal_int16 (a::Int16) = tb_decimal a -tb_decimal_int32 (a::Int32) = tb_decimal a -tb_decimal_int64 (a::Int64) = tb_decimal a -tb_decimal_word (a::Word) = tb_decimal a -tb_decimal_word8 (a::Word8) = tb_decimal a -tb_decimal_word16 (a::Word16) = tb_decimal a -tb_decimal_word32 (a::Word32) = tb_decimal a -tb_decimal_word64 (a::Word64) = tb_decimal a - -tb_decimal_big_int (BigBounded (a::Int)) = tb_decimal a -tb_decimal_big_int64 (BigBounded (a::Int64)) = tb_decimal a -tb_decimal_big_word (BigBounded (a::Word)) = tb_decimal a -tb_decimal_big_word64 (BigBounded (a::Word64)) = tb_decimal a - -tb_hex :: (Integral a, Show a) => a -> Bool -tb_hex = (TB.toLazyText . TB.hexadecimal) `eq` (TL.pack . flip showHex "") - -tb_hexadecimal_integer (a::Integer) = tb_hex a -tb_hexadecimal_int (a::Int) = tb_hex a -tb_hexadecimal_int8 (a::Int8) = tb_hex a -tb_hexadecimal_int16 (a::Int16) = tb_hex a -tb_hexadecimal_int32 (a::Int32) = tb_hex a -tb_hexadecimal_int64 (a::Int64) = tb_hex a -tb_hexadecimal_word (a::Word) = tb_hex a -tb_hexadecimal_word8 (a::Word8) = tb_hex a -tb_hexadecimal_word16 (a::Word16) = tb_hex a -tb_hexadecimal_word32 (a::Word32) = tb_hex a -tb_hexadecimal_word64 (a::Word64) = tb_hex a - -tb_realfloat :: (RealFloat a, Show a) => a -> Bool -tb_realfloat = (TB.toLazyText . TB.realFloat) `eq` (TL.pack . show) - -tb_realfloat_float (a::Float) = tb_realfloat a -tb_realfloat_double (a::Double) = tb_realfloat a - -showFloat :: (RealFloat a) => TB.FPFormat -> Maybe Int -> a -> ShowS -showFloat TB.Exponent (Just 0) = showEFloat (Just 1) -- see gh-231 -showFloat TB.Exponent p = showEFloat p -showFloat TB.Fixed p = showFFloat p -showFloat TB.Generic p = showGFloat p - -tb_formatRealFloat :: (RealFloat a, Show a) => - a -> TB.FPFormat -> Precision a -> Property -tb_formatRealFloat a fmt prec = cond ==> - TB.formatRealFloat fmt p a === - TB.fromString (showFloat fmt p a "") - where p = precision a prec - cond = case (p,fmt) of -#if MIN_VERSION_base(4,12,0) - (Just 0, TB.Generic) -> False -- skipping due to gh-231 -#endif - _ -> True - -tb_formatRealFloat_float (a::Float) = tb_formatRealFloat a -tb_formatRealFloat_double (a::Double) = tb_formatRealFloat a - --- Reading. 
- -t_decimal (n::Int) s = - T.signed T.decimal (T.pack (show n) `T.append` t) === Right (n,t) - where t = T.dropWhile isDigit s -tl_decimal (n::Int) s = - TL.signed TL.decimal (TL.pack (show n) `TL.append` t) === Right (n,t) - where t = TL.dropWhile isDigit s -t_hexadecimal m s ox = - T.hexadecimal (T.concat [p, T.pack (showHex n ""), t]) === Right (n,t) - where t = T.dropWhile isHexDigit s - p = if ox then "0x" else "" - n = getPositive m :: Int -tl_hexadecimal m s ox = - TL.hexadecimal (TL.concat [p, TL.pack (showHex n ""), t]) === Right (n,t) - where t = TL.dropWhile isHexDigit s - p = if ox then "0x" else "" - n = getPositive m :: Int - -isFloaty c = c `elem` ("+-.0123456789eE" :: String) - -t_read_rational p tol (n::Double) s = - case p (T.pack (show n) `T.append` t) of - Left _err -> False - Right (n',t') -> t == t' && abs (n-n') <= tol - where t = T.dropWhile isFloaty s - -tl_read_rational p tol (n::Double) s = - case p (TL.pack (show n) `TL.append` t) of - Left _err -> False - Right (n',t') -> t == t' && abs (n-n') <= tol - where t = TL.dropWhile isFloaty s - -t_double = t_read_rational T.double 1e-13 -tl_double = tl_read_rational TL.double 1e-13 -t_rational = t_read_rational T.rational 1e-16 -tl_rational = tl_read_rational TL.rational 1e-16 - --- Input and output. - -t_put_get = write_read T.unlines T.filter put get - where put h = withRedirect h IO.stdout . T.putStr - get h = withRedirect h IO.stdin T.getContents -tl_put_get = write_read TL.unlines TL.filter put get - where put h = withRedirect h IO.stdout . TL.putStr - get h = withRedirect h IO.stdin TL.getContents -t_write_read = write_read T.unlines T.filter T.hPutStr T.hGetContents -tl_write_read = write_read TL.unlines TL.filter TL.hPutStr TL.hGetContents - -t_write_read_line e m b t = write_read head T.filter T.hPutStrLn - T.hGetLine e m b [t] -tl_write_read_line e m b t = write_read head TL.filter TL.hPutStrLn - TL.hGetLine e m b [t] - --- Low-level. - -t_dropWord16 m t = dropWord16 m t `T.isSuffixOf` t -t_takeWord16 m t = takeWord16 m t `T.isPrefixOf` t -t_take_drop_16 m t = T.append (takeWord16 n t) (dropWord16 n t) === t - where n = small m -t_use_from t = monadicIO $ assert . (==t) =<< run (useAsPtr t fromPtr) - -t_copy t = T.copy t === t - --- Regression tests. -s_filter_eq s = S.filter p t == S.streamList (filter p s) - where p = (/= S.last t) - t = S.streamList s - --- Make a stream appear shorter than it really is, to ensure that --- functions that consume inaccurately sized streams behave --- themselves. 
-shorten :: Int -> S.Stream a -> S.Stream a -shorten n t@(S.Stream arr off len) - | n > 0 = S.Stream arr off (smaller (exactSize n) len) - | otherwise = t - -tests :: Test -tests = - testGroup "Properties" [ - testGroup "creation/elimination" [ - testProperty "t_pack_unpack" t_pack_unpack, - testProperty "tl_pack_unpack" tl_pack_unpack, - testProperty "t_stream_unstream" t_stream_unstream, - testProperty "tl_stream_unstream" tl_stream_unstream, - testProperty "t_reverse_stream" t_reverse_stream, - testProperty "t_singleton" t_singleton, - testProperty "tl_singleton" tl_singleton, - testProperty "tl_unstreamChunks" tl_unstreamChunks, - testProperty "tl_chunk_unchunk" tl_chunk_unchunk, - testProperty "tl_from_to_strict" tl_from_to_strict - ], - - testGroup "transcoding" [ - testProperty "t_ascii" t_ascii, - testProperty "tl_ascii" tl_ascii, - testProperty "t_latin1" t_latin1, - testProperty "tl_latin1" tl_latin1, - testProperty "t_utf8" t_utf8, - testProperty "t_utf8'" t_utf8', - testProperty "t_utf8_incr" t_utf8_incr, - testProperty "t_utf8_undecoded" t_utf8_undecoded, - testProperty "tl_utf8" tl_utf8, - testProperty "tl_utf8'" tl_utf8', - testProperty "t_utf16LE" t_utf16LE, - testProperty "tl_utf16LE" tl_utf16LE, - testProperty "t_utf16BE" t_utf16BE, - testProperty "tl_utf16BE" tl_utf16BE, - testProperty "t_utf32LE" t_utf32LE, - testProperty "tl_utf32LE" tl_utf32LE, - testProperty "t_utf32BE" t_utf32BE, - testProperty "tl_utf32BE" tl_utf32BE, - testGroup "errors" [ - testProperty "t_utf8_err" t_utf8_err, - testProperty "t_utf8_err'" t_utf8_err' - ], - testGroup "error recovery" [ - testProperty "t_decode_with_error2" t_decode_with_error2, - testProperty "t_decode_with_error3" t_decode_with_error3, - testProperty "t_decode_with_error4" t_decode_with_error4, - testProperty "t_decode_with_error2'" t_decode_with_error2', - testProperty "t_decode_with_error3'" t_decode_with_error3', - testProperty "t_decode_with_error4'" t_decode_with_error4', - testProperty "t_infix_concat" t_infix_concat - ] - ], - - testGroup "instances" [ - testProperty "s_Eq" s_Eq, - testProperty "sf_Eq" sf_Eq, - testProperty "t_Eq" t_Eq, - testProperty "tl_Eq" tl_Eq, - testProperty "s_Ord" s_Ord, - testProperty "sf_Ord" sf_Ord, - testProperty "t_Ord" t_Ord, - testProperty "tl_Ord" tl_Ord, - testProperty "t_Read" t_Read, - testProperty "tl_Read" tl_Read, - testProperty "t_Show" t_Show, - testProperty "tl_Show" tl_Show, - testProperty "t_mappend" t_mappend, - testProperty "tl_mappend" tl_mappend, - testProperty "t_mconcat" t_mconcat, - testProperty "tl_mconcat" tl_mconcat, - testProperty "t_mempty" t_mempty, - testProperty "tl_mempty" tl_mempty, - testProperty "t_IsString" t_IsString, - testProperty "tl_IsString" tl_IsString - ], - - testGroup "basics" [ - testProperty "s_cons" s_cons, - testProperty "s_cons_s" s_cons_s, - testProperty "sf_cons" sf_cons, - testProperty "t_cons" t_cons, - testProperty "tl_cons" tl_cons, - testProperty "s_snoc" s_snoc, - testProperty "t_snoc" t_snoc, - testProperty "tl_snoc" tl_snoc, - testProperty "s_append" s_append, - testProperty "s_append_s" s_append_s, - testProperty "sf_append" sf_append, - testProperty "t_append" t_append, - testProperty "s_uncons" s_uncons, - testProperty "sf_uncons" sf_uncons, - testProperty "t_uncons" t_uncons, - testProperty "tl_uncons" tl_uncons, - testProperty "t_unsnoc" t_unsnoc, - testProperty "tl_unsnoc" tl_unsnoc, - testProperty "s_head" s_head, - testProperty "sf_head" sf_head, - testProperty "t_head" t_head, - testProperty "tl_head" tl_head, - testProperty 
"s_last" s_last, - testProperty "sf_last" sf_last, - testProperty "t_last" t_last, - testProperty "tl_last" tl_last, - testProperty "s_tail" s_tail, - testProperty "s_tail_s" s_tail_s, - testProperty "sf_tail" sf_tail, - testProperty "t_tail" t_tail, - testProperty "tl_tail" tl_tail, - testProperty "s_init" s_init, - testProperty "s_init_s" s_init_s, - testProperty "sf_init" sf_init, - testProperty "t_init" t_init, - testProperty "tl_init" tl_init, - testProperty "s_null" s_null, - testProperty "sf_null" sf_null, - testProperty "t_null" t_null, - testProperty "tl_null" tl_null, - testProperty "s_length" s_length, - testProperty "sf_length" sf_length, - testProperty "sl_length" sl_length, - testProperty "t_length" t_length, - testProperty "tl_length" tl_length, - testProperty "t_compareLength" t_compareLength, - testProperty "tl_compareLength" tl_compareLength - ], - - testGroup "transformations" [ - testProperty "s_map" s_map, - testProperty "s_map_s" s_map_s, - testProperty "sf_map" sf_map, - testProperty "t_map" t_map, - testProperty "tl_map" tl_map, - testProperty "s_intercalate" s_intercalate, - testProperty "t_intercalate" t_intercalate, - testProperty "tl_intercalate" tl_intercalate, - testProperty "s_intersperse" s_intersperse, - testProperty "s_intersperse_s" s_intersperse_s, - testProperty "sf_intersperse" sf_intersperse, - testProperty "t_intersperse" t_intersperse, - testProperty "tl_intersperse" tl_intersperse, - testProperty "t_transpose" t_transpose, - testProperty "tl_transpose" tl_transpose, - testProperty "t_reverse" t_reverse, - testProperty "tl_reverse" tl_reverse, - testProperty "t_reverse_short" t_reverse_short, - testProperty "t_replace" t_replace, - testProperty "tl_replace" tl_replace, - - testGroup "case conversion" [ - testProperty "s_toCaseFold_length" s_toCaseFold_length, - testProperty "sf_toCaseFold_length" sf_toCaseFold_length, - testProperty "t_toCaseFold_length" t_toCaseFold_length, - testProperty "tl_toCaseFold_length" tl_toCaseFold_length, - testProperty "t_toLower_length" t_toLower_length, - testProperty "t_toLower_lower" t_toLower_lower, - testProperty "tl_toLower_lower" tl_toLower_lower, - testProperty "t_toUpper_length" t_toUpper_length, - testProperty "t_toUpper_upper" t_toUpper_upper, - testProperty "tl_toUpper_upper" tl_toUpper_upper, - testProperty "t_toTitle_title" t_toTitle_title, - testProperty "t_toTitle_1stNotLower" t_toTitle_1stNotLower - ], - - testGroup "justification" [ - testProperty "s_justifyLeft" s_justifyLeft, - testProperty "s_justifyLeft_s" s_justifyLeft_s, - testProperty "sf_justifyLeft" sf_justifyLeft, - testProperty "t_justifyLeft" t_justifyLeft, - testProperty "tl_justifyLeft" tl_justifyLeft, - testProperty "t_justifyRight" t_justifyRight, - testProperty "tl_justifyRight" tl_justifyRight, - testProperty "t_center" t_center, - testProperty "tl_center" tl_center - ] - ], - - testGroup "folds" [ - testProperty "sf_foldl" sf_foldl, - testProperty "t_foldl" t_foldl, - testProperty "tl_foldl" tl_foldl, - testProperty "sf_foldl'" sf_foldl', - testProperty "t_foldl'" t_foldl', - testProperty "tl_foldl'" tl_foldl', - testProperty "sf_foldl1" sf_foldl1, - testProperty "t_foldl1" t_foldl1, - testProperty "tl_foldl1" tl_foldl1, - testProperty "t_foldl1'" t_foldl1', - testProperty "sf_foldl1'" sf_foldl1', - testProperty "tl_foldl1'" tl_foldl1', - testProperty "sf_foldr" sf_foldr, - testProperty "t_foldr" t_foldr, - testProperty "tl_foldr" tl_foldr, - testProperty "sf_foldr1" sf_foldr1, - testProperty "t_foldr1" t_foldr1, - testProperty 
"tl_foldr1" tl_foldr1, - - testGroup "special" [ - testProperty "s_concat_s" s_concat_s, - testProperty "sf_concat" sf_concat, - testProperty "t_concat" t_concat, - testProperty "tl_concat" tl_concat, - testProperty "sf_concatMap" sf_concatMap, - testProperty "t_concatMap" t_concatMap, - testProperty "tl_concatMap" tl_concatMap, - testProperty "sf_any" sf_any, - testProperty "t_any" t_any, - testProperty "tl_any" tl_any, - testProperty "sf_all" sf_all, - testProperty "t_all" t_all, - testProperty "tl_all" tl_all, - testProperty "sf_maximum" sf_maximum, - testProperty "t_maximum" t_maximum, - testProperty "tl_maximum" tl_maximum, - testProperty "sf_minimum" sf_minimum, - testProperty "t_minimum" t_minimum, - testProperty "tl_minimum" tl_minimum - ] - ], - - testGroup "construction" [ - testGroup "scans" [ - testProperty "sf_scanl" sf_scanl, - testProperty "t_scanl" t_scanl, - testProperty "tl_scanl" tl_scanl, - testProperty "t_scanl1" t_scanl1, - testProperty "tl_scanl1" tl_scanl1, - testProperty "t_scanr" t_scanr, - testProperty "tl_scanr" tl_scanr, - testProperty "t_scanr1" t_scanr1, - testProperty "tl_scanr1" tl_scanr1 - ], - - testGroup "mapAccum" [ - testProperty "t_mapAccumL" t_mapAccumL, - testProperty "tl_mapAccumL" tl_mapAccumL, - testProperty "t_mapAccumR" t_mapAccumR, - testProperty "tl_mapAccumR" tl_mapAccumR - ], - - testGroup "unfolds" [ - testProperty "tl_repeat" tl_repeat, - testProperty "s_replicate" s_replicate, - testProperty "t_replicate" t_replicate, - testProperty "tl_replicate" tl_replicate, - testProperty "tl_cycle" tl_cycle, - testProperty "tl_iterate" tl_iterate, - testProperty "t_unfoldr" t_unfoldr, - testProperty "tl_unfoldr" tl_unfoldr, - testProperty "t_unfoldrN" t_unfoldrN, - testProperty "tl_unfoldrN" tl_unfoldrN - ] - ], - - testGroup "substrings" [ - testGroup "breaking" [ - testProperty "s_take" s_take, - testProperty "s_take_s" s_take_s, - testProperty "sf_take" sf_take, - testProperty "t_take" t_take, - testProperty "t_takeEnd" t_takeEnd, - testProperty "tl_take" tl_take, - testProperty "tl_takeEnd" tl_takeEnd, - testProperty "s_drop" s_drop, - testProperty "s_drop_s" s_drop_s, - testProperty "sf_drop" sf_drop, - testProperty "t_drop" t_drop, - testProperty "t_dropEnd" t_dropEnd, - testProperty "tl_drop" tl_drop, - testProperty "tl_dropEnd" tl_dropEnd, - testProperty "s_take_drop" s_take_drop, - testProperty "s_take_drop_s" s_take_drop_s, - testProperty "s_takeWhile" s_takeWhile, - testProperty "s_takeWhile_s" s_takeWhile_s, - testProperty "sf_takeWhile" sf_takeWhile, - testProperty "t_takeWhile" t_takeWhile, - testProperty "tl_takeWhile" tl_takeWhile, - testProperty "t_takeWhileEnd" t_takeWhileEnd, - testProperty "t_takeWhileEnd_null" t_takeWhileEnd_null, - testProperty "tl_takeWhileEnd" tl_takeWhileEnd, - testProperty "tl_takeWhileEnd_null" tl_takeWhileEnd_null, - testProperty "sf_dropWhile" sf_dropWhile, - testProperty "s_dropWhile" s_dropWhile, - testProperty "s_dropWhile_s" s_dropWhile_s, - testProperty "t_dropWhile" t_dropWhile, - testProperty "tl_dropWhile" tl_dropWhile, - testProperty "t_dropWhileEnd" t_dropWhileEnd, - testProperty "tl_dropWhileEnd" tl_dropWhileEnd, - testProperty "t_dropAround" t_dropAround, - testProperty "tl_dropAround" tl_dropAround, - testProperty "t_stripStart" t_stripStart, - testProperty "tl_stripStart" tl_stripStart, - testProperty "t_stripEnd" t_stripEnd, - testProperty "tl_stripEnd" tl_stripEnd, - testProperty "t_strip" t_strip, - testProperty "tl_strip" tl_strip, - testProperty "t_splitAt" t_splitAt, - testProperty 
"tl_splitAt" tl_splitAt, - testProperty "t_span" t_span, - testProperty "tl_span" tl_span, - testProperty "t_breakOn_id" t_breakOn_id, - testProperty "tl_breakOn_id" tl_breakOn_id, - testProperty "t_breakOn_start" t_breakOn_start, - testProperty "tl_breakOn_start" tl_breakOn_start, - testProperty "t_breakOnEnd_end" t_breakOnEnd_end, - testProperty "tl_breakOnEnd_end" tl_breakOnEnd_end, - testProperty "t_break" t_break, - testProperty "tl_break" tl_break, - testProperty "t_group" t_group, - testProperty "tl_group" tl_group, - testProperty "t_groupBy" t_groupBy, - testProperty "tl_groupBy" tl_groupBy, - testProperty "t_inits" t_inits, - testProperty "tl_inits" tl_inits, - testProperty "t_tails" t_tails, - testProperty "tl_tails" tl_tails - ], - - testGroup "breaking many" [ - testProperty "t_findAppendId" t_findAppendId, - testProperty "tl_findAppendId" tl_findAppendId, - testProperty "t_findContains" t_findContains, - testProperty "tl_findContains" tl_findContains, - testProperty "sl_filterCount" sl_filterCount, - testProperty "t_findCount" t_findCount, - testProperty "tl_findCount" tl_findCount, - testProperty "t_splitOn_split" t_splitOn_split, - testProperty "tl_splitOn_split" tl_splitOn_split, - testProperty "t_splitOn_i" t_splitOn_i, - testProperty "tl_splitOn_i" tl_splitOn_i, - testProperty "t_split" t_split, - testProperty "t_split_count" t_split_count, - testProperty "t_split_splitOn" t_split_splitOn, - testProperty "tl_split" tl_split, - testProperty "t_chunksOf_same_lengths" t_chunksOf_same_lengths, - testProperty "t_chunksOf_length" t_chunksOf_length, - testProperty "tl_chunksOf" tl_chunksOf - ], - - testGroup "lines and words" [ - testProperty "t_lines" t_lines, - testProperty "tl_lines" tl_lines, - --testProperty "t_lines'" t_lines', - testProperty "t_words" t_words, - testProperty "tl_words" tl_words, - testProperty "t_unlines" t_unlines, - testProperty "tl_unlines" tl_unlines, - testProperty "t_unwords" t_unwords, - testProperty "tl_unwords" tl_unwords - ] - ], - - testGroup "predicates" [ - testProperty "s_isPrefixOf" s_isPrefixOf, - testProperty "sf_isPrefixOf" sf_isPrefixOf, - testProperty "t_isPrefixOf" t_isPrefixOf, - testProperty "tl_isPrefixOf" tl_isPrefixOf, - testProperty "t_isSuffixOf" t_isSuffixOf, - testProperty "tl_isSuffixOf" tl_isSuffixOf, - testProperty "t_isInfixOf" t_isInfixOf, - testProperty "tl_isInfixOf" tl_isInfixOf, - - testGroup "view" [ - testProperty "t_stripPrefix" t_stripPrefix, - testProperty "tl_stripPrefix" tl_stripPrefix, - testProperty "t_stripSuffix" t_stripSuffix, - testProperty "tl_stripSuffix" tl_stripSuffix, - testProperty "t_commonPrefixes" t_commonPrefixes, - testProperty "tl_commonPrefixes" tl_commonPrefixes - ] - ], - - testGroup "searching" [ - testProperty "sf_elem" sf_elem, - testProperty "sf_filter" sf_filter, - testProperty "t_filter" t_filter, - testProperty "tl_filter" tl_filter, - testProperty "sf_findBy" sf_findBy, - testProperty "t_find" t_find, - testProperty "tl_find" tl_find, - testProperty "t_partition" t_partition, - testProperty "tl_partition" tl_partition - ], - - testGroup "indexing" [ - testProperty "sf_index" sf_index, - testProperty "t_index" t_index, - testProperty "tl_index" tl_index, - testProperty "t_findIndex" t_findIndex, - testProperty "t_count" t_count, - testProperty "tl_count" tl_count, - testProperty "t_indices" t_indices, - testProperty "tl_indices" tl_indices, - testProperty "t_indices_occurs" t_indices_occurs - ], - - testGroup "zips" [ - testProperty "t_zip" t_zip, - testProperty "tl_zip" tl_zip, - 
testProperty "sf_zipWith" sf_zipWith, - testProperty "t_zipWith" t_zipWith, - testProperty "tl_zipWith" tl_zipWith - ], - - testGroup "regressions" [ - testProperty "s_filter_eq" s_filter_eq - ], - - testGroup "shifts" [ - testProperty "shiftL_Int" shiftL_Int, - testProperty "shiftL_Word16" shiftL_Word16, - testProperty "shiftL_Word32" shiftL_Word32, - testProperty "shiftR_Int" shiftR_Int, - testProperty "shiftR_Word16" shiftR_Word16, - testProperty "shiftR_Word32" shiftR_Word32 - ], - - testGroup "builder" [ - testProperty "tb_associative" tb_associative, - testGroup "decimal" [ - testProperty "tb_decimal_int" tb_decimal_int, - testProperty "tb_decimal_int8" tb_decimal_int8, - testProperty "tb_decimal_int16" tb_decimal_int16, - testProperty "tb_decimal_int32" tb_decimal_int32, - testProperty "tb_decimal_int64" tb_decimal_int64, - testProperty "tb_decimal_integer" tb_decimal_integer, - testProperty "tb_decimal_integer_big" tb_decimal_integer_big, - testProperty "tb_decimal_word" tb_decimal_word, - testProperty "tb_decimal_word8" tb_decimal_word8, - testProperty "tb_decimal_word16" tb_decimal_word16, - testProperty "tb_decimal_word32" tb_decimal_word32, - testProperty "tb_decimal_word64" tb_decimal_word64, - testProperty "tb_decimal_big_int" tb_decimal_big_int, - testProperty "tb_decimal_big_word" tb_decimal_big_word, - testProperty "tb_decimal_big_int64" tb_decimal_big_int64, - testProperty "tb_decimal_big_word64" tb_decimal_big_word64 - ], - testGroup "hexadecimal" [ - testProperty "tb_hexadecimal_int" tb_hexadecimal_int, - testProperty "tb_hexadecimal_int8" tb_hexadecimal_int8, - testProperty "tb_hexadecimal_int16" tb_hexadecimal_int16, - testProperty "tb_hexadecimal_int32" tb_hexadecimal_int32, - testProperty "tb_hexadecimal_int64" tb_hexadecimal_int64, - testProperty "tb_hexadecimal_integer" tb_hexadecimal_integer, - testProperty "tb_hexadecimal_word" tb_hexadecimal_word, - testProperty "tb_hexadecimal_word8" tb_hexadecimal_word8, - testProperty "tb_hexadecimal_word16" tb_hexadecimal_word16, - testProperty "tb_hexadecimal_word32" tb_hexadecimal_word32, - testProperty "tb_hexadecimal_word64" tb_hexadecimal_word64 - ], - testGroup "realfloat" [ - testProperty "tb_realfloat_double" tb_realfloat_double, - testProperty "tb_realfloat_float" tb_realfloat_float, - testProperty "tb_formatRealFloat_float" tb_formatRealFloat_float, - testProperty "tb_formatRealFloat_double" tb_formatRealFloat_double - ], - testProperty "tb_fromText" tb_fromText, - testProperty "tb_singleton" tb_singleton - ], - - testGroup "read" [ - testProperty "t_decimal" t_decimal, - testProperty "tl_decimal" tl_decimal, - testProperty "t_hexadecimal" t_hexadecimal, - testProperty "tl_hexadecimal" tl_hexadecimal, - testProperty "t_double" t_double, - testProperty "tl_double" tl_double, - testProperty "t_rational" t_rational, - testProperty "tl_rational" tl_rational - ], - - {- - testGroup "input-output" [ - testProperty "t_write_read" t_write_read, - testProperty "tl_write_read" tl_write_read, - testProperty "t_write_read_line" t_write_read_line, - testProperty "tl_write_read_line" tl_write_read_line - -- These tests are subject to I/O race conditions when run under - -- test-framework-quickcheck2. 
- -- testProperty "t_put_get" t_put_get - -- testProperty "tl_put_get" tl_put_get - ], - -} - - testGroup "lowlevel" [ - testProperty "t_dropWord16" t_dropWord16, - testProperty "t_takeWord16" t_takeWord16, - testProperty "t_take_drop_16" t_take_drop_16, - testProperty "t_use_from" t_use_from, - testProperty "t_copy" t_copy - ], - - testGroup "mul" Mul.tests - ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/QuickCheckUtils.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/QuickCheckUtils.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/QuickCheckUtils.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/QuickCheckUtils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,363 +0,0 @@ --- | This module provides quickcheck utilities, e.g. arbitrary and show --- instances, and comparison functions, so we can focus on the actual properties --- in the 'Tests.Properties' module. --- -{-# LANGUAGE CPP, FlexibleInstances, TypeSynonymInstances #-} -{-# OPTIONS_GHC -fno-warn-orphans #-} -module Tests.QuickCheckUtils - ( - genUnicode - , unsquare - , smallArbitrary - - , BigBounded(..) - , BigInt(..) - , NotEmpty(..) - - , Small(..) - , small - - , Precision(..) - , precision - - , integralRandomR - - , DecodeErr(..) - , genDecodeErr - - , Stringy(..) - , eq - , eqP - - , Encoding(..) - - , write_read - ) where - -import Control.Applicative ((<$>)) -import Control.Arrow (first, (***)) -import Control.DeepSeq (NFData (..), deepseq) -import Control.Exception (bracket) -import Data.String (IsString, fromString) -import Data.Text.Foreign (I16) -import Data.Text.Lazy.Builder.RealFloat (FPFormat(..)) -import Data.Word (Word8, Word16) -import Debug.Trace (trace) -import System.Random (Random(..), RandomGen) -import Test.QuickCheck hiding (Fixed(..), Small (..), (.&.)) -import Test.QuickCheck.Monadic (assert, monadicIO, run) -import Test.QuickCheck.Unicode (string) -import Tests.Utils -import qualified Data.ByteString as B -import qualified Data.Text as T -import qualified Data.Text.Encoding.Error as T -import qualified Data.Text.Internal.Fusion as TF -import qualified Data.Text.Internal.Fusion.Common as TF -import qualified Data.Text.Internal.Lazy as TL -import qualified Data.Text.Internal.Lazy.Fusion as TLF -import qualified Data.Text.Lazy as TL -import qualified System.IO as IO - -#if !MIN_VERSION_base(4,4,0) -import Data.Int (Int64) -import Data.Word (Word, Word64) -#endif - -genUnicode :: IsString a => Gen a -genUnicode = fromString <$> string - -instance Random I16 where - randomR = integralRandomR - random = randomR (minBound,maxBound) - -instance Arbitrary I16 where - arbitrary = arbitrarySizedIntegral - shrink = shrinkIntegral - -instance Arbitrary B.ByteString where - arbitrary = B.pack `fmap` arbitrary - shrink = map B.pack . shrink . B.unpack - -#if !MIN_VERSION_base(4,4,0) -instance Random Int64 where - randomR = integralRandomR - random = randomR (minBound,maxBound) - -instance Random Word where - randomR = integralRandomR - random = randomR (minBound,maxBound) - -instance Random Word8 where - randomR = integralRandomR - random = randomR (minBound,maxBound) - -instance Random Word64 where - randomR = integralRandomR - random = randomR (minBound,maxBound) -#endif - --- For tests that have O(n^2) running times or input sizes, resize --- their inputs to the square root of the originals. 
-unsquare :: (Arbitrary a, Show a, Testable b) => (a -> b) -> Property -unsquare = forAll smallArbitrary - -smallArbitrary :: (Arbitrary a, Show a) => Gen a -smallArbitrary = sized $ \n -> resize (smallish n) arbitrary - where smallish = round . (sqrt :: Double -> Double) . fromIntegral . abs - -instance Arbitrary T.Text where - arbitrary = T.pack `fmap` string - shrink = map T.pack . shrink . T.unpack - -instance Arbitrary TL.Text where - arbitrary = (TL.fromChunks . map notEmpty) `fmap` smallArbitrary - shrink = map TL.pack . shrink . TL.unpack - -newtype BigInt = Big Integer - deriving (Eq, Show) - -instance Arbitrary BigInt where - arbitrary = choose (1::Int,200) >>= \e -> Big <$> choose (10^(e-1),10^e) - shrink (Big a) = [Big (a `div` 2^(l-e)) | e <- shrink l] - where l = truncate (log (fromIntegral a) / log 2 :: Double) :: Integer - -newtype BigBounded a = BigBounded a - deriving (Eq, Show) - -instance (Bounded a, Random a, Arbitrary a) => Arbitrary (BigBounded a) where - arbitrary = BigBounded <$> choose (minBound, maxBound) - -newtype NotEmpty a = NotEmpty { notEmpty :: a } - deriving (Eq, Ord) - -instance Show a => Show (NotEmpty a) where - show (NotEmpty a) = show a - -instance Functor NotEmpty where - fmap f (NotEmpty a) = NotEmpty (f a) - -instance Arbitrary a => Arbitrary (NotEmpty [a]) where - arbitrary = sized (\n -> NotEmpty `fmap` (choose (1,n+1) >>= vector)) - shrink = shrinkNotEmpty null - -instance Arbitrary (NotEmpty T.Text) where - arbitrary = (fmap T.pack) `fmap` arbitrary - shrink = shrinkNotEmpty T.null - -instance Arbitrary (NotEmpty TL.Text) where - arbitrary = (fmap TL.pack) `fmap` arbitrary - shrink = shrinkNotEmpty TL.null - -instance Arbitrary (NotEmpty B.ByteString) where - arbitrary = (fmap B.pack) `fmap` arbitrary - shrink = shrinkNotEmpty B.null - -shrinkNotEmpty :: Arbitrary a => (a -> Bool) -> NotEmpty a -> [NotEmpty a] -shrinkNotEmpty isNull (NotEmpty xs) = - [ NotEmpty xs' | xs' <- shrink xs, not (isNull xs') ] - -data Small = S0 | S1 | S2 | S3 | S4 | S5 | S6 | S7 - | S8 | S9 | S10 | S11 | S12 | S13 | S14 | S15 - | S16 | S17 | S18 | S19 | S20 | S21 | S22 | S23 - | S24 | S25 | S26 | S27 | S28 | S29 | S30 | S31 - deriving (Eq, Ord, Enum, Bounded) - -small :: Integral a => Small -> a -small = fromIntegral . fromEnum - -intf :: (Int -> Int -> Int) -> Small -> Small -> Small -intf f a b = toEnum ((fromEnum a `f` fromEnum b) `mod` 32) - -instance Show Small where - show = show . fromEnum - -instance Read Small where - readsPrec n = map (first toEnum) . readsPrec n - -instance Num Small where - fromInteger = toEnum . fromIntegral - signum _ = 1 - abs = id - (+) = intf (+) - (-) = intf (-) - (*) = intf (*) - -instance Real Small where - toRational = toRational . fromEnum - -instance Integral Small where - toInteger = toInteger . 
fromEnum - quotRem a b = (toEnum x, toEnum y) - where (x, y) = fromEnum a `quotRem` fromEnum b - -instance Random Small where - randomR = integralRandomR - random = randomR (minBound,maxBound) - -instance Arbitrary Small where - arbitrary = choose (minBound, maxBound) - shrink = shrinkIntegral - -integralRandomR :: (Integral a, RandomGen g) => (a,a) -> g -> (a,g) -integralRandomR (a,b) g = case randomR (fromIntegral a :: Integer, - fromIntegral b :: Integer) g of - (x,h) -> (fromIntegral x, h) - -data DecodeErr = Lenient | Ignore | Strict | Replace - deriving (Show, Eq) - -genDecodeErr :: DecodeErr -> Gen T.OnDecodeError -genDecodeErr Lenient = return T.lenientDecode -genDecodeErr Ignore = return T.ignore -genDecodeErr Strict = return T.strictDecode -genDecodeErr Replace = (\c _ _ -> c) <$> frequency - [ (1, return Nothing) - , (50, Just <$> choose ('\x1', '\xffff')) - ] - -instance Arbitrary DecodeErr where - arbitrary = elements [Lenient, Ignore, Strict, Replace] - -class Stringy s where - packS :: String -> s - unpackS :: s -> String - splitAtS :: Int -> s -> (s,s) - packSChunkSize :: Int -> String -> s - packSChunkSize _ = packS - -instance Stringy String where - packS = id - unpackS = id - splitAtS = splitAt - -instance Stringy (TF.Stream Char) where - packS = TF.streamList - unpackS = TF.unstreamList - splitAtS n s = (TF.take n s, TF.drop n s) - -instance Stringy T.Text where - packS = T.pack - unpackS = T.unpack - splitAtS = T.splitAt - -instance Stringy TL.Text where - packSChunkSize k = TLF.unstreamChunks k . TF.streamList - packS = TL.pack - unpackS = TL.unpack - splitAtS = ((TL.lazyInvariant *** TL.lazyInvariant) .) . - TL.splitAt . fromIntegral - --- Do two functions give the same answer? -eq :: (Eq a, Show a) => (t -> a) -> (t -> a) -> t -> Bool -eq a b s = a s =^= b s - --- What about with the RHS packed? -eqP :: (Eq a, Show a, Stringy s) => - (String -> a) -> (s -> a) -> String -> Word8 -> Bool -eqP f g s w = eql "orig" (f s) (g t) && - eql "mini" (f s) (g mini) && - eql "head" (f sa) (g ta) && - eql "tail" (f sb) (g tb) - where t = packS s - mini = packSChunkSize 10 s - (sa,sb) = splitAt m s - (ta,tb) = splitAtS m t - l = length s - m | l == 0 = n - | otherwise = n `mod` l - n = fromIntegral w - eql d a b - | a =^= b = True - | otherwise = trace (d ++ ": " ++ show a ++ " /= " ++ show b) False - -instance Arbitrary FPFormat where - arbitrary = elements [Exponent, Fixed, Generic] - -newtype Precision a = Precision (Maybe Int) - deriving (Eq, Show) - -precision :: a -> Precision a -> Maybe Int -precision _ (Precision prec) = prec - -arbitraryPrecision :: Int -> Gen (Precision a) -arbitraryPrecision maxDigits = Precision <$> do - n <- choose (-1,maxDigits) - return $ if n == -1 - then Nothing - else Just n - -instance Arbitrary (Precision Float) where - arbitrary = arbitraryPrecision 11 - shrink = map Precision . shrink . precision undefined - -instance Arbitrary (Precision Double) where - arbitrary = arbitraryPrecision 22 - shrink = map Precision . shrink . precision undefined - --- Work around lack of Show instance for TextEncoding. -data Encoding = E String IO.TextEncoding - -instance Show Encoding where show (E n _) = "utf" ++ n - -instance Arbitrary Encoding where - arbitrary = oneof . 
map return $ - [ E "8" IO.utf8, E "8_bom" IO.utf8_bom, E "16" IO.utf16 - , E "16le" IO.utf16le, E "16be" IO.utf16be, E "32" IO.utf32 - , E "32le" IO.utf32le, E "32be" IO.utf32be - ] - -windowsNewlineMode :: IO.NewlineMode -windowsNewlineMode = IO.NewlineMode - { IO.inputNL = IO.CRLF, IO.outputNL = IO.CRLF - } - -instance Arbitrary IO.NewlineMode where - arbitrary = oneof . map return $ - [ IO.noNewlineTranslation, IO.universalNewlineMode, IO.nativeNewlineMode - , windowsNewlineMode - ] - -instance Arbitrary IO.BufferMode where - arbitrary = oneof [ return IO.NoBuffering, - return IO.LineBuffering, - return (IO.BlockBuffering Nothing), - (IO.BlockBuffering . Just . (+1) . fromIntegral) `fmap` - (arbitrary :: Gen Word16) ] - --- This test harness is complex! What property are we checking? --- --- Reading after writing a multi-line file should give the same --- results as were written. --- --- What do we vary while checking this property? --- * The lines themselves, scrubbed to contain neither CR nor LF. (By --- working with a list of lines, we ensure that the data will --- sometimes contain line endings.) --- * Encoding. --- * Newline translation mode. --- * Buffering. -write_read :: (NFData a, Eq a) - => ([b] -> a) - -> ((Char -> Bool) -> a -> b) - -> (IO.Handle -> a -> IO ()) - -> (IO.Handle -> IO a) - -> Encoding - -> IO.NewlineMode - -> IO.BufferMode - -> [a] - -> Property -write_read unline filt writer reader (E _ _) nl buf ts = - monadicIO $ assert . (==t) =<< run act - where t = unline . map (filt (not . (`elem` "\r\n"))) $ ts - act = withTempFile $ \path h -> do - -- hSetEncoding h enc - IO.hSetNewlineMode h nl - IO.hSetBuffering h buf - () <- writer h t - IO.hClose h - bracket (IO.openFile path IO.ReadMode) IO.hClose $ \h' -> do - -- hSetEncoding h' enc - IO.hSetNewlineMode h' nl - IO.hSetBuffering h' buf - r <- reader h' - r `deepseq` return r diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/Regressions.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/Regressions.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/Regressions.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/Regressions.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ --- | Regression tests for specific bugs. --- -{-# LANGUAGE OverloadedStrings, ScopedTypeVariables #-} -module Tests.Regressions - ( - tests - ) where - -import Control.Exception (SomeException, handle) -import Data.Char (isLetter) -import System.IO -import Test.HUnit (assertBool, assertEqual, assertFailure) -import qualified Data.ByteString as B -import Data.ByteString.Char8 () -import qualified Data.ByteString.Lazy as LB -import qualified Data.Text as T -import qualified Data.Text.Encoding as TE -import qualified Data.Text.IO as T -import qualified Data.Text.Lazy as LT -import qualified Data.Text.Lazy.Encoding as LE -import qualified Data.Text.Unsafe as T -import qualified Test.Framework as F -import qualified Test.Framework.Providers.HUnit as F - -import Tests.Utils (withTempFile) - --- Reported by Michael Snoyman: UTF-8 encoding a large lazy bytestring --- caused either a segfault or attempt to allocate a negative number --- of bytes. -lazy_encode_crash :: IO () -lazy_encode_crash = withTempFile $ \ _ h -> - LB.hPut h . LE.encodeUtf8 . LT.pack . 
replicate 100000 $ 'a' - --- Reported by Pieter Laeremans: attempting to read an incorrectly --- encoded file can result in a crash in the RTS (i.e. not merely an --- exception). -hGetContents_crash :: IO () -hGetContents_crash = withTempFile $ \ path h -> do - B.hPut h (B.pack [0x78, 0xc4 ,0x0a]) >> hClose h - h' <- openFile path ReadMode - hSetEncoding h' utf8 - handle (\(_::SomeException) -> return ()) $ - T.hGetContents h' >> assertFailure "T.hGetContents should crash" - --- Reported by Ian Lynagh: attempting to allocate a sufficiently large --- string (via either Array.new or Text.replicate) could result in an --- integer overflow. -replicate_crash :: IO () -replicate_crash = handle (\(_::SomeException) -> return ()) $ - T.replicate (2^power) "0123456789abcdef" `seq` - assertFailure "T.replicate should crash" - where - power | maxBound == (2147483647::Int) = 28 - | otherwise = 60 :: Int - --- Reported by John Millikin: a UTF-8 decode error handler could --- return a bogus substitution character, which we would write without --- checking. -utf8_decode_unsafe :: IO () -utf8_decode_unsafe = do - let t = TE.decodeUtf8With (\_ _ -> Just '\xdc00') "\x80" - assertBool "broken error recovery shouldn't break us" (t == "\xfffd") - --- Reported by Eric Seidel: we mishandled mapping Chars that fit in a --- single Word16 to Chars that require two. -mapAccumL_resize :: IO () -mapAccumL_resize = do - let f a _ = (a, '\65536') - count = 5 - val = T.mapAccumL f (0::Int) (T.replicate count "a") - assertEqual "mapAccumL should correctly fill buffers for two-word results" - (0, T.replicate count "\65536") val - assertEqual "mapAccumL should correctly size buffers for two-word results" - (count * 2) (T.lengthWord16 (snd val)) - --- See GitHub #197 -t197 :: IO () -t197 = - assertBool "length (filter (==',') \"0,00\") should be 1" (currencyParser "0,00") - where - currencyParser x = cond == 1 - where - cond = length fltr - fltr = filter (== ',') x - -t221 :: IO () -t221 = - assertEqual "toLower of large input shouldn't crash" - (T.toLower (T.replicate 200000 "0") `seq` ()) - () - -t227 :: IO () -t227 = - assertEqual "take (-3) shouldn't crash with overflow" - (T.length $ T.filter isLetter $ T.take (-3) "Hello! 
How are you doing today?") - 0 - -tests :: F.Test -tests = F.testGroup "Regressions" - [ F.testCase "hGetContents_crash" hGetContents_crash - , F.testCase "lazy_encode_crash" lazy_encode_crash - , F.testCase "mapAccumL_resize" mapAccumL_resize - , F.testCase "replicate_crash" replicate_crash - , F.testCase "utf8_decode_unsafe" utf8_decode_unsafe - , F.testCase "t197" t197 - , F.testCase "t221" t221 - , F.testCase "t227" t227 - ] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/SlowFunctions.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/SlowFunctions.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/SlowFunctions.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/SlowFunctions.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -{-# LANGUAGE BangPatterns #-} -module Tests.SlowFunctions - ( - indices - , splitOn - ) where - -import qualified Data.Text as T -import Data.Text.Internal (Text(..)) -import Data.Text.Unsafe (iter_, unsafeHead, unsafeTail) - -indices :: T.Text -- ^ Substring to search for (@needle@) - -> T.Text -- ^ Text to search in (@haystack@) - -> [Int] -indices needle@(Text _narr _noff nlen) haystack@(Text harr hoff hlen) - | T.null needle = [] - | otherwise = scan 0 - where - scan i | i >= hlen = [] - | needle `T.isPrefixOf` t = i : scan (i+nlen) - | otherwise = scan (i+d) - where t = Text harr (hoff+i) (hlen-i) - d = iter_ haystack i - -splitOn :: T.Text -- ^ Text to split on - -> T.Text -- ^ Input text - -> [T.Text] -splitOn pat src0 - | T.null pat = error "splitOn: empty" - | l == 1 = T.split (== (unsafeHead pat)) src0 - | otherwise = go src0 - where - l = T.length pat - go src = search 0 src - where - search !n !s - | T.null s = [src] -- not found - | pat `T.isPrefixOf` s = T.take n src : go (T.drop l s) - | otherwise = search (n+1) (unsafeTail s) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/Utils.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/Utils.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests/Utils.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests/Utils.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ --- | Miscellaneous testing utilities --- -{-# LANGUAGE ScopedTypeVariables #-} -module Tests.Utils - ( - (=^=) - , withRedirect - , withTempFile - ) where - -import Control.Exception (SomeException, bracket, bracket_, evaluate, try) -import Control.Monad (when) -import Debug.Trace (trace) -import GHC.IO.Handle.Internals (withHandle) -import System.Directory (removeFile) -import System.IO (Handle, hClose, hFlush, hIsOpen, hIsWritable, openTempFile) -import System.IO.Unsafe (unsafePerformIO) - --- Ensure that two potentially bottom values (in the sense of crashing --- for some inputs, not looping infinitely) either both crash, or both --- give comparable results for some input. -(=^=) :: (Eq a, Show a) => a -> a -> Bool -i =^= j = unsafePerformIO $ do - x <- try (evaluate i) - y <- try (evaluate j) - case (x,y) of - (Left (_ :: SomeException), Left (_ :: SomeException)) - -> return True - (Right a, Right b) -> return (a == b) - e -> trace ("*** Divergence: " ++ show e) return False -infix 4 =^= -{-# NOINLINE (=^=) #-} - -withTempFile :: (FilePath -> Handle -> IO a) -> IO a -withTempFile = bracket (openTempFile "." "crashy.txt") cleanupTemp . 
uncurry - where - cleanupTemp (path,h) = do - open <- hIsOpen h - when open (hClose h) - removeFile path - -withRedirect :: Handle -> Handle -> IO a -> IO a -withRedirect tmp h = bracket_ swap swap - where - whenM p a = p >>= (`when` a) - swap = do - whenM (hIsOpen tmp) $ whenM (hIsWritable tmp) $ hFlush tmp - whenM (hIsOpen h) $ whenM (hIsWritable h) $ hFlush h - withHandle "spam" tmp $ \tmph -> do - hh <- withHandle "spam" h $ \hh -> - return (tmph,hh) - return (hh,()) diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests.hs cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests.hs --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/Tests.hs 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/Tests.hs 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ --- | Provides a simple main function which runs all the tests --- -module Main - ( main - ) where - -import Test.Framework (defaultMain) - -import qualified Tests.Properties as Properties -import qualified Tests.Regressions as Regressions - -main :: IO () -main = defaultMain [Properties.tests, Regressions.tests] diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/text-tests.cabal cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/text-tests.cabal --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests/text-tests.cabal 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests/text-tests.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,156 +0,0 @@ -name: text-tests -version: 0.0.0.0 -synopsis: Functional tests for the text package -description: Functional tests for the text package -homepage: https://github.com/bos/text -license: BSD2 -license-file: ../LICENSE -author: Jasper Van der Jeugt , - Bryan O'Sullivan , - Tom Harper , - Duncan Coutts -maintainer: Bryan O'Sullivan -category: Text -build-type: Simple - -cabal-version: >=1.8 - -flag hpc - description: Enable HPC to generate coverage reports - default: False - manual: True - -flag bytestring-builder - description: Depend on the bytestring-builder package for backwards compatibility. - default: False - manual: False - -executable text-tests - main-is: Tests.hs - - other-modules: - Tests.Properties - Tests.Properties.Mul - Tests.QuickCheckUtils - Tests.Regressions - Tests.SlowFunctions - Tests.Utils - - ghc-options: - -Wall -threaded -O0 -rtsopts - - if flag(hpc) - ghc-options: - -fhpc - - cpp-options: - -DTEST_SUITE - -DASSERTS - - build-depends: - HUnit >= 1.2, - QuickCheck >= 2.7, - base == 4.*, - deepseq, - directory, - quickcheck-unicode >= 1.0.1.0, - random, - test-framework >= 0.4, - test-framework-hunit >= 0.2, - test-framework-quickcheck2 >= 0.2, - text-tests - - if flag(bytestring-builder) - build-depends: bytestring >= 0.9 && < 0.10.4, - bytestring-builder >= 0.10.4 - else - build-depends: bytestring >= 0.10.4 - -executable text-tests-stdio - main-is: Tests/IO.hs - - ghc-options: - -Wall -threaded -rtsopts - - -- Optional HPC support - if flag(hpc) - ghc-options: - -fhpc - - build-depends: - text-tests, - base >= 4 && < 5 - -library - hs-source-dirs: .. 
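For context, the removed Tests.hs driver above hands its groups to test-framework's `defaultMain`. A small self-contained sketch of that pattern, with hypothetical group and test names rather than the suite's own:

    import Test.Framework (Test, defaultMain, testGroup)
    import Test.Framework.Providers.HUnit (testCase)
    import Test.Framework.Providers.QuickCheck2 (testProperty)
    import Test.HUnit (assertEqual)

    -- Hypothetical groups mirroring the Properties.tests / Regressions.tests
    -- structure consumed by defaultMain in the removed Tests.hs.
    tests :: [Test]
    tests =
      [ testGroup "properties"
          [ testProperty "reverse_involutive" $
              \xs -> reverse (reverse xs) == (xs :: [Int]) ]
      , testGroup "regressions"
          [ testCase "trivial_arithmetic" (assertEqual "1+1" 2 (1 + 1 :: Int)) ]
      ]

    main :: IO ()
    main = defaultMain tests
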
- c-sources: ../cbits/cbits.c - include-dirs: ../include - ghc-options: -Wall - exposed-modules: - Data.Text - Data.Text.Array - Data.Text.Encoding - Data.Text.Encoding.Error - Data.Text.Internal.Encoding.Fusion - Data.Text.Internal.Encoding.Fusion.Common - Data.Text.Internal.Encoding.Utf16 - Data.Text.Internal.Encoding.Utf32 - Data.Text.Internal.Encoding.Utf8 - Data.Text.Foreign - Data.Text.Internal.Fusion - Data.Text.Internal.Fusion.CaseMapping - Data.Text.Internal.Fusion.Common - Data.Text.Internal.Fusion.Size - Data.Text.Internal.Fusion.Types - Data.Text.IO - Data.Text.Internal.IO - Data.Text.Internal - Data.Text.Lazy - Data.Text.Lazy.Builder - Data.Text.Internal.Builder.Functions - Data.Text.Lazy.Builder.Int - Data.Text.Internal.Builder.Int.Digits - Data.Text.Internal.Builder - Data.Text.Lazy.Builder.RealFloat - Data.Text.Internal.Builder.RealFloat.Functions - Data.Text.Lazy.Encoding - Data.Text.Internal.Lazy.Encoding.Fusion - Data.Text.Internal.Lazy.Fusion - Data.Text.Lazy.IO - Data.Text.Internal.Lazy - Data.Text.Lazy.Read - Data.Text.Internal.Lazy.Search - Data.Text.Internal.Private - Data.Text.Read - Data.Text.Show - Data.Text.Internal.Read - Data.Text.Internal.Search - Data.Text.Unsafe - Data.Text.Internal.Unsafe - Data.Text.Internal.Unsafe.Char - Data.Text.Internal.Unsafe.Shift - Data.Text.Internal.Functions - - if flag(hpc) - ghc-options: - -fhpc - - cpp-options: - -DTEST_SUITE - -DASSERTS - -DINTEGER_GMP - - build-depends: - array, - base == 4.*, - binary, - deepseq, - ghc-prim, - integer-gmp, - template-haskell - - if flag(bytestring-builder) - build-depends: bytestring >= 0.9 && < 0.10.4, - bytestring-builder >= 0.10.4 - else - build-depends: bytestring >= 0.10.4 diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests-and-benchmarks.markdown cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests-and-benchmarks.markdown --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/tests-and-benchmarks.markdown 2001-09-09 01:46:40.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/tests-and-benchmarks.markdown 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -Tests and benchmarks -==================== - -Prerequisites -------------- - -To run the tests and benchmarks, you will need the test data, which -you can clone from one of the following locations: - -* Mercurial master repository: - [bitbucket.org/bos/text-test-data](https://bitbucket.org/bos/text-test-data) - -* Git mirror repository: - [github.com/bos/text-test-data](https://github.com/bos/text-test-data) - -You can clone either repository into the `tests` subdirectory using - - cd tests/ - make text-test-data # to clone from mercurial, OR - make VCS=git text-test-data # to clone from git - -Many tests and benchmarks will fail if the test files are missing. - -Functional tests ----------------- - -The functional tests are located in the `tests` subdirectory. An overview of -what's in that directory: - - Makefile Has targets for common tasks - Tests Source files of the testing code - scripts Various utility scripts - text-tests.cabal Cabal file that compiles all benchmarks - -The `text-tests.cabal` builds: - -- A copy of the text library, sharing the source code, but exposing all internal - modules, for testing purposes -- The different test suites - -To compile, run all tests, and generate a coverage report, simply use `make`. - -Benchmarks ----------- - -The benchmarks are located in the `benchmarks` subdirectory. 
An overview of -what's in that directory: - - Makefile Has targets for common tasks - haskell Source files of the haskell benchmarks - python Python implementations of some benchmarks - ruby Ruby implementations of some benchmarks - text-benchmarks.cabal Cabal file which compiles all benchmarks - -To compile the benchmarks, navigate to the `benchmarks` subdirectory and run -`cabal configure && cabal build`. Then, you can run the benchmarks using: - - ./dist/build/text-benchmarks/text-benchmarks - -Or if you have a recent enough `cabal`, you can build and run the -benchmarks via - - cabal new-run exe:text-benchmarks -- --help - -However, since there's quite a lot of benchmarks, you usually don't want to -run them all. Instead, use the `-l` flag to get a list of benchmarks -and run the ones you want to inspect. If you want to configure the benchmarks -further, the exact parameters can be changed in `Benchmarks.hs`. diff -Nru cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/text.cabal cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/text.cabal --- cabal-install-3.2-3.2+git20191216.2.e076113/src/text-1.2.4.0/text.cabal 2019-12-17 14:07:30.000000000 +0000 +++ cabal-install-3.2-3.2+git20200127.2.b84fd4f/src/text-1.2.4.0/text.cabal 1970-01-01 00:00:00.000000000 +0000 @@ -1,329 +0,0 @@ -cabal-version: >= 1.10 -name: text -version: 1.2.4.0 - -homepage: https://github.com/haskell/text -bug-reports: https://github.com/haskell/text/issues -synopsis: An efficient packed Unicode text type. -description: - . - An efficient packed, immutable Unicode text type (both strict and - lazy), with a powerful loop fusion optimization framework. - . - The 'Text' type represents Unicode character strings, in a time and - space-efficient manner. This package provides text processing - capabilities that are optimized for performance critical use, both - in terms of large data quantities and high speed. - . - The 'Text' type provides character-encoding, type-safe case - conversion via whole-string case conversion functions (see "Data.Text"). - It also provides a range of functions for converting 'Text' values to - and from 'ByteStrings', using several standard encodings - (see "Data.Text.Encoding"). - . - Efficient locale-sensitive support for text IO is also supported - (see "Data.Text.IO"). - . - These modules are intended to be imported qualified, to avoid name - clashes with Prelude functions, e.g. - . - > import qualified Data.Text as T - . - == ICU Support - . - To use an extended and very rich family of functions for working - with Unicode text (including normalization, regular expressions, - non-standard encodings, text breaking, and locales), see - the [text-icu package](https://hackage.haskell.org/package/text-icu) - based on the well-respected and liberally - licensed [ICU library](http://site.icu-project.org/). - . - == Internal Representation: UTF-16 vs. UTF-8 - . - Currently the @text@ library uses UTF-16 as its internal representation - which is [neither a fixed-width nor always the most dense representation](http://utf8everywhere.org/) - for Unicode text. We're currently investigating the feasibility - of [changing Text's internal representation to UTF-8](https://github.com/text-utf8) - and if you need such a 'Text' type right now you might be interested in using the spin-off - packages and - . 
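The description above recommends importing the modules qualified. A minimal usage sketch along those lines; the program is illustrative and not part of the package:

    import qualified Data.Text          as T
    import qualified Data.Text.Encoding as TE
    import qualified Data.Text.IO       as TIO

    main :: IO ()
    main = do
      let greeting = T.toUpper (T.pack "hello, text")  -- whole-string case conversion
          bytes    = TE.encodeUtf8 greeting            -- Text -> ByteString (UTF-8)
      TIO.putStrLn (TE.decodeUtf8 bytes)               -- decode back and print
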
- - -license: BSD2 -license-file: LICENSE -author: Bryan O'Sullivan -maintainer: Bryan O'Sullivan , Herbert Valerio Riedel -copyright: 2009-2011 Bryan O'Sullivan, 2008-2009 Tom Harper -category: Data, Text -build-type: Simple -tested-with: GHC==8.6.5, GHC==8.4.4, - GHC==8.2.2, GHC==8.0.2, GHC==7.10.3, GHC==7.8.4, - GHC==7.6.3, GHC==7.4.2, GHC==7.2.2, GHC==7.0.4 -extra-source-files: - -- scripts/CaseFolding.txt - -- scripts/SpecialCasing.txt - README.markdown - benchmarks/Setup.hs - benchmarks/cbits/*.c - benchmarks/haskell/*.hs - benchmarks/haskell/Benchmarks/*.hs - benchmarks/haskell/Benchmarks/Programs/*.hs - benchmarks/python/*.py - benchmarks/ruby/*.rb - benchmarks/text-benchmarks.cabal - changelog.md - include/*.h - scripts/*.hs - tests-and-benchmarks.markdown - tests/*.hs - tests/.ghci - tests/Makefile - tests/Tests/*.hs - tests/Tests/Properties/*.hs - tests/cabal.config - tests/scripts/*.sh - tests/text-tests.cabal - -flag bytestring-builder - description: - Depend on the [bytestring-builder](https://hackage.haskell.org/package/bytestring-builder) - package for backwards compatibility. - default: False - manual: False - -flag developer - description: operate in developer mode - default: False - manual: True - -flag integer-simple - description: - Use the [simple integer library](http://hackage.haskell.org/package/integer-simple) - instead of [integer-gmp](http://hackage.haskell.org/package/integer-gmp) - default: False - manual: False - -library - c-sources: cbits/cbits.c - include-dirs: include - - exposed-modules: - Data.Text - Data.Text.Array - Data.Text.Encoding - Data.Text.Encoding.Error - Data.Text.Foreign - Data.Text.IO - Data.Text.Internal - Data.Text.Internal.Builder - Data.Text.Internal.Builder.Functions - Data.Text.Internal.Builder.Int.Digits - Data.Text.Internal.Builder.RealFloat.Functions - Data.Text.Internal.Encoding.Fusion - Data.Text.Internal.Encoding.Fusion.Common - Data.Text.Internal.Encoding.Utf16 - Data.Text.Internal.Encoding.Utf32 - Data.Text.Internal.Encoding.Utf8 - Data.Text.Internal.Functions - Data.Text.Internal.Fusion - Data.Text.Internal.Fusion.CaseMapping - Data.Text.Internal.Fusion.Common - Data.Text.Internal.Fusion.Size - Data.Text.Internal.Fusion.Types - Data.Text.Internal.IO - Data.Text.Internal.Lazy - Data.Text.Internal.Lazy.Encoding.Fusion - Data.Text.Internal.Lazy.Fusion - Data.Text.Internal.Lazy.Search - Data.Text.Internal.Private - Data.Text.Internal.Read - Data.Text.Internal.Search - Data.Text.Internal.Unsafe - Data.Text.Internal.Unsafe.Char - Data.Text.Internal.Unsafe.Shift - Data.Text.Lazy - Data.Text.Lazy.Builder - Data.Text.Lazy.Builder.Int - Data.Text.Lazy.Builder.RealFloat - Data.Text.Lazy.Encoding - Data.Text.Lazy.IO - Data.Text.Lazy.Internal - Data.Text.Lazy.Read - Data.Text.Read - Data.Text.Unsafe - - other-modules: - Data.Text.Show - - build-depends: - array >= 0.3 && < 0.6, - base >= 4.3 && < 5, - binary >= 0.5 && < 0.9, - deepseq >= 1.1 && < 1.5, - ghc-prim >= 0.2 && < 0.6, - template-haskell >= 2.5 && < 2.16 - - if flag(bytestring-builder) - build-depends: bytestring >= 0.9 && < 0.10.4, - bytestring-builder >= 0.10.4.0.2 && < 0.11 - else - build-depends: bytestring >= 0.10.4 && < 0.11 - - ghc-options: -Wall -fwarn-tabs -funbox-strict-fields -O2 - if flag(developer) - ghc-prof-options: -auto-all - ghc-options: -Werror - cpp-options: -DASSERTS - - if flag(integer-simple) - cpp-options: -DINTEGER_SIMPLE - build-depends: integer-simple >= 0.1 && < 0.5 - else - cpp-options: -DINTEGER_GMP - build-depends: integer-gmp >= 0.2 && < 1.1 - - 
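The `integer-simple` flag above selects either `-DINTEGER_SIMPLE` or `-DINTEGER_GMP` as a cpp-option. A small sketch, assuming such defines are consumed via CPP in the Haskell sources; the module and function names are illustrative, not the library's own code:

    {-# LANGUAGE CPP #-}
    -- Illustrative only: report which integer backend the cabal flag selected.
    module IntegerBackend (backendName) where

    backendName :: String
    #if defined(INTEGER_SIMPLE)
    backendName = "integer-simple"
    #elif defined(INTEGER_GMP)
    backendName = "integer-gmp"
    #else
    backendName = "no integer backend define set"
    #endif
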
-- compiler specification - default-language: Haskell2010 - default-extensions: - NondecreasingIndentation - other-extensions: - BangPatterns - CPP - DeriveDataTypeable - ExistentialQuantification - ForeignFunctionInterface - GeneralizedNewtypeDeriving - MagicHash - OverloadedStrings - Rank2Types - RankNTypes - RecordWildCards - ScopedTypeVariables - TypeFamilies - UnboxedTuples - UnliftedFFITypes - - if impl(ghc >= 7.2) - other-extensions: Trustworthy - if impl(ghc >= 7.4) - other-extensions: Safe - if impl(ghc >= 8.0) - other-extensions: TemplateHaskellQuotes - else - other-extensions: TemplateHaskell - -test-suite tests - type: exitcode-stdio-1.0 - c-sources: cbits/cbits.c - include-dirs: include - - ghc-options: - -Wall -threaded -rtsopts - - cpp-options: - -DASSERTS -DTEST_SUITE - - -- modules specific to test-suite - hs-source-dirs: tests - main-is: Tests.hs - other-modules: - Tests.Properties - Tests.Properties.Mul - Tests.QuickCheckUtils - Tests.Regressions - Tests.SlowFunctions - Tests.Utils - - -- Same as in `library` stanza; this is needed by cabal for accurate - -- file-monitoring as well as to avoid `-Wmissing-home-modules` - -- warnings We can't use an inter-package library dependency because - -- of different `ghc-options`/`cpp-options` (as a side-benefitt, - -- this enables per-component build parallelism in `cabal - -- new-build`!); We could, however, use cabal-version:2.2's `common` - -- blocks at some point in the future to reduce the duplication. - hs-source-dirs: . - other-modules: - Data.Text - Data.Text.Array - Data.Text.Encoding - Data.Text.Encoding.Error - Data.Text.Foreign - Data.Text.IO - Data.Text.Internal - Data.Text.Internal.Builder - Data.Text.Internal.Builder.Functions - Data.Text.Internal.Builder.Int.Digits - Data.Text.Internal.Builder.RealFloat.Functions - Data.Text.Internal.Encoding.Fusion - Data.Text.Internal.Encoding.Fusion.Common - Data.Text.Internal.Encoding.Utf16 - Data.Text.Internal.Encoding.Utf32 - Data.Text.Internal.Encoding.Utf8 - Data.Text.Internal.Functions - Data.Text.Internal.Fusion - Data.Text.Internal.Fusion.CaseMapping - Data.Text.Internal.Fusion.Common - Data.Text.Internal.Fusion.Size - Data.Text.Internal.Fusion.Types - Data.Text.Internal.IO - Data.Text.Internal.Lazy - Data.Text.Internal.Lazy.Encoding.Fusion - Data.Text.Internal.Lazy.Fusion - Data.Text.Internal.Lazy.Search - Data.Text.Internal.Private - Data.Text.Internal.Read - Data.Text.Internal.Search - Data.Text.Internal.Unsafe - Data.Text.Internal.Unsafe.Char - Data.Text.Internal.Unsafe.Shift - Data.Text.Lazy - Data.Text.Lazy.Builder - Data.Text.Lazy.Builder.Int - Data.Text.Lazy.Builder.RealFloat - Data.Text.Lazy.Encoding - Data.Text.Lazy.IO - Data.Text.Lazy.Internal - Data.Text.Lazy.Read - Data.Text.Read - Data.Text.Unsafe - Data.Text.Show - - build-depends: - HUnit >= 1.2, - QuickCheck >= 2.7 && < 2.11, - array, - base, - binary, - deepseq, - directory, - ghc-prim, - quickcheck-unicode >= 1.0.1.0, - random, - template-haskell, - test-framework >= 0.4, - test-framework-hunit >= 0.2, - test-framework-quickcheck2 >= 0.2 - - if flag(bytestring-builder) - build-depends: bytestring >= 0.9 && < 0.10.4, - bytestring-builder >= 0.10.4 - else - build-depends: bytestring >= 0.10.4 - - if flag(integer-simple) - cpp-options: -DINTEGER_SIMPLE - build-depends: integer-simple >= 0.1 && < 0.5 - else - cpp-options: -DINTEGER_GMP - build-depends: integer-gmp >= 0.2 - - default-language: Haskell2010 - default-extensions: NondecreasingIndentation - -source-repository head - type: git - location: 
https://github.com/haskell/text - -source-repository head - type: mercurial - location: https://bitbucket.org/bos/text